// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack depot - a stack trace storage that avoids duplication.
 *
 * Internally, stack depot maintains a hash table of unique stacktraces. The
 * stack traces themselves are stored contiguously one after another in a set
 * of separate page allocations.
 *
 * Author: Alexander Potapenko <[email protected]>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on the code by Dmitry Chernenkov.
 */

#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

#define DEPOT_POOLS_CAP 8192
/* The pool_index is offset by 1 so the first record does not have a 0 handle. */
#define DEPOT_MAX_POOLS \
	(((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
	 (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)
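/*
 * Note (a sketch of the capacity arithmetic, not a guarantee): the total
 * depot capacity in bytes is DEPOT_MAX_POOLS * DEPOT_POOL_SIZE, where
 * DEPOT_POOL_SIZE is defined alongside the handle layout in
 * <linux/stackdepot.h> as DEPOT_POOL_ORDER pages.
 */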

static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
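/*
 * Worked example of the scaling above (illustrative numbers): on a machine
 * with 4 GB of memory, 2^32 / 2^STACK_HASH_TABLE_SCALE = 2^18 buckets are
 * requested, which falls within the [2^12, 2^20] clamp. Machines below
 * 64 MB get the 2^12 minimum; machines above 16 GB get the 2^20 maximum.
 */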
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c

/* Hash table of stored stack records. */
static struct list_head *stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack records. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset = DEPOT_POOL_SIZE;
/* Freelist of stack records within stack_pools. */
static LIST_HEAD(free_stacks);
/* The lock must be held when performing pool or freelist modifications. */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Statistics counters for debugfs. */
enum depot_counter_id {
	DEPOT_COUNTER_REFD_ALLOCS,
	DEPOT_COUNTER_REFD_FREES,
	DEPOT_COUNTER_REFD_INUSE,
	DEPOT_COUNTER_FREELIST_SIZE,
	DEPOT_COUNTER_PERSIST_COUNT,
	DEPOT_COUNTER_PERSIST_BYTES,
	DEPOT_COUNTER_COUNT,
};
static long counters[DEPOT_COUNTER_COUNT];
static const char *const counter_names[] = {
	[DEPOT_COUNTER_REFD_ALLOCS]	= "refcounted_allocations",
	[DEPOT_COUNTER_REFD_FREES]	= "refcounted_frees",
	[DEPOT_COUNTER_REFD_INUSE]	= "refcounted_in_use",
	[DEPOT_COUNTER_FREELIST_SIZE]	= "freelist_size",
	[DEPOT_COUNTER_PERSIST_COUNT]	= "persistent_count",
	[DEPOT_COUNTER_PERSIST_BYTES]	= "persistent_bytes",
};
static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);

static int __init disable_stack_depot(char *str)
{
	return kstrtobool(str, &stack_depot_disabled);
}
early_param("stack_depot_disable", disable_stack_depot);

void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}

/* Initialize list_head's within the hash table. */
static void init_stack_table(unsigned long entries)
{
	unsigned long i;

	for (i = 0; i < entries; i++)
		INIT_LIST_HEAD(&stack_table[i]);
}

/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This function must be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	/*
	 * Print the disabled message even if early init has not been
	 * requested: stack_depot_init() will not print one.
	 */
	if (stack_depot_disabled) {
		pr_info("disabled\n");
		return 0;
	}

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
	 * in fuzzing scenarios, which leads to a large number of different
	 * stack traces being stored in stack depot.
	 */
	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

	/*
	 * Check if early init has been requested after setting
	 * stack_bucket_number_order: stack_depot_init() uses its value.
	 */
	if (!__stack_depot_early_init_requested)
		return 0;

	/*
	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
	 * on the automatic calculations performed by alloc_large_system_hash().
	 */
	if (stack_bucket_number_order)
		entries = 1UL << stack_bucket_number_order;
	pr_info("allocating hash table via alloc_large_system_hash\n");
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct list_head),
						entries,
						STACK_HASH_TABLE_SCALE,
						HASH_EARLY,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		return -ENOMEM;
	}
	if (!entries) {
		/*
		 * Obtain the number of entries that was calculated by
		 * alloc_large_system_hash().
		 */
		entries = stack_hash_mask + 1;
	}
	init_stack_table(entries);

	return 0;
}

/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	unsigned long entries;
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);

	if (stack_depot_disabled || stack_table)
		goto out_unlock;

	/*
	 * Similarly to stack_depot_early_init, use stack_bucket_number_order
	 * if assigned, and rely on automatic scaling otherwise.
	 */
	if (stack_bucket_number_order) {
		entries = 1UL << stack_bucket_number_order;
	} else {
		int scale = STACK_HASH_TABLE_SCALE;

		entries = nr_free_buffer_pages();
		entries = roundup_pow_of_two(entries);

		if (scale > PAGE_SHIFT)
			entries >>= (scale - PAGE_SHIFT);
		else
			entries <<= (PAGE_SHIFT - scale);
	}

	if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
	if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
	stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		ret = -ENOMEM;
		goto out_unlock;
	}
	stack_hash_mask = entries - 1;
	init_stack_table(entries);

out_unlock:
	mutex_unlock(&stack_depot_init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);

/*
 * Initializes a new stack pool and updates the list of pools.
 */
static bool depot_init_pool(void **prealloc)
{
	lockdep_assert_held(&pool_lock);

	if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
		/* Bail out if we reached the pool limit. */
		WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
		WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
		WARN_ONCE(1, "Stack depot reached limit capacity");
		return false;
	}

	if (!new_pool && *prealloc) {
		/* We have preallocated memory, use it. */
		WRITE_ONCE(new_pool, *prealloc);
		*prealloc = NULL;
	}

	if (!new_pool)
		return false; /* new_pool and *prealloc are NULL */

	/* Save reference to the pool to be used by depot_fetch_stack(). */
	stack_pools[pools_num] = new_pool;

	/*
	 * Stack depot tries to keep an extra pool allocated even before it runs
	 * out of space in the currently used pool.
	 *
	 * To indicate that a new preallocation is needed, new_pool is reset to
	 * NULL; do not reset it to NULL if we have reached the maximum number
	 * of pools.
	 */
	if (pools_num < DEPOT_MAX_POOLS)
		WRITE_ONCE(new_pool, NULL);
	else
		WRITE_ONCE(new_pool, STACK_DEPOT_POISON);

	/* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
	WRITE_ONCE(pools_num, pools_num + 1);
	ASSERT_EXCLUSIVE_WRITER(pools_num);

	pool_offset = 0;

	return true;
}

/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
{
	lockdep_assert_held(&pool_lock);

	/*
	 * If a new pool is already saved or the maximum number of
	 * pools is reached, do not use the preallocated memory.
	 */
	if (new_pool)
		return;

	WRITE_ONCE(new_pool, *prealloc);
	*prealloc = NULL;
}

/*
 * Try to initialize a new stack record from the current pool, a cached pool,
 * or the current pre-allocation.
 */
static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
{
	struct stack_record *stack;
	void *current_pool;
	u32 pool_index;

	lockdep_assert_held(&pool_lock);

	if (pool_offset + size > DEPOT_POOL_SIZE) {
		if (!depot_init_pool(prealloc))
			return NULL;
	}

	if (WARN_ON_ONCE(pools_num < 1))
		return NULL;
	pool_index = pools_num - 1;
	current_pool = stack_pools[pool_index];
	if (WARN_ON_ONCE(!current_pool))
		return NULL;

	stack = current_pool + pool_offset;

	/* Pre-initialize handle once. */
	stack->handle.pool_index_plus_1 = pool_index + 1;
	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
	stack->handle.extra = 0;
	INIT_LIST_HEAD(&stack->hash_list);

	pool_offset += size;

	return stack;
}

/* Try to find the next free usable entry from the freelist. */
static struct stack_record *depot_pop_free(void)
{
	struct stack_record *stack;

	lockdep_assert_held(&pool_lock);

	if (list_empty(&free_stacks))
		return NULL;

	/*
	 * We maintain the invariant that the elements in front are least
	 * recently used, and are therefore more likely to be associated with an
	 * RCU grace period in the past. Consequently it is sufficient to only
	 * check the first entry.
	 */
	stack = list_first_entry(&free_stacks, struct stack_record, free_list);
	if (!poll_state_synchronize_rcu(stack->rcu_state))
		return NULL;

	list_del(&stack->free_list);
	counters[DEPOT_COUNTER_FREELIST_SIZE]--;

	return stack;
}

static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
{
	const size_t used = flex_array_size(s, entries, nr_entries);
	const size_t unused = sizeof(s->entries) - used;

	WARN_ON_ONCE(sizeof(s->entries) < used);

	return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
}
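/*
 * Illustrative sizing (assuming a 64-bit kernel and DEPOT_STACK_ALIGN of 4,
 * i.e. 16-byte alignment): a record holding 16 frames needs the struct
 * header plus 16 * 8 bytes of entries, with the whole record rounded up to
 * a multiple of 16 bytes by the ALIGN() above.
 */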

/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
{
	struct stack_record *stack = NULL;
	size_t record_size;

	lockdep_assert_held(&pool_lock);

	/* This should already be checked by public API entry points. */
	if (WARN_ON_ONCE(!nr_entries))
		return NULL;

	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
	if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
		nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;

	if (flags & STACK_DEPOT_FLAG_GET) {
		/*
		 * Evictable entries have to allocate the max. size so they may
		 * safely be re-used by differently sized allocations.
		 */
		record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
		stack = depot_pop_free();
	} else {
		record_size = depot_stack_record_size(stack, nr_entries);
	}

	if (!stack) {
		stack = depot_pop_free_pool(prealloc, record_size);
		if (!stack)
			return NULL;
	}

	/* Save the stack trace. */
	stack->hash = hash;
	stack->size = nr_entries;
	/* stack->handle is already filled in by depot_pop_free_pool(). */
	memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));

	if (flags & STACK_DEPOT_FLAG_GET) {
		refcount_set(&stack->count, 1);
		counters[DEPOT_COUNTER_REFD_ALLOCS]++;
		counters[DEPOT_COUNTER_REFD_INUSE]++;
	} else {
		/* Warn on attempts to switch to refcounting this entry. */
		refcount_set(&stack->count, REFCOUNT_SATURATED);
		counters[DEPOT_COUNTER_PERSIST_COUNT]++;
		counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
	}

	/*
	 * Let KMSAN know the stored stack record is initialized. This shall
	 * prevent false positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(stack, record_size);

	return stack;
}

static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
{
	const int pools_num_cached = READ_ONCE(pools_num);
	union handle_parts parts = { .handle = handle };
	void *pool;
	u32 pool_index = parts.pool_index_plus_1 - 1;
	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
	struct stack_record *stack;

	lockdep_assert_not_held(&pool_lock);

	if (pool_index >= pools_num_cached) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
		     pool_index, pools_num_cached, handle);
		return NULL;
	}

	pool = stack_pools[pool_index];
	if (WARN_ON(!pool))
		return NULL;

	stack = pool + offset;
	if (WARN_ON(!refcount_read(&stack->count)))
		return NULL;

	return stack;
}

/* Links stack into the freelist. */
static void depot_free_stack(struct stack_record *stack)
{
	unsigned long flags;

	lockdep_assert_not_held(&pool_lock);

	raw_spin_lock_irqsave(&pool_lock, flags);
	printk_deferred_enter();

	/*
	 * Remove the entry from the hash list. Concurrent list traversal may
	 * still observe the entry, but since the refcount is zero, this entry
	 * will no longer be considered as valid.
	 */
	list_del_rcu(&stack->hash_list);

	/*
	 * Due to being used from constrained contexts such as the allocators,
	 * NMI, or even RCU itself, stack depot cannot rely on primitives that
	 * would sleep (such as synchronize_rcu()) or recursively call into
	 * stack depot again (such as call_rcu()).
	 *
	 * Instead, get an RCU cookie, so that we can ensure this entry isn't
	 * moved onto another list until the next grace period, and concurrent
	 * RCU list traversal remains safe.
	 */
	stack->rcu_state = get_state_synchronize_rcu();

	/*
	 * Add the entry to the freelist tail, so that older entries are
	 * considered first - their RCU cookie is more likely to no longer be
	 * associated with the current grace period.
	 */
	list_add_tail(&stack->free_list, &free_stacks);

	counters[DEPOT_COUNTER_FREELIST_SIZE]++;
	counters[DEPOT_COUNTER_REFD_FREES]++;
	counters[DEPOT_COUNTER_REFD_INUSE]--;

	printk_deferred_exit();
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}
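/*
 * Note: jhash2() hashes an array of u32 words, so each unsigned long frame
 * contributes sizeof(unsigned long) / sizeof(u32) words to the hash, e.g.
 * two words per frame on a 64-bit kernel.
 */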

/*
 * Non-instrumented version of memcmp().
 * Does not check the lexicographical order, only the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
		      unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct list_head *bucket,
					      unsigned long *entries, int size,
					      u32 hash, depot_flags_t flags)
{
	struct stack_record *stack, *ret = NULL;

	/*
	 * Stack depot may be used from instrumentation that instruments RCU or
	 * tracing itself; use the variant that does not call into RCU and
	 * cannot be traced.
	 *
	 * Note: Such use cases must take care when using refcounting to evict
	 * unused entries, because the stack record free-then-reuse code paths
	 * do call into RCU.
	 */
	rcu_read_lock_sched_notrace();

	list_for_each_entry_rcu(stack, bucket, hash_list) {
		if (stack->hash != hash || stack->size != size)
			continue;

		/*
		 * This may race with depot_free_stack() accessing the freelist
		 * management state unioned with @entries. The refcount is zero
		 * in that case and the below refcount_inc_not_zero() will fail.
		 */
		if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
			continue;

		/*
		 * Try to increment the refcount. If this succeeds, the stack
		 * record is valid and has not yet been freed.
		 *
		 * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior
		 * to then call stack_depot_put() later, and we can assume that
		 * a stack record is never placed back on the freelist.
		 */
		if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
			continue;

		ret = stack;
		break;
	}

	rcu_read_unlock_sched_notrace();

	return ret;
}

depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
					    unsigned int nr_entries,
					    gfp_t alloc_flags,
					    depot_flags_t depot_flags)
{
	struct list_head *bucket;
	struct stack_record *found = NULL;
	depot_stack_handle_t handle = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	bool allow_spin = gfpflags_allow_spinning(alloc_flags);
	bool can_alloc = (depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC) && allow_spin;
	unsigned long flags;
	u32 hash;

	if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
		return 0;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stack depot growth.
	 *
	 * Since use of filter_irq_stacks() is a requirement to ensure stack
	 * depot can efficiently deduplicate interrupt stacks, always call
	 * filter_irq_stacks() to simplify all callers' use of stack depot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disabled)
		return 0;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/* Fast path: look the stack trace up without locking. */
	found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
	if (found)
		goto exit;

	/*
	 * Allocate memory for a new pool if required now:
	 * we won't be able to do that under the lock.
	 */
	if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
		page = alloc_pages(gfp_nested_mask(alloc_flags),
				   DEPOT_POOL_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	if (in_nmi() || !allow_spin) {
		/* We can never allocate in NMI context. */
		WARN_ON_ONCE(can_alloc);
		/* Best effort; bail if we fail to take the lock. */
		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
			goto exit;
	} else {
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	printk_deferred_enter();

	/* Try to find again, to avoid concurrently inserting duplicates. */
	found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);

		if (new) {
			/*
			 * This releases the stack record into the bucket and
			 * makes it visible to readers in find_stack().
			 */
			list_add_rcu(&new->hash_list, bucket);
			found = new;
		}
	}

	if (prealloc) {
		/*
		 * Either stack depot already contains this stack trace, or
		 * depot_alloc_stack() did not consume the preallocated memory.
		 * Try to keep the preallocated memory for the future.
		 */
		depot_keep_new_pool(&prealloc);
	}

	printk_deferred_exit();
	raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
	if (prealloc) {
		/* Stack depot didn't use this memory, free it. */
		if (!allow_spin)
			free_pages_nolock(virt_to_page(prealloc), DEPOT_POOL_ORDER);
		else
			free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
	}
	if (found)
		handle = found->handle.handle;
	return handle;
}
EXPORT_SYMBOL_GPL(stack_depot_save_flags);

depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return stack_depot_save_flags(entries, nr_entries, alloc_flags,
				      STACK_DEPOT_FLAG_CAN_ALLOC);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
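/*
 * Typical usage (a sketch, not taken from a specific caller): capture the
 * current task's stack with stack_trace_save() and deduplicate it in the
 * depot. The returned handle can later be passed to stack_depot_fetch() or
 * stack_depot_print().
 *
 *	unsigned long entries[16];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
 */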

struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle)
{
	if (!handle)
		return NULL;

	return depot_fetch_stack(handle);
}

unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	struct stack_record *stack;

	*entries = NULL;
	/*
	 * Let KMSAN know *entries is initialized. This shall prevent false
	 * positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(entries, sizeof(*entries));

	if (!handle || stack_depot_disabled)
		return 0;

	stack = depot_fetch_stack(handle);
	/*
	 * Should never be NULL, otherwise this is a use-after-put (or just a
	 * corrupt handle).
	 */
	if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
		return 0;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

void stack_depot_put(depot_stack_handle_t handle)
{
	struct stack_record *stack;

	if (!handle || stack_depot_disabled)
		return;

	stack = depot_fetch_stack(handle);
	/*
	 * Should always be able to find the stack record, otherwise this is an
	 * unbalanced put attempt (or a corrupt handle).
	 */
	if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()"))
		return;

	if (refcount_dec_and_test(&stack->count))
		depot_free_stack(stack);
}
EXPORT_SYMBOL_GPL(stack_depot_put);
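/*
 * Sketch of the refcounted lifetime (hypothetical caller, for illustration):
 * saving with STACK_DEPOT_FLAG_GET takes a reference that must be balanced
 * by stack_depot_put() once the handle is no longer needed.
 *
 *	handle = stack_depot_save_flags(entries, nr_entries, GFP_KERNEL,
 *					STACK_DEPOT_FLAG_CAN_ALLOC |
 *					STACK_DEPOT_FLAG_GET);
 *	...
 *	stack_depot_put(handle);
 */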

void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
			int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);

depot_stack_handle_t __must_check stack_depot_set_extra_bits(
			depot_stack_handle_t handle, unsigned int extra_bits)
{
	union handle_parts parts = { .handle = handle };

	/* Don't set extra bits on empty handles. */
	if (!handle)
		return 0;

	parts.extra = extra_bits;
	return parts.handle;
}
EXPORT_SYMBOL(stack_depot_set_extra_bits);

unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);

static int stats_show(struct seq_file *seq, void *v)
{
	/*
	 * Data race ok: these are just statistics counters, and approximate
	 * statistics are ok for debugging.
	 */
	seq_printf(seq, "pools: %d\n", data_race(pools_num));
	for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
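/*
 * With debugfs mounted at the usual location, the counters can be inspected
 * at runtime (values below are illustrative):
 *
 *	# cat /sys/kernel/debug/stackdepot/stats
 *	pools: 2
 *	refcounted_allocations: 0
 *	...
 *	persistent_bytes: 123456
 */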

static int depot_debugfs_init(void)
{
	struct dentry *dir;

	if (stack_depot_disabled)
		return 0;

	dir = debugfs_create_dir("stackdepot", NULL);
	debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
	return 0;
}
late_initcall(depot_debugfs_init);