xref: /linux-6.15/lib/stackdepot.c (revision c2a29254)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Stack depot - a stack trace storage that avoids duplication.
4  *
5  * Internally, stack depot maintains a hash table of unique stacktraces. The
6  * stack traces themselves are stored contiguously one after another in a set
7  * of separate page allocations.
8  *
9  * Author: Alexander Potapenko <[email protected]>
10  * Copyright (C) 2016 Google, Inc.
11  *
12  * Based on the code by Dmitry Chernenkov.
13  */
14 
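/*
 * Example usage (a minimal sketch; see <linux/stackdepot.h> and
 * <linux/stacktrace.h> for the exact declarations; the buffer size of 16 is
 * an arbitrary example value):
 *
 *	unsigned long entries[16], *saved;
 *	unsigned int nr, nr_saved;
 *	depot_stack_handle_t handle;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr, GFP_KERNEL);
 *	...
 *	nr_saved = stack_depot_fetch(handle, &saved);
 *	stack_trace_print(saved, nr_saved, 0);
 */
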
15 #define pr_fmt(fmt) "stackdepot: " fmt
16 
17 #include <linux/debugfs.h>
18 #include <linux/gfp.h>
19 #include <linux/jhash.h>
20 #include <linux/kernel.h>
21 #include <linux/kmsan.h>
22 #include <linux/list.h>
23 #include <linux/mm.h>
24 #include <linux/mutex.h>
25 #include <linux/percpu.h>
26 #include <linux/printk.h>
27 #include <linux/refcount.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/stacktrace.h>
31 #include <linux/stackdepot.h>
32 #include <linux/string.h>
33 #include <linux/types.h>
34 #include <linux/memblock.h>
35 #include <linux/kasan-enabled.h>
36 
37 #define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
38 
39 #define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
40 #define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
41 #define DEPOT_STACK_ALIGN 4
42 #define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
43 #define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
44 			       STACK_DEPOT_EXTRA_BITS)
45 #if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32
46 /*
47  * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack
48  * traces. As KMSAN does not support evicting stack traces from the stack
49  * depot, the stack depot capacity might be reached quickly with large stack
50  * records. Adjust the maximum number of stack depot pools for this case.
51  */
52 #define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16))
53 #else
54 #define DEPOT_POOLS_CAP 8192
55 #endif
56 #define DEPOT_MAX_POOLS \
57 	(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
58 	 (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
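
/*
 * Worked example (a sketch, assuming 4 KB pages): each pool is
 * 1 << (12 + 2) = 16 KB. With KMSAN enabled and
 * CONFIG_STACKDEPOT_MAX_FRAMES == 64, DEPOT_POOLS_CAP is
 * 8192 * (64 / 16) = 32768, so up to 512 MB of pools may be allocated,
 * unless 1 << DEPOT_POOL_INDEX_BITS is smaller.
 */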
59 
60 /* Compact structure that stores a reference to a stack. */
61 union handle_parts {
62 	depot_stack_handle_t handle;
63 	struct {
64 		u32 pool_index	: DEPOT_POOL_INDEX_BITS;
65 		u32 offset	: DEPOT_OFFSET_BITS;
66 		u32 extra	: STACK_DEPOT_EXTRA_BITS;
67 	};
68 };
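
/*
 * Handle layout example (a sketch, assuming 4 KB pages and the default
 * STACK_DEPOT_EXTRA_BITS of 5): DEPOT_OFFSET_BITS is 2 + 12 - 4 = 10 and
 * DEPOT_POOL_INDEX_BITS is 32 - 10 - 5 = 17, so a handle packs the pool
 * index, the 16-byte-aligned offset within that pool, and the extra bits
 * into a single 32-bit value.
 */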
69 
70 struct stack_record {
71 	struct list_head list;		/* Links in hash table or freelist */
72 	u32 hash;			/* Hash in hash table */
73 	u32 size;			/* Number of stored frames */
74 	union handle_parts handle;
75 	refcount_t count;
76 	unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];	/* Frames */
77 };
78 
79 #define DEPOT_STACK_RECORD_SIZE \
80 	ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
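
/*
 * Sizing example (a sketch, assuming a 64-bit kernel and the default
 * CONFIG_STACKDEPOT_MAX_FRAMES of 64): struct stack_record is roughly
 * 32 bytes of metadata plus 64 * 8 = 512 bytes of frames, i.e. 544 bytes
 * once aligned, so a 16 KB pool holds about 30 stack records.
 */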
81 
82 static bool stack_depot_disabled;
83 static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
84 static bool __stack_depot_early_init_passed __initdata;
85 
86 /* Use one hash table bucket per 16 KB of memory. */
87 #define STACK_HASH_TABLE_SCALE 14
88 /* Limit the number of buckets to between 4K and 1M. */
89 #define STACK_BUCKET_NUMBER_ORDER_MIN 12
90 #define STACK_BUCKET_NUMBER_ORDER_MAX 20
91 /* Initial seed for jhash2. */
92 #define STACK_HASH_SEED 0x9747b28c
93 
94 /* Hash table of stored stack records. */
95 static struct list_head *stack_table;
96 /* Fixed order of the number of table buckets. Used when KASAN is enabled. */
97 static unsigned int stack_bucket_number_order;
98 /* Hash mask for indexing the table. */
99 static unsigned int stack_hash_mask;
100 
101 /* Array of memory regions that store stack records. */
102 static void *stack_pools[DEPOT_MAX_POOLS];
103 /* Newly allocated pool that is not yet added to stack_pools. */
104 static void *new_pool;
105 /* Number of pools in stack_pools. */
106 static int pools_num;
107 /* Freelist of stack records within stack_pools. */
108 static LIST_HEAD(free_stacks);
109 /*
110  * Stack depot tries to keep an extra pool allocated even before it runs out
111  * of space in the currently used pool. This flag marks whether this extra
112  * pool needs to be allocated. It is false when either an extra pool is
113  * already kept in new_pool or the limit on the number of pools is reached.
114  */
115 static bool new_pool_required = true;
116 /* Lock that protects the variables above. */
117 static DEFINE_RWLOCK(pool_rwlock);
118 
119 /* Statistics counters for debugfs. */
120 enum depot_counter_id {
121 	DEPOT_COUNTER_ALLOCS,
122 	DEPOT_COUNTER_FREES,
123 	DEPOT_COUNTER_INUSE,
124 	DEPOT_COUNTER_FREELIST_SIZE,
125 	DEPOT_COUNTER_COUNT,
126 };
127 static long counters[DEPOT_COUNTER_COUNT];
128 static const char *const counter_names[] = {
129 	[DEPOT_COUNTER_ALLOCS]		= "allocations",
130 	[DEPOT_COUNTER_FREES]		= "frees",
131 	[DEPOT_COUNTER_INUSE]		= "in_use",
132 	[DEPOT_COUNTER_FREELIST_SIZE]	= "freelist_size",
133 };
134 static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
135 
136 static int __init disable_stack_depot(char *str)
137 {
138 	return kstrtobool(str, &stack_depot_disabled);
139 }
140 early_param("stack_depot_disable", disable_stack_depot);
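
/*
 * Example: booting with "stack_depot_disable=1" (or "=on") on the kernel
 * command line skips the hash table allocation below and makes
 * stack_depot_save() and stack_depot_fetch() return 0.
 */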
141 
142 void __init stack_depot_request_early_init(void)
143 {
144 	/* Too late to request early init now. */
145 	WARN_ON(__stack_depot_early_init_passed);
146 
147 	__stack_depot_early_init_requested = true;
148 }
149 
150 /* Initialize list_head's within the hash table. */
151 static void init_stack_table(unsigned long entries)
152 {
153 	unsigned long i;
154 
155 	for (i = 0; i < entries; i++)
156 		INIT_LIST_HEAD(&stack_table[i]);
157 }
158 
159 /* Allocates a hash table via memblock. Can only be used during early boot. */
160 int __init stack_depot_early_init(void)
161 {
162 	unsigned long entries = 0;
163 
164 	/* This function must be called only once, from mm_init(). */
165 	if (WARN_ON(__stack_depot_early_init_passed))
166 		return 0;
167 	__stack_depot_early_init_passed = true;
168 
169 	/*
170 	 * Print disabled message even if early init has not been requested:
171 	 * stack_depot_init() will not print one.
172 	 */
173 	if (stack_depot_disabled) {
174 		pr_info("disabled\n");
175 		return 0;
176 	}
177 
178 	/*
179 	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
180 	 * in fuzzing scenarios, which leads to a large number of different
181 	 * stack traces being stored in stack depot.
182 	 */
183 	if (kasan_enabled() && !stack_bucket_number_order)
184 		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
185 
186 	/*
187 	 * Check if early init has been requested after setting
188 	 * stack_bucket_number_order: stack_depot_init() uses its value.
189 	 */
190 	if (!__stack_depot_early_init_requested)
191 		return 0;
192 
193 	/*
194 	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
195 	 * on the automatic calculations performed by alloc_large_system_hash().
196 	 */
197 	if (stack_bucket_number_order)
198 		entries = 1UL << stack_bucket_number_order;
199 	pr_info("allocating hash table via alloc_large_system_hash\n");
200 	stack_table = alloc_large_system_hash("stackdepot",
201 						sizeof(struct list_head),
202 						entries,
203 						STACK_HASH_TABLE_SCALE,
204 						HASH_EARLY,
205 						NULL,
206 						&stack_hash_mask,
207 						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
208 						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
209 	if (!stack_table) {
210 		pr_err("hash table allocation failed, disabling\n");
211 		stack_depot_disabled = true;
212 		return -ENOMEM;
213 	}
214 	if (!entries) {
215 		/*
216 		 * Obtain the number of entries that was calculated by
217 		 * alloc_large_system_hash().
218 		 */
219 		entries = stack_hash_mask + 1;
220 	}
221 	init_stack_table(entries);
222 
223 	return 0;
224 }
225 
226 /* Allocates a hash table via kvcalloc. Can be used after boot. */
227 int stack_depot_init(void)
228 {
229 	static DEFINE_MUTEX(stack_depot_init_mutex);
230 	unsigned long entries;
231 	int ret = 0;
232 
233 	mutex_lock(&stack_depot_init_mutex);
234 
235 	if (stack_depot_disabled || stack_table)
236 		goto out_unlock;
237 
238 	/*
239 	 * Similarly to stack_depot_early_init, use stack_bucket_number_order
240 	 * if assigned, and rely on automatic scaling otherwise.
241 	 */
242 	if (stack_bucket_number_order) {
243 		entries = 1UL << stack_bucket_number_order;
244 	} else {
245 		int scale = STACK_HASH_TABLE_SCALE;
246 
247 		entries = nr_free_buffer_pages();
248 		entries = roundup_pow_of_two(entries);
249 
250 		if (scale > PAGE_SHIFT)
251 			entries >>= (scale - PAGE_SHIFT);
252 		else
253 			entries <<= (PAGE_SHIFT - scale);
254 	}
255 
256 	if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
257 		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
258 	if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
259 		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
260 
261 	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
262 	stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
263 	if (!stack_table) {
264 		pr_err("hash table allocation failed, disabling\n");
265 		stack_depot_disabled = true;
266 		ret = -ENOMEM;
267 		goto out_unlock;
268 	}
269 	stack_hash_mask = entries - 1;
270 	init_stack_table(entries);
271 
272 out_unlock:
273 	mutex_unlock(&stack_depot_init_mutex);
274 
275 	return ret;
276 }
277 EXPORT_SYMBOL_GPL(stack_depot_init);
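
/*
 * Initialization sketch: users either select CONFIG_STACKDEPOT_ALWAYS_INIT,
 * call stack_depot_request_early_init() before the early init above runs, or
 * call stack_depot_init() from a sleepable context before their first
 * stack_depot_save(), e.g. (with a hypothetical debug toggle):
 *
 *	if (my_debug_feature_enabled)
 *		stack_depot_init();
 */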
278 
279 /* Initializes a stack depot pool. */
280 static void depot_init_pool(void *pool)
281 {
282 	int offset;
283 
284 	lockdep_assert_held_write(&pool_rwlock);
285 
286 	WARN_ON(!list_empty(&free_stacks));
287 
288 	/* Initialize handles and link stack records into the freelist. */
289 	for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
290 	     offset += DEPOT_STACK_RECORD_SIZE) {
291 		struct stack_record *stack = pool + offset;
292 
293 		stack->handle.pool_index = pools_num;
294 		stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
295 		stack->handle.extra = 0;
296 
297 		list_add(&stack->list, &free_stacks);
298 		counters[DEPOT_COUNTER_FREELIST_SIZE]++;
299 	}
300 
301 	/* Save reference to the pool to be used by depot_fetch_stack(). */
302 	stack_pools[pools_num] = pool;
303 	pools_num++;
304 }
305 
306 /* Keeps the preallocated memory to be used for a new stack depot pool. */
307 static void depot_keep_new_pool(void **prealloc)
308 {
309 	lockdep_assert_held_write(&pool_rwlock);
310 
311 	/*
312 	 * If a new pool is already saved or the maximum number of
313 	 * pools is reached, do not use the preallocated memory.
314 	 */
315 	if (!new_pool_required)
316 		return;
317 
318 	/*
319 	 * Use the preallocated memory for the new pool
320 	 * as long as we do not exceed the maximum number of pools.
321 	 */
322 	if (pools_num < DEPOT_MAX_POOLS) {
323 		new_pool = *prealloc;
324 		*prealloc = NULL;
325 	}
326 
327 	/*
328 	 * At this point, either a new pool is kept or the maximum
329 	 * number of pools is reached. In either case, take note that
330 	 * keeping another pool is not required.
331 	 */
332 	new_pool_required = false;
333 }
334 
335 /* Updates references to the current and the next stack depot pools. */
336 static bool depot_update_pools(void **prealloc)
337 {
338 	lockdep_assert_held_write(&pool_rwlock);
339 
340 	/* Check if we still have objects in the freelist. */
341 	if (!list_empty(&free_stacks))
342 		goto out_keep_prealloc;
343 
344 	/* Check if we have a new pool saved and use it. */
345 	if (new_pool) {
346 		depot_init_pool(new_pool);
347 		new_pool = NULL;
348 
349 		/* Take note that we might need a new new_pool. */
350 		if (pools_num < DEPOT_MAX_POOLS)
351 			new_pool_required = true;
352 
353 		/* Try keeping the preallocated memory for new_pool. */
354 		goto out_keep_prealloc;
355 	}
356 
357 	/* Bail out if we reached the pool limit. */
358 	if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
359 		WARN_ONCE(1, "Stack depot reached limit capacity");
360 		return false;
361 	}
362 
363 	/* Check if we have preallocated memory and use it. */
364 	if (*prealloc) {
365 		depot_init_pool(*prealloc);
366 		*prealloc = NULL;
367 		return true;
368 	}
369 
370 	return false;
371 
372 out_keep_prealloc:
373 	/* Keep the preallocated memory for a new pool if required. */
374 	if (*prealloc)
375 		depot_keep_new_pool(prealloc);
376 	return true;
377 }
378 
379 /* Allocates a new stack in a stack depot pool. */
380 static struct stack_record *
381 depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
382 {
383 	struct stack_record *stack;
384 
385 	lockdep_assert_held_write(&pool_rwlock);
386 
387 	/* Update current and new pools if required and possible. */
388 	if (!depot_update_pools(prealloc))
389 		return NULL;
390 
391 	/* Check if we have a stack record to save the stack trace. */
392 	if (list_empty(&free_stacks))
393 		return NULL;
394 
395 	/* Get and unlink the first entry from the freelist. */
396 	stack = list_first_entry(&free_stacks, struct stack_record, list);
397 	list_del(&stack->list);
398 	counters[DEPOT_COUNTER_FREELIST_SIZE]--;
399 
400 	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
401 	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
402 		size = CONFIG_STACKDEPOT_MAX_FRAMES;
403 
404 	/* Save the stack trace. */
405 	stack->hash = hash;
406 	stack->size = size;
407 	/* stack->handle is already filled in by depot_init_pool(). */
408 	refcount_set(&stack->count, 1);
409 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
410 
411 	/*
412 	 * Let KMSAN know the stored stack record is initialized. This shall
413 	 * prevent false positive reports if instrumented code accesses it.
414 	 */
415 	kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
416 
417 	counters[DEPOT_COUNTER_ALLOCS]++;
418 	counters[DEPOT_COUNTER_INUSE]++;
419 	return stack;
420 }
421 
422 static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
423 {
424 	union handle_parts parts = { .handle = handle };
425 	void *pool;
426 	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
427 	struct stack_record *stack;
428 
429 	lockdep_assert_held(&pool_rwlock);
430 
431 	if (parts.pool_index >= pools_num) {
432 		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
433 		     parts.pool_index, pools_num, handle);
434 		return NULL;
435 	}
436 
437 	pool = stack_pools[parts.pool_index];
438 	if (!pool)
439 		return NULL;
440 
441 	stack = pool + offset;
442 	return stack;
443 }
444 
445 /* Links stack into the freelist. */
446 static void depot_free_stack(struct stack_record *stack)
447 {
448 	lockdep_assert_held_write(&pool_rwlock);
449 
450 	list_add(&stack->list, &free_stacks);
451 
452 	counters[DEPOT_COUNTER_FREELIST_SIZE]++;
453 	counters[DEPOT_COUNTER_FREES]++;
454 	counters[DEPOT_COUNTER_INUSE]--;
455 }
456 
457 /* Calculates the hash for a stack. */
458 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
459 {
460 	return jhash2((u32 *)entries,
461 		      array_size(size, sizeof(*entries)) / sizeof(u32),
462 		      STACK_HASH_SEED);
463 }
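
/*
 * Example: on a 64-bit kernel, a trace of 8 frames is hashed as
 * 8 * 8 / 4 = 16 32-bit words by jhash2() above.
 */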
464 
465 /*
466  * Non-instrumented version of memcmp().
467  * Does not check lexicographical order, only equality.
468  */
469 static inline
470 int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
471 			unsigned int n)
472 {
473 	for ( ; n-- ; u1++, u2++) {
474 		if (*u1 != *u2)
475 			return 1;
476 	}
477 	return 0;
478 }
479 
480 /* Finds a stack in a bucket of the hash table. */
481 static inline struct stack_record *find_stack(struct list_head *bucket,
482 					     unsigned long *entries, int size,
483 					     u32 hash)
484 {
485 	struct list_head *pos;
486 	struct stack_record *found;
487 
488 	lockdep_assert_held(&pool_rwlock);
489 
490 	list_for_each(pos, bucket) {
491 		found = list_entry(pos, struct stack_record, list);
492 		if (found->hash == hash &&
493 		    found->size == size &&
494 		    !stackdepot_memcmp(entries, found->entries, size))
495 			return found;
496 	}
497 	return NULL;
498 }
499 
500 depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
501 					    unsigned int nr_entries,
502 					    gfp_t alloc_flags,
503 					    depot_flags_t depot_flags)
504 {
505 	struct list_head *bucket;
506 	struct stack_record *found = NULL;
507 	depot_stack_handle_t handle = 0;
508 	struct page *page = NULL;
509 	void *prealloc = NULL;
510 	bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
511 	bool need_alloc = false;
512 	unsigned long flags;
513 	u32 hash;
514 
515 	if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
516 		return 0;
517 
518 	/*
519 	 * If this stack trace is from an interrupt, including anything before
520 	 * interrupt entry usually leads to unbounded stack depot growth.
521 	 *
522 	 * Since use of filter_irq_stacks() is a requirement to ensure stack
523 	 * depot can efficiently deduplicate interrupt stacks, always call
524 	 * filter_irq_stacks() here to simplify all callers' use of stack depot.
525 	 */
526 	nr_entries = filter_irq_stacks(entries, nr_entries);
527 
528 	if (unlikely(nr_entries == 0) || stack_depot_disabled)
529 		return 0;
530 
531 	hash = hash_stack(entries, nr_entries);
532 	bucket = &stack_table[hash & stack_hash_mask];
533 
534 	read_lock_irqsave(&pool_rwlock, flags);
535 	printk_deferred_enter();
536 
537 	/* Fast path: look the stack trace up without full locking. */
538 	found = find_stack(bucket, entries, nr_entries, hash);
539 	if (found) {
540 		if (depot_flags & STACK_DEPOT_FLAG_GET)
541 			refcount_inc(&found->count);
542 		printk_deferred_exit();
543 		read_unlock_irqrestore(&pool_rwlock, flags);
544 		goto exit;
545 	}
546 
547 	/* Take note if another stack pool needs to be allocated. */
548 	if (new_pool_required)
549 		need_alloc = true;
550 
551 	printk_deferred_exit();
552 	read_unlock_irqrestore(&pool_rwlock, flags);
553 
554 	/*
555 	 * Allocate memory for a new pool if required now:
556 	 * we won't be able to do that under the lock.
557 	 */
558 	if (unlikely(can_alloc && need_alloc)) {
559 		/*
560 		 * Zero out zone modifiers, as we don't have specific zone
561 		 * requirements. Keep the flags related to allocation in atomic
562 		 * contexts and I/O.
563 		 */
564 		alloc_flags &= ~GFP_ZONEMASK;
565 		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
566 		alloc_flags |= __GFP_NOWARN;
567 		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
568 		if (page)
569 			prealloc = page_address(page);
570 	}
571 
572 	write_lock_irqsave(&pool_rwlock, flags);
573 	printk_deferred_enter();
574 
575 	found = find_stack(bucket, entries, nr_entries, hash);
576 	if (!found) {
577 		struct stack_record *new =
578 			depot_alloc_stack(entries, nr_entries, hash, &prealloc);
579 
580 		if (new) {
581 			list_add(&new->list, bucket);
582 			found = new;
583 		}
584 	} else {
585 		if (depot_flags & STACK_DEPOT_FLAG_GET)
586 			refcount_inc(&found->count);
587 		/*
588 		 * Stack depot already contains this stack trace, but let's
589 		 * keep the preallocated memory for the future.
590 		 */
591 		if (prealloc)
592 			depot_keep_new_pool(&prealloc);
593 	}
594 
595 	printk_deferred_exit();
596 	write_unlock_irqrestore(&pool_rwlock, flags);
597 exit:
598 	if (prealloc) {
599 		/* Stack depot didn't use this memory, free it. */
600 		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
601 	}
602 	if (found)
603 		handle = found->handle.handle;
604 	return handle;
605 }
606 EXPORT_SYMBOL_GPL(stack_depot_save_flags);
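
/*
 * Example (a sketch): a caller that wants refcounted, evictable records pairs
 * STACK_DEPOT_FLAG_GET here with a later stack_depot_put():
 *
 *	handle = stack_depot_save_flags(entries, nr_entries, GFP_NOWAIT,
 *					STACK_DEPOT_FLAG_CAN_ALLOC |
 *					STACK_DEPOT_FLAG_GET);
 *	...
 *	stack_depot_put(handle);
 */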
607 
608 depot_stack_handle_t stack_depot_save(unsigned long *entries,
609 				      unsigned int nr_entries,
610 				      gfp_t alloc_flags)
611 {
612 	return stack_depot_save_flags(entries, nr_entries, alloc_flags,
613 				      STACK_DEPOT_FLAG_CAN_ALLOC);
614 }
615 EXPORT_SYMBOL_GPL(stack_depot_save);
616 
617 unsigned int stack_depot_fetch(depot_stack_handle_t handle,
618 			       unsigned long **entries)
619 {
620 	struct stack_record *stack;
621 	unsigned long flags;
622 
623 	*entries = NULL;
624 	/*
625 	 * Let KMSAN know *entries is initialized. This shall prevent false
626 	 * positive reports if instrumented code accesses it.
627 	 */
628 	kmsan_unpoison_memory(entries, sizeof(*entries));
629 
630 	if (!handle || stack_depot_disabled)
631 		return 0;
632 
633 	read_lock_irqsave(&pool_rwlock, flags);
634 	printk_deferred_enter();
635 
636 	stack = depot_fetch_stack(handle);
637 
638 	printk_deferred_exit();
639 	read_unlock_irqrestore(&pool_rwlock, flags);
640 
	/* A NULL record here means a corrupt or stale handle. */
	if (WARN_ON(!stack))
		return 0;

641 	*entries = stack->entries;
642 	return stack->size;
643 }
644 EXPORT_SYMBOL_GPL(stack_depot_fetch);
645 
646 void stack_depot_put(depot_stack_handle_t handle)
647 {
648 	struct stack_record *stack;
649 	unsigned long flags;
650 
651 	if (!handle || stack_depot_disabled)
652 		return;
653 
654 	write_lock_irqsave(&pool_rwlock, flags);
655 	printk_deferred_enter();
656 
657 	stack = depot_fetch_stack(handle);
658 	if (WARN_ON(!stack))
659 		goto out;
660 
661 	if (refcount_dec_and_test(&stack->count)) {
662 		/* Unlink stack from the hash table. */
663 		list_del(&stack->list);
664 
665 		/* Free stack. */
666 		depot_free_stack(stack);
667 	}
668 
669 out:
670 	printk_deferred_exit();
671 	write_unlock_irqrestore(&pool_rwlock, flags);
672 }
673 EXPORT_SYMBOL_GPL(stack_depot_put);
674 
675 void stack_depot_print(depot_stack_handle_t stack)
676 {
677 	unsigned long *entries;
678 	unsigned int nr_entries;
679 
680 	nr_entries = stack_depot_fetch(stack, &entries);
681 	if (nr_entries > 0)
682 		stack_trace_print(entries, nr_entries, 0);
683 }
684 EXPORT_SYMBOL_GPL(stack_depot_print);
685 
686 int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
687 		       int spaces)
688 {
689 	unsigned long *entries;
690 	unsigned int nr_entries;
691 
692 	nr_entries = stack_depot_fetch(handle, &entries);
693 	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
694 						spaces) : 0;
695 }
696 EXPORT_SYMBOL_GPL(stack_depot_snprint);
697 
698 depot_stack_handle_t __must_check stack_depot_set_extra_bits(
699 			depot_stack_handle_t handle, unsigned int extra_bits)
700 {
701 	union handle_parts parts = { .handle = handle };
702 
703 	/* Don't set extra bits on empty handles. */
704 	if (!handle)
705 		return 0;
706 
707 	parts.extra = extra_bits;
708 	return parts.handle;
709 }
710 EXPORT_SYMBOL(stack_depot_set_extra_bits);
711 
712 unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
713 {
714 	union handle_parts parts = { .handle = handle };
715 
716 	return parts.extra;
717 }
718 EXPORT_SYMBOL(stack_depot_get_extra_bits);
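
/*
 * Example (a sketch, using an arbitrary tag value): a user with spare handle
 * bits can stash a small tag and recover it later:
 *
 *	handle = stack_depot_set_extra_bits(handle, 0x3);
 *	...
 *	tag = stack_depot_get_extra_bits(handle);	(yields 0x3)
 */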
719 
720 static int stats_show(struct seq_file *seq, void *v)
721 {
722 	/*
723 	 * data race ok: These are just statistics counters, and approximate
724 	 * statistics are ok for debugging.
725 	 */
726 	seq_printf(seq, "pools: %d\n", data_race(pools_num));
727 	for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
728 		seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
729 
730 	return 0;
731 }
732 DEFINE_SHOW_ATTRIBUTE(stats);
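
/*
 * Example: with debugfs mounted at /sys/kernel/debug, the counters can be
 * read from /sys/kernel/debug/stackdepot/stats; the values below are
 * illustrative only:
 *
 *	pools: 2
 *	allocations: 53
 *	frees: 10
 *	in_use: 43
 *	freelist_size: 17
 */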
733 
734 static int depot_debugfs_init(void)
735 {
736 	struct dentry *dir;
737 
738 	if (stack_depot_disabled)
739 		return 0;
740 
741 	dir = debugfs_create_dir("stackdepot", NULL);
742 	debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
743 	return 0;
744 }
745 late_initcall(depot_debugfs_init);
746