// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack depot - a stack trace storage that avoids duplication.
 *
 * Internally, stack depot maintains a hash table of unique stacktraces. The
 * stack traces themselves are stored contiguously one after another in a set
 * of separate page allocations.
 *
 * Author: Alexander Potapenko <[email protected]>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on the code by Dmitry Chernenkov.
 */
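
/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * captures a stack trace once, stores it in the depot, and keeps only the
 * compact 32-bit handle; the full trace can be fetched or printed later.
 * Assumes the depot has been initialized (see stack_depot_init() below);
 * the buffer size of 64 is an arbitrary choice for this sketch.
 *
 *	unsigned long entries[64];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
 *	...
 *	stack_depot_print(handle);
 */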

#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)

#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
			       STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
#define DEPOT_MAX_POOLS \
	(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
	 (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
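
/*
 * Worked example of the bit budget above (a sketch: the numbers assume
 * PAGE_SHIFT == 12 and STACK_DEPOT_EXTRA_BITS == 5, both of which are
 * configuration-dependent):
 *
 *	DEPOT_POOL_SIZE       = 1 << (12 + 2)  = 16 KiB per pool
 *	DEPOT_OFFSET_BITS     = 2 + 12 - 4     = 10
 *	DEPOT_POOL_INDEX_BITS = 32 - 10 - 5    = 17
 *
 * Offsets are stored in 16-byte (1 << DEPOT_STACK_ALIGN) granules, so 10
 * offset bits exactly cover a 16 KiB pool. 17 index bits could address
 * 2^17 pools, which exceeds DEPOT_POOLS_CAP, so DEPOT_MAX_POOLS becomes
 * 8192, i.e. at most 8192 * 16 KiB = 128 MiB of stack storage.
 */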

/* Compact structure that stores a reference to a stack. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 pool_index	: DEPOT_POOL_INDEX_BITS;
		u32 offset	: DEPOT_OFFSET_BITS;
		u32 extra	: STACK_DEPOT_EXTRA_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hash table */
	u32 hash;			/* Hash in the hash table */
	u32 size;			/* Number of stored frames */
	union handle_parts handle;
	unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];	/* Frames */
};

#define DEPOT_STACK_RECORD_SIZE \
	ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
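
/*
 * Size sketch (illustrative; assumes a 64-bit kernel and the default
 * CONFIG_STACKDEPOT_MAX_FRAMES of 64): sizeof(struct stack_record) comes to
 * roughly 8 (next) + 4 (hash) + 4 (size) + 4 (handle) + 4 (padding) +
 * 64 * 8 (entries) = 536 bytes, which ALIGN() rounds up to 544. Every
 * record thus starts on a 16-byte (1 << DEPOT_STACK_ALIGN) boundary, so
 * its position can be encoded exactly in handle_parts.offset.
 */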

static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c

/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack traces. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/*
 * Stack depot tries to keep an extra pool allocated even before it runs out
 * of space in the currently used pool. This flag marks whether this extra
 * pool needs to be allocated. It has the value 0 when either an extra pool
 * has already been saved in new_pool or the limit on the number of pools is
 * reached.
 */
static int new_pool_required = 1;

static int __init disable_stack_depot(char *str)
{
	return kstrtobool(str, &stack_depot_disabled);
}
early_param("stack_depot_disable", disable_stack_depot);
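
/*
 * Usage sketch: booting with "stack_depot_disable=on" (or "=1"; any value
 * kstrtobool() accepts as true) sets stack_depot_disabled, so both hash
 * table allocation paths below become no-ops.
 */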

void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}

/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This function must be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	/*
	 * Print disabled message even if early init has not been requested:
	 * stack_depot_init() will not print one.
	 */
	if (stack_depot_disabled) {
		pr_info("disabled\n");
		return 0;
	}

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
	 * in fuzzing scenarios, which leads to a large number of different
	 * stack traces being stored in stack depot.
	 */
	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

	/*
	 * Check if early init has been requested after setting
	 * stack_bucket_number_order: stack_depot_init() uses its value.
	 */
	if (!__stack_depot_early_init_requested)
		return 0;

	/*
	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
	 * on the automatic calculations performed by alloc_large_system_hash.
	 */
	if (stack_bucket_number_order)
		entries = 1UL << stack_bucket_number_order;
	pr_info("allocating hash table via alloc_large_system_hash\n");
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct stack_record *),
						entries,
						STACK_HASH_TABLE_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		return -ENOMEM;
	}

	return 0;
}

/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	unsigned long entries;
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);

	if (stack_depot_disabled || stack_table)
		goto out_unlock;

	/*
	 * Similarly to stack_depot_early_init, use stack_bucket_number_order
	 * if assigned, and rely on automatic scaling otherwise.
	 */
	if (stack_bucket_number_order) {
		entries = 1UL << stack_bucket_number_order;
	} else {
		int scale = STACK_HASH_TABLE_SCALE;

		entries = nr_free_buffer_pages();
		entries = roundup_pow_of_two(entries);

		if (scale > PAGE_SHIFT)
			entries >>= (scale - PAGE_SHIFT);
		else
			entries <<= (PAGE_SHIFT - scale);
	}

	if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
	if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
	stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		ret = -ENOMEM;
		goto out_unlock;
	}
	stack_hash_mask = entries - 1;

out_unlock:
	mutex_unlock(&stack_depot_init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
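
/*
 * Worked example of the automatic scaling above (a sketch; assumes
 * PAGE_SHIFT == 12, i.e. 4 KiB pages): if nr_free_buffer_pages() reports
 * about one million pages (~4 GiB), roundup_pow_of_two() yields 2^20.
 * Since scale (14) is greater than PAGE_SHIFT (12), the count is shifted
 * down by 2 to 2^18 buckets, one per 16 KiB of memory, and then clamped to
 * the [2^12, 2^20] range (here it already fits).
 */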

/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
{
	/*
	 * If a new pool is already saved or the maximum number of
	 * pools is reached, do not use the preallocated memory.
	 * Access new_pool_required non-atomically, as there are no concurrent
	 * write accesses to this variable.
	 */
	if (!new_pool_required)
		return;

	/*
	 * Use the preallocated memory for the new pool
	 * as long as we do not exceed the maximum number of pools.
	 */
	if (pool_index + 1 < DEPOT_MAX_POOLS) {
		new_pool = *prealloc;
		*prealloc = NULL;
	}

	/*
	 * At this point, either a new pool is kept or the maximum
	 * number of pools is reached. In either case, take note that
	 * keeping another pool is not required.
	 * smp_store_release() pairs with smp_load_acquire() in
	 * stack_depot_save().
	 */
	smp_store_release(&new_pool_required, 0);
}

/* Updates references to the current and the next stack depot pools. */
static bool depot_update_pools(size_t required_size, void **prealloc)
{
	/* Check if there is not enough space in the current pool. */
	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
		/* Bail out if we reached the pool limit. */
		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return false;
		}

		/*
		 * Move on to the new pool.
		 * WRITE_ONCE() pairs with potential concurrent read in
		 * stack_depot_fetch().
		 */
		WRITE_ONCE(pool_index, pool_index + 1);
		stack_pools[pool_index] = new_pool;
		new_pool = NULL;
		pool_offset = 0;

		/*
		 * If the maximum number of pools is not reached, take note
		 * that yet another new pool needs to be allocated.
		 * smp_store_release() pairs with smp_load_acquire() in
		 * stack_depot_save().
		 */
		if (pool_index + 1 < DEPOT_MAX_POOLS)
			smp_store_release(&new_pool_required, 1);
	}

	/* Check if the current pool is not yet allocated. */
	if (*prealloc && stack_pools[pool_index] == NULL) {
		/* Use the preallocated memory for the current pool. */
		stack_pools[pool_index] = *prealloc;
		*prealloc = NULL;
		return true;
	}

	/* Otherwise, try using the preallocated memory for a new pool. */
	if (*prealloc)
		depot_keep_new_pool(prealloc);
	return true;
}
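
/*
 * Rollover sketch (illustrative; uses the 16 KiB pool and 544-byte record
 * sizes from the worked examples above). Suppose pool_offset == 16320 when
 * another record must be stored:
 *
 *	16320 + 544 > 16384, so the current pool is full;
 *	pool_index is bumped and stack_pools[pool_index] takes over new_pool;
 *	pool_offset resets to 0, so the record lands at the new pool's start;
 *	new_pool_required is set back to 1 so that a later
 *	stack_depot_save() call preallocates the next spare pool.
 */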

/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = DEPOT_STACK_RECORD_SIZE;

	/* Update current and new pools if required and possible. */
	if (!depot_update_pools(required_size, prealloc))
		return NULL;

	/* Check if we have a pool to save the stack trace. */
	if (stack_pools[pool_index] == NULL)
		return NULL;

	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
		size = CONFIG_STACKDEPOT_MAX_FRAMES;

	/* Save the stack trace. */
	stack = stack_pools[pool_index] + pool_offset;
	stack->hash = hash;
	stack->size = size;
	stack->handle.pool_index = pool_index;
	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
	stack->handle.extra = 0;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	pool_offset += required_size;

	/*
	 * Let KMSAN know the stored stack record is initialized. This shall
	 * prevent false positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(stack, required_size);

	return stack;
}
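
/*
 * Handle encoding sketch (illustrative, continuing the 544-byte record
 * example): the second record in pool 3 starts at byte offset 544, so the
 * assignments above pack the handle as
 *
 *	handle.pool_index = 3
 *	handle.offset     = 544 >> 4 = 34	(in 16-byte granules)
 *	handle.extra      = 0
 *
 * depot_fetch_stack() below inverts this by computing
 * stack_pools[3] + (34 << 4).
 */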

static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };
	/*
	 * READ_ONCE() pairs with potential concurrent write in
	 * depot_update_pools().
	 */
	int pool_index_cached = READ_ONCE(pool_index);
	void *pool;
	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
	struct stack_record *stack;

	if (parts.pool_index > pool_index_cached) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
		     parts.pool_index, pool_index_cached, handle);
		return NULL;
	}

	pool = stack_pools[parts.pool_index];
	if (!pool)
		return NULL;

	stack = pool + offset;
	return stack;
}

/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}
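
/*
 * Length sketch: jhash2() consumes 32-bit words while the entries are
 * unsigned long, so on a 64-bit kernel a trace of 10 frames hashes
 * 10 * 8 / 4 = 20 words (on a 32-bit kernel, 10 words). array_size() is
 * used so that an implausibly large size saturates instead of overflowing
 * the multiplication.
 */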

/*
 * Non-instrumented version of memcmp().
 * Does not check the lexicographical order, only the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t handle = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stack depot growth.
	 *
	 * Since use of filter_irq_stacks() is a requirement to ensure stack
	 * depot can efficiently deduplicate interrupt stacks, always
	 * filter_irq_stacks() to simplify all callers' use of stack depot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disabled)
		return 0;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/*
	 * Fast path: look the stack trace up without locking.
	 * smp_load_acquire() pairs with smp_store_release() to |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if another stack pool needs to be allocated. If so, allocate
	 * the memory now: we won't be able to do that under the lock.
	 *
	 * smp_load_acquire() pairs with smp_store_release() in
	 * depot_update_pools() and depot_keep_new_pool().
	 */
	if (unlikely(can_alloc && smp_load_acquire(&new_pool_required))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * smp_store_release() pairs with smp_load_acquire()
			 * from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * Stack depot already contains this stack trace, but let's
		 * keep the preallocated memory for the future.
		 */
		depot_keep_new_pool(&prealloc);
	}

	raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
	if (prealloc) {
		/* Stack depot didn't use this memory, free it. */
		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
	}
	if (found)
		handle = found->handle.handle;
	return handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
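
/*
 * Usage sketch for can_alloc == false (illustrative): callers running in
 * contexts where even a GFP_ATOMIC page allocation is unsafe (e.g. with a
 * raw spinlock held) can still deduplicate against already-stored traces:
 *
 *	handle = __stack_depot_save(entries, nr_entries, 0, false);
 *
 * If the trace is new and no pool space is left, the call returns 0 rather
 * than allocating. stack_depot_save() below is the convenience wrapper
 * that passes can_alloc == true.
 */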

depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);

unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	struct stack_record *stack;

	*entries = NULL;
	/*
	 * Let KMSAN know *entries is initialized. This shall prevent false
	 * positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(entries, sizeof(*entries));

	if (!handle || stack_depot_disabled)
		return 0;
	stack = depot_fetch_stack(handle);
	/* Bail out if the handle does not map to an existing pool. */
	if (!stack)
		return 0;

	*entries = stack->entries;
	return stack->size;
526 	return stack->size;
527 }
528 EXPORT_SYMBOL_GPL(stack_depot_fetch);
529 
530 void stack_depot_print(depot_stack_handle_t stack)
531 {
532 	unsigned long *entries;
533 	unsigned int nr_entries;
534 
535 	nr_entries = stack_depot_fetch(stack, &entries);
536 	if (nr_entries > 0)
537 		stack_trace_print(entries, nr_entries, 0);
538 }
539 EXPORT_SYMBOL_GPL(stack_depot_print);
540 
541 int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
542 		       int spaces)
543 {
544 	unsigned long *entries;
545 	unsigned int nr_entries;
546 
547 	nr_entries = stack_depot_fetch(handle, &entries);
548 	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
549 						spaces) : 0;
550 }
551 EXPORT_SYMBOL_GPL(stack_depot_snprint);
552 
553 depot_stack_handle_t __must_check stack_depot_set_extra_bits(
554 			depot_stack_handle_t handle, unsigned int extra_bits)
555 {
556 	union handle_parts parts = { .handle = handle };
557 
558 	/* Don't set extra bits on empty handles. */
559 	if (!handle)
560 		return 0;
561 
562 	parts.extra = extra_bits;
563 	return parts.handle;
564 }
565 EXPORT_SYMBOL(stack_depot_set_extra_bits);
566 
567 unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
568 {
569 	union handle_parts parts = { .handle = handle };
570 
571 	return parts.extra;
572 }
573 EXPORT_SYMBOL(stack_depot_get_extra_bits);
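
/*
 * Extra-bits sketch (illustrative): a user such as KMSAN can piggyback a
 * few bits of its own state on a handle without extra storage. The value 3
 * below is arbitrary and must fit in STACK_DEPOT_EXTRA_BITS:
 *
 *	handle = stack_depot_set_extra_bits(handle, 3);
 *	...
 *	if (stack_depot_get_extra_bits(handle) == 3)
 *		...
 *
 * depot_fetch_stack() decodes only pool_index and offset, so a handle with
 * extra bits set still resolves to the same stack record.
 */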