// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack depot - a stack trace storage that avoids duplication.
 *
 * Internally, stack depot maintains a hash table of unique stacktraces. The
 * stack traces themselves are stored contiguously one after another in a set
 * of separate page allocations.
 *
 * Author: Alexander Potapenko <[email protected]>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on the code by Dmitry Chernenkov.
 */

#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)

#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
			       STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
#define DEPOT_MAX_POOLS \
	(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
	 (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)

/* Compact structure that stores a reference to a stack. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 pool_index	: DEPOT_POOL_INDEX_BITS;
		u32 offset	: DEPOT_OFFSET_BITS;
		u32 extra	: STACK_DEPOT_EXTRA_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hash table */
	u32 hash;			/* Hash in the hash table */
	u32 size;			/* Number of stored frames */
	union handle_parts handle;
	unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];	/* Frames */
};

#define DEPOT_STACK_RECORD_SIZE \
	ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)

static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c

/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack traces. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
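/*
 * Illustrative sketch (not part of the depot API; the helper name is
 * hypothetical): how a handle decomposes via union handle_parts. With the
 * common PAGE_SHIFT of 12, DEPOT_OFFSET_BITS is 2 + 12 - 4 = 10, so each
 * 16 KB pool holds up to 1024 records at 16-byte granularity.
 */
static void __maybe_unused stackdepot_example_decode(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };
	size_t byte_offset = (size_t)parts.offset << DEPOT_STACK_ALIGN;

	pr_debug("handle %08x -> pool %u, byte offset %zu, extra %u\n",
		 handle, parts.pool_index, byte_offset, parts.extra);
}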
/*
 * Stack depot tries to keep an extra pool allocated even before it runs out
 * of space in the currently used pool.
 * This flag marks whether the next extra pool needs to be allocated and
 * initialized. It has the value 0 when either the next pool is already
 * initialized or the limit on the number of pools is reached.
 */
static int next_pool_required = 1;

static int __init disable_stack_depot(char *str)
{
	return kstrtobool(str, &stack_depot_disabled);
}
early_param("stack_depot_disable", disable_stack_depot);

void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}

/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This function must be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	/*
	 * Print the disabled message even if early init has not been
	 * requested: stack_depot_init() will not print one.
	 */
	if (stack_depot_disabled) {
		pr_info("disabled\n");
		return 0;
	}

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
	 * in fuzzing scenarios, which leads to a large number of different
	 * stack traces being stored in stack depot.
	 */
	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

	/*
	 * Check if early init has been requested after setting
	 * stack_bucket_number_order: stack_depot_init() uses its value.
	 */
	if (!__stack_depot_early_init_requested)
		return 0;

	/*
	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
	 * on the automatic calculations performed by alloc_large_system_hash().
	 */
	if (stack_bucket_number_order)
		entries = 1UL << stack_bucket_number_order;
	pr_info("allocating hash table via alloc_large_system_hash\n");
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct stack_record *),
						entries,
						STACK_HASH_TABLE_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		return -ENOMEM;
	}

	return 0;
}
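/*
 * Illustrative usage sketch (hypothetical __init caller, not part of this
 * file): a boot-time user opts into the early, memblock-based hash table by
 * requesting it before mm_init() calls stack_depot_early_init().
 */
static void __init __maybe_unused stackdepot_example_early_setup(void)
{
	/* Has no effect if called after stack_depot_early_init(). */
	stack_depot_request_early_init();
}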
/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	unsigned long entries;
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);

	if (stack_depot_disabled || stack_table)
		goto out_unlock;

	/*
	 * Similarly to stack_depot_early_init, use stack_bucket_number_order
	 * if assigned, and rely on automatic scaling otherwise.
	 */
	if (stack_bucket_number_order) {
		entries = 1UL << stack_bucket_number_order;
	} else {
		int scale = STACK_HASH_TABLE_SCALE;

		entries = nr_free_buffer_pages();
		entries = roundup_pow_of_two(entries);

		if (scale > PAGE_SHIFT)
			entries >>= (scale - PAGE_SHIFT);
		else
			entries <<= (PAGE_SHIFT - scale);
	}

	if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
	if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
	stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		ret = -ENOMEM;
		goto out_unlock;
	}
	stack_hash_mask = entries - 1;

out_unlock:
	mutex_unlock(&stack_depot_init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);

/* Uses preallocated memory to initialize a new stack depot pool. */
static void depot_init_pool(void **prealloc)
{
	/*
	 * If the next pool is already initialized or the maximum number of
	 * pools is reached, do not use the preallocated memory.
	 * Access next_pool_required non-atomically, as there are no concurrent
	 * write accesses to this variable.
	 */
	if (!next_pool_required)
		return;

	/* Check if the current pool is not yet allocated. */
	if (stack_pools[pool_index] == NULL) {
		/* Use the preallocated memory for the current pool. */
		stack_pools[pool_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/*
		 * Otherwise, use the preallocated memory for the next pool
		 * as long as we do not exceed the maximum number of pools.
		 */
		if (pool_index + 1 < DEPOT_MAX_POOLS) {
			stack_pools[pool_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * At this point, either the next pool is initialized or the
		 * maximum number of pools is reached. In either case, take
		 * note that initializing another pool is not required.
		 * smp_store_release() pairs with smp_load_acquire() in
		 * stack_depot_save().
		 */
		smp_store_release(&next_pool_required, 0);
	}
}
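/*
 * Illustrative sketch (hypothetical helper, not used by the depot): how many
 * records fit in one pool. Assuming 8-byte longs and the default
 * CONFIG_STACKDEPOT_MAX_FRAMES of 64, a record is a bit over 512 bytes
 * (544 after 16-byte alignment), so a 16 KB pool holds about 30 records.
 */
static unsigned int __maybe_unused stackdepot_example_records_per_pool(void)
{
	return DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE;
}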
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = DEPOT_STACK_RECORD_SIZE;

	/* Check if there is not enough space in the current pool. */
	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
		/* Bail out if we reached the pool limit. */
		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}

		/*
		 * Move on to the next pool.
		 * WRITE_ONCE() pairs with potential concurrent read in
		 * stack_depot_fetch().
		 */
		WRITE_ONCE(pool_index, pool_index + 1);
		pool_offset = 0;
		/*
		 * If the maximum number of pools is not reached, take note
		 * that the next pool needs to be initialized.
		 * smp_store_release() pairs with smp_load_acquire() in
		 * stack_depot_save().
		 */
		if (pool_index + 1 < DEPOT_MAX_POOLS)
			smp_store_release(&next_pool_required, 1);
	}

	/* Assign the preallocated memory to a pool if required. */
	if (*prealloc)
		depot_init_pool(prealloc);

	/* Check if we have a pool to save the stack trace. */
	if (stack_pools[pool_index] == NULL)
		return NULL;

	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
		size = CONFIG_STACKDEPOT_MAX_FRAMES;

	/* Save the stack trace. */
	stack = stack_pools[pool_index] + pool_offset;
	stack->hash = hash;
	stack->size = size;
	stack->handle.pool_index = pool_index;
	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
	stack->handle.extra = 0;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	pool_offset += required_size;

	/*
	 * Let KMSAN know the stored stack record is initialized. This shall
	 * prevent false positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(stack, required_size);

	return stack;
}

static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };
	/*
	 * READ_ONCE() pairs with potential concurrent write in
	 * depot_alloc_stack().
	 */
	int pool_index_cached = READ_ONCE(pool_index);
	void *pool;
	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
	struct stack_record *stack;

	if (parts.pool_index > pool_index_cached) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
		     parts.pool_index, pool_index_cached, handle);
		return NULL;
	}

	pool = stack_pools[parts.pool_index];
	if (!pool)
		return NULL;

	stack = pool + offset;
	return stack;
}

/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Non-instrumented version of memcmp().
 * Does not check the lexicographical order, only the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
		      unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}
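/*
 * Illustrative sketch (hypothetical helper, not used by the depot): the
 * deduplication key is the triple (hash, size, entries[]), so two traces of
 * different depth never match, even if one is a prefix of the other.
 */
static bool __maybe_unused stackdepot_example_same_trace(struct stack_record *found,
							 unsigned long *entries,
							 unsigned int size)
{
	return found->size == size &&
	       found->hash == hash_stack(entries, size) &&
	       !stackdepot_memcmp(entries, found->entries, size);
}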
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t handle = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stack depot growth.
	 *
	 * Since use of filter_irq_stacks() is a requirement to ensure stack
	 * depot can efficiently deduplicate interrupt stacks, always
	 * filter_irq_stacks() to simplify all callers' use of stack depot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disabled)
		return 0;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/*
	 * Fast path: look the stack trace up without locking.
	 * smp_load_acquire() pairs with smp_store_release() to |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if another stack pool needs to be initialized. If so, allocate
	 * the memory now - we won't be able to do that under the lock.
	 *
	 * smp_load_acquire() pairs with smp_store_release() in
	 * depot_alloc_stack() and depot_init_pool().
	 */
	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * smp_store_release() pairs with smp_load_acquire()
			 * from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * Stack depot already contains this stack trace, but let's
		 * keep the preallocated memory for the future.
		 */
		depot_init_pool(&prealloc);
	}

	raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
	if (prealloc) {
		/* Stack depot didn't use this memory, free it. */
		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
	}
	if (found)
		handle = found->handle.handle;
	return handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);

depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);

unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	struct stack_record *stack;

	*entries = NULL;
	/*
	 * Let KMSAN know *entries is initialized. This shall prevent false
	 * positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(entries, sizeof(*entries));

	if (!handle || stack_depot_disabled)
		return 0;

	stack = depot_fetch_stack(handle);
	/* depot_fetch_stack() returns NULL for a corrupted handle. */
	if (!stack)
		return 0;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);
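/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the typical save/fetch round trip. The returned handle is a compact u32
 * that callers can store in object metadata instead of a full stack trace.
 */
static void __maybe_unused stackdepot_example_round_trip(void)
{
	unsigned long entries[16];
	unsigned long *saved;
	unsigned int nr_entries, nr_saved;
	depot_stack_handle_t handle;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
	if (!handle)
		return;	/* Depot disabled or out of pool space. */

	nr_saved = stack_depot_fetch(handle, &saved);
	stack_trace_print(saved, nr_saved, 0);
}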
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
			int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);

depot_stack_handle_t __must_check stack_depot_set_extra_bits(
			depot_stack_handle_t handle, unsigned int extra_bits)
{
	union handle_parts parts = { .handle = handle };

	/* Don't set extra bits on empty handles. */
	if (!handle)
		return 0;

	parts.extra = extra_bits;
	return parts.handle;
}
EXPORT_SYMBOL(stack_depot_set_extra_bits);

unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
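/*
 * Illustrative sketch (hypothetical caller, not part of this file): stashing
 * a small user-defined tag in the spare bits of a handle, in the spirit of
 * what KMSAN does for its origin tracking. The tag must fit in
 * STACK_DEPOT_EXTRA_BITS, and an empty handle stays empty.
 */
static depot_stack_handle_t __maybe_unused
stackdepot_example_tag_handle(depot_stack_handle_t handle, unsigned int tag)
{
	handle = stack_depot_set_extra_bits(handle, tag);
	WARN_ON(handle && stack_depot_get_extra_bits(handle) != tag);
	return handle;
}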