// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack depot - a stack trace storage that avoids duplication.
 *
 * Internally, stack depot maintains a hash table of unique stacktraces. The
 * stack traces themselves are stored contiguously one after another in a set
 * of separate page allocations.
 *
 * Author: Alexander Potapenko <[email protected]>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on the code by Dmitry Chernenkov.
 */

#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)

#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
			       STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
#define DEPOT_MAX_POOLS \
	(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
	 (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)

/* Compact structure that stores a reference to a stack. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 pool_index	: DEPOT_POOL_INDEX_BITS;
		u32 offset	: DEPOT_OFFSET_BITS;
		u32 extra	: STACK_DEPOT_EXTRA_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in hash table or freelist */
	u32 hash;			/* Hash in hash table */
	u32 size;			/* Number of stored frames */
	union handle_parts handle;
	unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];	/* Frames */
};

#define DEPOT_STACK_RECORD_SIZE \
	ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)

static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c

/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack traces. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
/* Next stack in the freelist of stack records within stack_pools. */
static struct stack_record *next_stack;
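/*
 * Illustrative sketch of the handle encoding above (not compiled into any
 * code path here): given a handle, the record address can be recovered by
 * hand as
 *
 *	union handle_parts parts = { .handle = handle };
 *	void *addr = stack_pools[parts.pool_index] +
 *		     ((size_t)parts.offset << DEPOT_STACK_ALIGN);
 *
 * i.e. the pool index selects a pool and the offset counts
 * (1 << DEPOT_STACK_ALIGN)-byte units within it. This is exactly what
 * depot_fetch_stack() below does, minus the bounds checks.
 */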
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/*
 * Stack depot tries to keep an extra pool allocated even before it runs out
 * of space in the currently used pool. This flag marks whether this extra
 * pool needs to be allocated: it is 1 when a new extra pool still needs to
 * be allocated and 0 when either an extra pool is already kept or the limit
 * on the number of pools is reached.
 */
static int new_pool_required = 1;

static int __init disable_stack_depot(char *str)
{
	return kstrtobool(str, &stack_depot_disabled);
}
early_param("stack_depot_disable", disable_stack_depot);

void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}

/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This function must be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	/*
	 * Print the disabled message even if early init has not been
	 * requested: stack_depot_init() will not print one.
	 */
	if (stack_depot_disabled) {
		pr_info("disabled\n");
		return 0;
	}

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
	 * in fuzzing scenarios, which leads to a large number of different
	 * stack traces being stored in stack depot.
	 */
	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

	/*
	 * Check if early init has been requested after setting
	 * stack_bucket_number_order: stack_depot_init() uses its value.
	 */
	if (!__stack_depot_early_init_requested)
		return 0;

	/*
	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
	 * on the automatic calculations performed by alloc_large_system_hash().
	 */
	if (stack_bucket_number_order)
		entries = 1UL << stack_bucket_number_order;
	pr_info("allocating hash table via alloc_large_system_hash\n");
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct stack_record *),
						entries,
						STACK_HASH_TABLE_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		return -ENOMEM;
	}

	return 0;
}
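/*
 * Worked example of STACK_HASH_TABLE_SCALE, matching the manual scaling in
 * stack_depot_init() below (assuming 4 KB pages, i.e. PAGE_SHIFT == 12):
 * with roughly 4 GiB of buffer pages, nr_free_buffer_pages() is about 2^20,
 * and since the scale (14) exceeds PAGE_SHIFT,
 *
 *	entries = 2^20 >> (14 - 12) = 2^18
 *
 * i.e. one bucket per 16 KB of memory, which is then clamped to the
 * [2^STACK_BUCKET_NUMBER_ORDER_MIN, 2^STACK_BUCKET_NUMBER_ORDER_MAX] range.
 */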
/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	unsigned long entries;
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);

	if (stack_depot_disabled || stack_table)
		goto out_unlock;

	/*
	 * Similarly to stack_depot_early_init, use stack_bucket_number_order
	 * if assigned, and rely on automatic scaling otherwise.
	 */
	if (stack_bucket_number_order) {
		entries = 1UL << stack_bucket_number_order;
	} else {
		int scale = STACK_HASH_TABLE_SCALE;

		entries = nr_free_buffer_pages();
		entries = roundup_pow_of_two(entries);

		if (scale > PAGE_SHIFT)
			entries >>= (scale - PAGE_SHIFT);
		else
			entries <<= (PAGE_SHIFT - scale);
	}

	if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
	if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
	stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		ret = -ENOMEM;
		goto out_unlock;
	}
	stack_hash_mask = entries - 1;

out_unlock:
	mutex_unlock(&stack_depot_init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);

/* Initializes a stack depot pool. */
static void depot_init_pool(void *pool)
{
	const int records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE;
	int i, offset;

	/* Initialize handles and link stack records to each other. */
	for (i = 0, offset = 0;
	     offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
	     i++, offset += DEPOT_STACK_RECORD_SIZE) {
		struct stack_record *stack = pool + offset;

		stack->handle.pool_index = pools_num;
		stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
		stack->handle.extra = 0;

		if (i < records_in_pool - 1)
			stack->next = (void *)stack + DEPOT_STACK_RECORD_SIZE;
		else
			stack->next = NULL;
	}

	/* Link stack records into the freelist. */
	WARN_ON(next_stack);
	next_stack = pool;

	/* Save a reference to the pool to be used by depot_fetch_stack(). */
	stack_pools[pools_num] = pool;

	/*
	 * WRITE_ONCE() pairs with the potential concurrent read in
	 * depot_fetch_stack().
	 */
	WRITE_ONCE(pools_num, pools_num + 1);
}

/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
{
	/*
	 * If a new pool is already saved or the maximum number of
	 * pools is reached, do not use the preallocated memory.
	 * Access new_pool_required non-atomically, as there are no concurrent
	 * write accesses to this variable.
	 */
	if (!new_pool_required)
		return;

	/*
	 * Use the preallocated memory for the new pool
	 * as long as we do not exceed the maximum number of pools.
	 */
	if (pools_num < DEPOT_MAX_POOLS) {
		new_pool = *prealloc;
		*prealloc = NULL;
	}

	/*
	 * At this point, either a new pool is kept or the maximum
	 * number of pools is reached. In either case, take note that
	 * keeping another pool is not required.
	 * smp_store_release() pairs with smp_load_acquire() in
	 * stack_depot_save().
	 */
	smp_store_release(&new_pool_required, 0);
}
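/*
 * Worked example for the freelist carving in depot_init_pool() above,
 * assuming 4 KB pages and CONFIG_STACKDEPOT_MAX_FRAMES == 64 (both are
 * configuration-dependent): on a typical 64-bit layout,
 * sizeof(struct stack_record) is 8 + 4 + 4 + 4 + 4 bytes of padding +
 * 64 * 8 = 536 bytes, which ALIGN()s up to 544, so each 16 KB pool holds
 *
 *	records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE
 *			= 16384 / 544 = 30 records.
 */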
/* Updates references to the current and the next stack depot pools. */
static bool depot_update_pools(void **prealloc)
{
	/* Check if we still have objects in the freelist. */
	if (next_stack)
		goto out_keep_prealloc;

	/* Check if we have a new pool saved and use it. */
	if (new_pool) {
		depot_init_pool(new_pool);
		new_pool = NULL;

		/* Take note that we might need a new new_pool. */
		if (pools_num < DEPOT_MAX_POOLS)
			smp_store_release(&new_pool_required, 1);

		/* Try keeping the preallocated memory for new_pool. */
		goto out_keep_prealloc;
	}

	/* Bail out if we reached the pool limit. */
	if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
		WARN_ONCE(1, "Stack depot reached limit capacity");
		return false;
	}

	/* Check if we have preallocated memory and use it. */
	if (*prealloc) {
		depot_init_pool(*prealloc);
		*prealloc = NULL;
		return true;
	}

	return false;

out_keep_prealloc:
	/* Keep the preallocated memory for a new pool if required. */
	if (*prealloc)
		depot_keep_new_pool(prealloc);
	return true;
}

/* Allocates a new stack record in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;

	/* Update current and new pools if required and possible. */
	if (!depot_update_pools(prealloc))
		return NULL;

	/* Check if we have a stack record to save the stack trace. */
	stack = next_stack;
	if (!stack)
		return NULL;

	/* Advance the freelist. */
	next_stack = stack->next;

	/* Limit the number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
		size = CONFIG_STACKDEPOT_MAX_FRAMES;

	/* Save the stack trace. */
	stack->next = NULL;
	stack->hash = hash;
	stack->size = size;
	/* stack->handle is already filled in by depot_init_pool(). */
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));

	/*
	 * Let KMSAN know the stored stack record is initialized. This shall
	 * prevent false positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);

	return stack;
}

static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };
	/*
	 * READ_ONCE() pairs with the potential concurrent write in
	 * depot_init_pool().
	 */
	int pools_num_cached = READ_ONCE(pools_num);
	void *pool;
	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
	struct stack_record *stack;

	/* pool_index is 0-based, so an index equal to pools_num is invalid. */
	if (parts.pool_index >= pools_num_cached) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
		     parts.pool_index, pools_num_cached, handle);
		return NULL;
	}

	pool = stack_pools[parts.pool_index];
	if (!pool)
		return NULL;

	stack = pool + offset;
	return stack;
}

/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Non-instrumented version of memcmp().
 * Does not check the lexicographical order, only the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}
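/*
 * Illustrative sketch of the hashing above (not used by the code): jhash2()
 * consumes u32 words, so on a 64-bit kernel a trace of 8 frames is hashed
 * as 16 words:
 *
 *	u32 hash = jhash2((u32 *)entries,
 *			  8 * sizeof(unsigned long) / sizeof(u32),
 *			  STACK_HASH_SEED);
 *
 * Since jhash2() collisions are possible, the hash alone never identifies a
 * trace; equality is confirmed with stackdepot_memcmp() during lookup.
 */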
/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}
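/*
 * __stack_depot_save() below first walks the bucket locklessly. A minimal
 * sketch of the publication pattern it relies on (illustrative only):
 *
 *	writer (holds pool_lock):		reader (no lock):
 *	new->next = *bucket;			s = smp_load_acquire(bucket);
 *	smp_store_release(bucket, new);		// if s == new, all of new's
 *						// fields are visible here
 *
 * The release/acquire pair guarantees that a reader that observes the new
 * bucket head also observes the fully initialized record it points to.
 */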
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t handle = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stack depot growth.
	 *
	 * Since use of filter_irq_stacks() is a requirement to ensure stack
	 * depot can efficiently deduplicate interrupt stacks, always call
	 * filter_irq_stacks() here to simplify all callers' use of stack depot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disabled)
		return 0;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/*
	 * Fast path: look the stack trace up without locking.
	 * smp_load_acquire() pairs with smp_store_release() to |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if another stack pool needs to be allocated. If so, allocate
	 * the memory now: we won't be able to do that under the lock.
	 *
	 * smp_load_acquire() pairs with smp_store_release() in
	 * depot_update_pools() and depot_keep_new_pool().
	 */
	if (unlikely(can_alloc && smp_load_acquire(&new_pool_required))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * smp_store_release() pairs with smp_load_acquire()
			 * from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * Stack depot already contains this stack trace, but let's
		 * keep the preallocated memory for future.
		 */
		depot_keep_new_pool(&prealloc);
	}

	raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
	if (prealloc) {
		/* Stack depot didn't use this memory, free it. */
		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
	}
	if (found)
		handle = found->handle.handle;
	return handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);

depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);

unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	struct stack_record *stack;

	*entries = NULL;
	/*
	 * Let KMSAN know *entries is initialized. This shall prevent false
	 * positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(entries, sizeof(*entries));

	if (!handle || stack_depot_disabled)
		return 0;

	stack = depot_fetch_stack(handle);
	/* depot_fetch_stack() returns NULL for a corrupted handle. */
	if (!stack)
		return 0;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
			int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);

depot_stack_handle_t __must_check stack_depot_set_extra_bits(
			depot_stack_handle_t handle, unsigned int extra_bits)
{
	union handle_parts parts = { .handle = handle };

	/* Don't set extra bits on empty handles. */
	if (!handle)
		return 0;

	parts.extra = extra_bits;
	return parts.handle;
}
EXPORT_SYMBOL(stack_depot_set_extra_bits);

unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
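/*
 * Illustrative end-to-end usage sketch (hypothetical caller, not part of
 * this file): save the current task's stack trace, then print it back via
 * its handle.
 *
 *	unsigned long entries[64];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
 *	if (handle)
 *		stack_depot_print(handle);
 *
 * Saving an identical trace twice yields the same 32-bit handle, which is
 * the point of the depot: callers store the handle instead of the trace.
 */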