1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Generic infrastructure for lifetime debugging of objects. 4 * 5 * Copyright (C) 2008, Thomas Gleixner <[email protected]> 6 */ 7 8 #define pr_fmt(fmt) "ODEBUG: " fmt 9 10 #include <linux/debugobjects.h> 11 #include <linux/interrupt.h> 12 #include <linux/sched.h> 13 #include <linux/sched/task_stack.h> 14 #include <linux/seq_file.h> 15 #include <linux/debugfs.h> 16 #include <linux/slab.h> 17 #include <linux/hash.h> 18 #include <linux/kmemleak.h> 19 #include <linux/cpu.h> 20 21 #define ODEBUG_HASH_BITS 14 22 #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) 23 24 #define ODEBUG_POOL_SIZE 1024 25 #define ODEBUG_POOL_MIN_LEVEL 256 26 #define ODEBUG_POOL_PERCPU_SIZE 64 27 #define ODEBUG_BATCH_SIZE 16 28 29 #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT 30 #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) 31 #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) 32 33 /* 34 * We limit the freeing of debug objects via workqueue at a maximum 35 * frequency of 10Hz and about 1024 objects for each freeing operation. 36 * So it is freeing at most 10k debug objects per second. 37 */ 38 #define ODEBUG_FREE_WORK_MAX 1024 39 #define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10) 40 41 struct debug_bucket { 42 struct hlist_head list; 43 raw_spinlock_t lock; 44 }; 45 46 struct obj_pool { 47 struct hlist_head objects; 48 unsigned int cnt; 49 unsigned int min_cnt; 50 unsigned int max_cnt; 51 } ____cacheline_aligned; 52 53 54 static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = { 55 .max_cnt = ODEBUG_POOL_PERCPU_SIZE, 56 }; 57 58 static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; 59 60 static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; 61 62 static DEFINE_RAW_SPINLOCK(pool_lock); 63 64 static struct obj_pool pool_global = { 65 .min_cnt = ODEBUG_POOL_MIN_LEVEL, 66 .max_cnt = ODEBUG_POOL_SIZE, 67 }; 68 69 static struct obj_pool pool_to_free = { 70 .max_cnt = UINT_MAX, 71 }; 72 73 static HLIST_HEAD(pool_boot); 74 75 /* 76 * Because of the presence of percpu free pools, obj_pool_free will 77 * under-count those in the percpu free pools. Similarly, obj_pool_used 78 * will over-count those in the percpu free pools. Adjustments will be 79 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used 80 * can be off. 81 */ 82 static int __data_racy obj_pool_min_free = ODEBUG_POOL_SIZE; 83 static int obj_pool_used; 84 static int __data_racy obj_pool_max_used; 85 static bool obj_freeing; 86 87 static int __data_racy debug_objects_maxchain __read_mostly; 88 static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly; 89 static int __data_racy debug_objects_fixups __read_mostly; 90 static int __data_racy debug_objects_warnings __read_mostly; 91 static bool __data_racy debug_objects_enabled __read_mostly 92 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; 93 94 static const struct debug_obj_descr *descr_test __read_mostly; 95 static struct kmem_cache *obj_cache __ro_after_init; 96 97 /* 98 * Track numbers of kmem_cache_alloc()/free() calls done. 99 */ 100 static int __data_racy debug_objects_allocated; 101 static int __data_racy debug_objects_freed; 102 103 static void free_obj_work(struct work_struct *work); 104 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work); 105 106 static int __init enable_object_debug(char *str) 107 { 108 debug_objects_enabled = true; 109 return 0; 110 } 111 early_param("debug_objects", enable_object_debug); 112 113 static int __init disable_object_debug(char *str) 114 { 115 debug_objects_enabled = false; 116 return 0; 117 } 118 early_param("no_debug_objects", disable_object_debug); 119 120 static const char *obj_states[ODEBUG_STATE_MAX] = { 121 [ODEBUG_STATE_NONE] = "none", 122 [ODEBUG_STATE_INIT] = "initialized", 123 [ODEBUG_STATE_INACTIVE] = "inactive", 124 [ODEBUG_STATE_ACTIVE] = "active", 125 [ODEBUG_STATE_DESTROYED] = "destroyed", 126 [ODEBUG_STATE_NOTAVAILABLE] = "not available", 127 }; 128 129 static __always_inline unsigned int pool_count(struct obj_pool *pool) 130 { 131 return READ_ONCE(pool->cnt); 132 } 133 134 static __always_inline bool pool_should_refill(struct obj_pool *pool) 135 { 136 return pool_count(pool) < pool->min_cnt; 137 } 138 139 static __always_inline bool pool_must_refill(struct obj_pool *pool) 140 { 141 return pool_count(pool) < pool->min_cnt / 2; 142 } 143 144 static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src) 145 { 146 if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt) 147 return false; 148 149 for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) { 150 struct hlist_node *node = src->objects.first; 151 152 WRITE_ONCE(src->cnt, src->cnt - 1); 153 WRITE_ONCE(dst->cnt, dst->cnt + 1); 154 155 hlist_del(node); 156 hlist_add_head(node, &dst->objects); 157 } 158 return true; 159 } 160 161 static struct debug_obj *__alloc_object(struct hlist_head *list) 162 { 163 struct debug_obj *obj; 164 165 if (unlikely(!list->first)) 166 return NULL; 167 168 obj = hlist_entry(list->first, typeof(*obj), node); 169 hlist_del(&obj->node); 170 return obj; 171 } 172 173 static struct debug_obj *pcpu_alloc(void) 174 { 175 struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu); 176 177 lockdep_assert_irqs_disabled(); 178 179 for (;;) { 180 struct debug_obj *obj = __alloc_object(&pcp->objects); 181 182 if (likely(obj)) { 183 pcp->cnt--; 184 return obj; 185 } 186 187 guard(raw_spinlock)(&pool_lock); 188 if (!pool_move_batch(pcp, &pool_to_free)) { 189 if (!pool_move_batch(pcp, &pool_global)) 190 return NULL; 191 } 192 obj_pool_used += pcp->cnt; 193 194 if (obj_pool_used > obj_pool_max_used) 195 obj_pool_max_used = obj_pool_used; 196 197 if (pool_global.cnt < obj_pool_min_free) 198 obj_pool_min_free = pool_global.cnt; 199 } 200 } 201 202 static void free_object_list(struct hlist_head *head) 203 { 204 struct hlist_node *tmp; 205 struct debug_obj *obj; 206 int cnt = 0; 207 208 hlist_for_each_entry_safe(obj, tmp, head, node) { 209 hlist_del(&obj->node); 210 kmem_cache_free(obj_cache, obj); 211 cnt++; 212 } 213 debug_objects_freed += cnt; 214 } 215 216 static void fill_pool_from_freelist(void) 217 { 218 static unsigned long state; 219 220 /* 221 * Reuse objs from the global obj_to_free list; they will be 222 * reinitialized when allocating. 223 */ 224 if (!pool_count(&pool_to_free)) 225 return; 226 227 /* 228 * Prevent the context from being scheduled or interrupted after 229 * setting the state flag; 230 */ 231 guard(irqsave)(); 232 233 /* 234 * Avoid lock contention on &pool_lock and avoid making the cache 235 * line exclusive by testing the bit before attempting to set it. 236 */ 237 if (test_bit(0, &state) || test_and_set_bit(0, &state)) 238 return; 239 240 /* Avoid taking the lock when there is no work to do */ 241 while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) { 242 guard(raw_spinlock)(&pool_lock); 243 /* Move a batch if possible */ 244 pool_move_batch(&pool_global, &pool_to_free); 245 } 246 clear_bit(0, &state); 247 } 248 249 static void fill_pool(void) 250 { 251 static atomic_t cpus_allocating; 252 253 /* 254 * Avoid allocation and lock contention when: 255 * - One other CPU is already allocating 256 * - the global pool has not reached the critical level yet 257 */ 258 if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating)) 259 return; 260 261 atomic_inc(&cpus_allocating); 262 while (pool_should_refill(&pool_global)) { 263 struct debug_obj *new, *last = NULL; 264 HLIST_HEAD(head); 265 int cnt; 266 267 for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) { 268 new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN); 269 if (!new) 270 break; 271 hlist_add_head(&new->node, &head); 272 if (!last) 273 last = new; 274 } 275 if (!cnt) 276 break; 277 278 guard(raw_spinlock_irqsave)(&pool_lock); 279 hlist_splice_init(&head, &last->node, &pool_global.objects); 280 debug_objects_allocated += cnt; 281 WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt); 282 } 283 atomic_dec(&cpus_allocating); 284 } 285 286 /* 287 * Lookup an object in the hash bucket. 288 */ 289 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) 290 { 291 struct debug_obj *obj; 292 int cnt = 0; 293 294 hlist_for_each_entry(obj, &b->list, node) { 295 cnt++; 296 if (obj->object == addr) 297 return obj; 298 } 299 if (cnt > debug_objects_maxchain) 300 debug_objects_maxchain = cnt; 301 302 return NULL; 303 } 304 305 static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b, 306 const struct debug_obj_descr *descr) 307 { 308 struct debug_obj *obj; 309 310 if (likely(obj_cache)) 311 obj = pcpu_alloc(); 312 else 313 obj = __alloc_object(&pool_boot); 314 315 if (likely(obj)) { 316 obj->object = addr; 317 obj->descr = descr; 318 obj->state = ODEBUG_STATE_NONE; 319 obj->astate = 0; 320 hlist_add_head(&obj->node, &b->list); 321 } 322 return obj; 323 } 324 325 /* 326 * workqueue function to free objects. 327 * 328 * To reduce contention on the global pool_lock, the actual freeing of 329 * debug objects will be delayed if the pool_lock is busy. 330 */ 331 static void free_obj_work(struct work_struct *work) 332 { 333 struct debug_obj *obj; 334 unsigned long flags; 335 HLIST_HEAD(tofree); 336 337 WRITE_ONCE(obj_freeing, false); 338 if (!raw_spin_trylock_irqsave(&pool_lock, flags)) 339 return; 340 341 if (pool_global.cnt >= pool_global.max_cnt) 342 goto free_objs; 343 344 /* 345 * The objs on the pool list might be allocated before the work is 346 * run, so recheck if pool list it full or not, if not fill pool 347 * list from the global free list. As it is likely that a workload 348 * may be gearing up to use more and more objects, don't free any 349 * of them until the next round. 350 */ 351 while (pool_to_free.cnt && pool_global.cnt < pool_global.max_cnt) { 352 obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node); 353 hlist_del(&obj->node); 354 hlist_add_head(&obj->node, &pool_global.objects); 355 WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1); 356 WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1); 357 } 358 raw_spin_unlock_irqrestore(&pool_lock, flags); 359 return; 360 361 free_objs: 362 /* 363 * Pool list is already full and there are still objs on the free 364 * list. Move remaining free objs to a temporary list to free the 365 * memory outside the pool_lock held region. 366 */ 367 if (pool_to_free.cnt) { 368 hlist_move_list(&pool_to_free.objects, &tofree); 369 WRITE_ONCE(pool_to_free.cnt, 0); 370 } 371 raw_spin_unlock_irqrestore(&pool_lock, flags); 372 373 free_object_list(&tofree); 374 } 375 376 static void __free_object(struct debug_obj *obj) 377 { 378 struct debug_obj *objs[ODEBUG_BATCH_SIZE]; 379 struct obj_pool *percpu_pool; 380 int lookahead_count = 0; 381 bool work; 382 383 guard(irqsave)(); 384 385 if (unlikely(!obj_cache)) { 386 hlist_add_head(&obj->node, &pool_boot); 387 return; 388 } 389 390 /* 391 * Try to free it into the percpu pool first. 392 */ 393 percpu_pool = this_cpu_ptr(&pool_pcpu); 394 if (percpu_pool->cnt < ODEBUG_POOL_PERCPU_SIZE) { 395 hlist_add_head(&obj->node, &percpu_pool->objects); 396 percpu_pool->cnt++; 397 return; 398 } 399 400 /* 401 * As the percpu pool is full, look ahead and pull out a batch 402 * of objects from the percpu pool and free them as well. 403 */ 404 for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) { 405 objs[lookahead_count] = __alloc_object(&percpu_pool->objects); 406 if (!objs[lookahead_count]) 407 break; 408 percpu_pool->cnt--; 409 } 410 411 raw_spin_lock(&pool_lock); 412 work = (pool_global.cnt > pool_global.max_cnt) && obj_cache && 413 (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX); 414 obj_pool_used--; 415 416 if (work) { 417 WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1); 418 hlist_add_head(&obj->node, &pool_to_free.objects); 419 if (lookahead_count) { 420 WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + lookahead_count); 421 obj_pool_used -= lookahead_count; 422 while (lookahead_count) { 423 hlist_add_head(&objs[--lookahead_count]->node, 424 &pool_to_free.objects); 425 } 426 } 427 428 if ((pool_global.cnt > pool_global.max_cnt) && 429 (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) { 430 int i; 431 432 /* 433 * Free one more batch of objects from obj_pool. 434 */ 435 for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { 436 obj = __alloc_object(&pool_global.objects); 437 hlist_add_head(&obj->node, &pool_to_free.objects); 438 WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1); 439 WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1); 440 } 441 } 442 } else { 443 WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1); 444 hlist_add_head(&obj->node, &pool_global.objects); 445 if (lookahead_count) { 446 WRITE_ONCE(pool_global.cnt, pool_global.cnt + lookahead_count); 447 obj_pool_used -= lookahead_count; 448 while (lookahead_count) { 449 hlist_add_head(&objs[--lookahead_count]->node, 450 &pool_global.objects); 451 } 452 } 453 } 454 raw_spin_unlock(&pool_lock); 455 } 456 457 /* 458 * Put the object back into the pool and schedule work to free objects 459 * if necessary. 460 */ 461 static void free_object(struct debug_obj *obj) 462 { 463 __free_object(obj); 464 if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) { 465 WRITE_ONCE(obj_freeing, true); 466 schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); 467 } 468 } 469 470 static void put_objects(struct hlist_head *list) 471 { 472 struct hlist_node *tmp; 473 struct debug_obj *obj; 474 475 /* 476 * Using free_object() puts the objects into reuse or schedules 477 * them for freeing and it get's all the accounting correct. 478 */ 479 hlist_for_each_entry_safe(obj, tmp, list, node) { 480 hlist_del(&obj->node); 481 free_object(obj); 482 } 483 } 484 485 #ifdef CONFIG_HOTPLUG_CPU 486 static int object_cpu_offline(unsigned int cpu) 487 { 488 /* Remote access is safe as the CPU is dead already */ 489 struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu); 490 491 put_objects(&pcp->objects); 492 pcp->cnt = 0; 493 return 0; 494 } 495 #endif 496 497 /* Out of memory. Free all objects from hash */ 498 static void debug_objects_oom(void) 499 { 500 struct debug_bucket *db = obj_hash; 501 HLIST_HEAD(freelist); 502 503 pr_warn("Out of memory. ODEBUG disabled\n"); 504 505 for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 506 scoped_guard(raw_spinlock_irqsave, &db->lock) 507 hlist_move_list(&db->list, &freelist); 508 509 put_objects(&freelist); 510 } 511 } 512 513 /* 514 * We use the pfn of the address for the hash. That way we can check 515 * for freed objects simply by checking the affected bucket. 516 */ 517 static struct debug_bucket *get_bucket(unsigned long addr) 518 { 519 unsigned long hash; 520 521 hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS); 522 return &obj_hash[hash]; 523 } 524 525 static void debug_print_object(struct debug_obj *obj, char *msg) 526 { 527 const struct debug_obj_descr *descr = obj->descr; 528 static int limit; 529 530 /* 531 * Don't report if lookup_object_or_alloc() by the current thread 532 * failed because lookup_object_or_alloc()/debug_objects_oom() by a 533 * concurrent thread turned off debug_objects_enabled and cleared 534 * the hash buckets. 535 */ 536 if (!debug_objects_enabled) 537 return; 538 539 if (limit < 5 && descr != descr_test) { 540 void *hint = descr->debug_hint ? 541 descr->debug_hint(obj->object) : NULL; 542 limit++; 543 WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) " 544 "object: %p object type: %s hint: %pS\n", 545 msg, obj_states[obj->state], obj->astate, 546 obj->object, descr->name, hint); 547 } 548 debug_objects_warnings++; 549 } 550 551 /* 552 * Try to repair the damage, so we have a better chance to get useful 553 * debug output. 554 */ 555 static bool 556 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state), 557 void * addr, enum debug_obj_state state) 558 { 559 if (fixup && fixup(addr, state)) { 560 debug_objects_fixups++; 561 return true; 562 } 563 return false; 564 } 565 566 static void debug_object_is_on_stack(void *addr, int onstack) 567 { 568 int is_on_stack; 569 static int limit; 570 571 if (limit > 4) 572 return; 573 574 is_on_stack = object_is_on_stack(addr); 575 if (is_on_stack == onstack) 576 return; 577 578 limit++; 579 if (is_on_stack) 580 pr_warn("object %p is on stack %p, but NOT annotated.\n", addr, 581 task_stack_page(current)); 582 else 583 pr_warn("object %p is NOT on stack %p, but annotated.\n", addr, 584 task_stack_page(current)); 585 586 WARN_ON(1); 587 } 588 589 static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b, 590 const struct debug_obj_descr *descr, 591 bool onstack, bool alloc_ifstatic) 592 { 593 struct debug_obj *obj = lookup_object(addr, b); 594 enum debug_obj_state state = ODEBUG_STATE_NONE; 595 596 if (likely(obj)) 597 return obj; 598 599 /* 600 * debug_object_init() unconditionally allocates untracked 601 * objects. It does not matter whether it is a static object or 602 * not. 603 * 604 * debug_object_assert_init() and debug_object_activate() allow 605 * allocation only if the descriptor callback confirms that the 606 * object is static and considered initialized. For non-static 607 * objects the allocation needs to be done from the fixup callback. 608 */ 609 if (unlikely(alloc_ifstatic)) { 610 if (!descr->is_static_object || !descr->is_static_object(addr)) 611 return ERR_PTR(-ENOENT); 612 /* Statically allocated objects are considered initialized */ 613 state = ODEBUG_STATE_INIT; 614 } 615 616 obj = alloc_object(addr, b, descr); 617 if (likely(obj)) { 618 obj->state = state; 619 debug_object_is_on_stack(addr, onstack); 620 return obj; 621 } 622 623 /* Out of memory. Do the cleanup outside of the locked region */ 624 debug_objects_enabled = false; 625 return NULL; 626 } 627 628 static void debug_objects_fill_pool(void) 629 { 630 if (unlikely(!obj_cache)) 631 return; 632 633 if (likely(!pool_should_refill(&pool_global))) 634 return; 635 636 /* Try reusing objects from obj_to_free_list */ 637 fill_pool_from_freelist(); 638 639 if (likely(!pool_should_refill(&pool_global))) 640 return; 641 642 /* 643 * On RT enabled kernels the pool refill must happen in preemptible 644 * context -- for !RT kernels we rely on the fact that spinlock_t and 645 * raw_spinlock_t are basically the same type and this lock-type 646 * inversion works just fine. 647 */ 648 if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) { 649 /* 650 * Annotate away the spinlock_t inside raw_spinlock_t warning 651 * by temporarily raising the wait-type to WAIT_SLEEP, matching 652 * the preemptible() condition above. 653 */ 654 static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP); 655 lock_map_acquire_try(&fill_pool_map); 656 fill_pool(); 657 lock_map_release(&fill_pool_map); 658 } 659 } 660 661 static void 662 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) 663 { 664 struct debug_obj *obj, o; 665 struct debug_bucket *db; 666 unsigned long flags; 667 668 debug_objects_fill_pool(); 669 670 db = get_bucket((unsigned long) addr); 671 672 raw_spin_lock_irqsave(&db->lock, flags); 673 674 obj = lookup_object_or_alloc(addr, db, descr, onstack, false); 675 if (unlikely(!obj)) { 676 raw_spin_unlock_irqrestore(&db->lock, flags); 677 debug_objects_oom(); 678 return; 679 } 680 681 switch (obj->state) { 682 case ODEBUG_STATE_NONE: 683 case ODEBUG_STATE_INIT: 684 case ODEBUG_STATE_INACTIVE: 685 obj->state = ODEBUG_STATE_INIT; 686 raw_spin_unlock_irqrestore(&db->lock, flags); 687 return; 688 default: 689 break; 690 } 691 692 o = *obj; 693 raw_spin_unlock_irqrestore(&db->lock, flags); 694 debug_print_object(&o, "init"); 695 696 if (o.state == ODEBUG_STATE_ACTIVE) 697 debug_object_fixup(descr->fixup_init, addr, o.state); 698 } 699 700 /** 701 * debug_object_init - debug checks when an object is initialized 702 * @addr: address of the object 703 * @descr: pointer to an object specific debug description structure 704 */ 705 void debug_object_init(void *addr, const struct debug_obj_descr *descr) 706 { 707 if (!debug_objects_enabled) 708 return; 709 710 __debug_object_init(addr, descr, 0); 711 } 712 EXPORT_SYMBOL_GPL(debug_object_init); 713 714 /** 715 * debug_object_init_on_stack - debug checks when an object on stack is 716 * initialized 717 * @addr: address of the object 718 * @descr: pointer to an object specific debug description structure 719 */ 720 void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) 721 { 722 if (!debug_objects_enabled) 723 return; 724 725 __debug_object_init(addr, descr, 1); 726 } 727 EXPORT_SYMBOL_GPL(debug_object_init_on_stack); 728 729 /** 730 * debug_object_activate - debug checks when an object is activated 731 * @addr: address of the object 732 * @descr: pointer to an object specific debug description structure 733 * Returns 0 for success, -EINVAL for check failed. 734 */ 735 int debug_object_activate(void *addr, const struct debug_obj_descr *descr) 736 { 737 struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; 738 struct debug_bucket *db; 739 struct debug_obj *obj; 740 unsigned long flags; 741 742 if (!debug_objects_enabled) 743 return 0; 744 745 debug_objects_fill_pool(); 746 747 db = get_bucket((unsigned long) addr); 748 749 raw_spin_lock_irqsave(&db->lock, flags); 750 751 obj = lookup_object_or_alloc(addr, db, descr, false, true); 752 if (unlikely(!obj)) { 753 raw_spin_unlock_irqrestore(&db->lock, flags); 754 debug_objects_oom(); 755 return 0; 756 } else if (likely(!IS_ERR(obj))) { 757 switch (obj->state) { 758 case ODEBUG_STATE_ACTIVE: 759 case ODEBUG_STATE_DESTROYED: 760 o = *obj; 761 break; 762 case ODEBUG_STATE_INIT: 763 case ODEBUG_STATE_INACTIVE: 764 obj->state = ODEBUG_STATE_ACTIVE; 765 fallthrough; 766 default: 767 raw_spin_unlock_irqrestore(&db->lock, flags); 768 return 0; 769 } 770 } 771 772 raw_spin_unlock_irqrestore(&db->lock, flags); 773 debug_print_object(&o, "activate"); 774 775 switch (o.state) { 776 case ODEBUG_STATE_ACTIVE: 777 case ODEBUG_STATE_NOTAVAILABLE: 778 if (debug_object_fixup(descr->fixup_activate, addr, o.state)) 779 return 0; 780 fallthrough; 781 default: 782 return -EINVAL; 783 } 784 } 785 EXPORT_SYMBOL_GPL(debug_object_activate); 786 787 /** 788 * debug_object_deactivate - debug checks when an object is deactivated 789 * @addr: address of the object 790 * @descr: pointer to an object specific debug description structure 791 */ 792 void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) 793 { 794 struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; 795 struct debug_bucket *db; 796 struct debug_obj *obj; 797 unsigned long flags; 798 799 if (!debug_objects_enabled) 800 return; 801 802 db = get_bucket((unsigned long) addr); 803 804 raw_spin_lock_irqsave(&db->lock, flags); 805 806 obj = lookup_object(addr, db); 807 if (obj) { 808 switch (obj->state) { 809 case ODEBUG_STATE_DESTROYED: 810 break; 811 case ODEBUG_STATE_INIT: 812 case ODEBUG_STATE_INACTIVE: 813 case ODEBUG_STATE_ACTIVE: 814 if (obj->astate) 815 break; 816 obj->state = ODEBUG_STATE_INACTIVE; 817 fallthrough; 818 default: 819 raw_spin_unlock_irqrestore(&db->lock, flags); 820 return; 821 } 822 o = *obj; 823 } 824 825 raw_spin_unlock_irqrestore(&db->lock, flags); 826 debug_print_object(&o, "deactivate"); 827 } 828 EXPORT_SYMBOL_GPL(debug_object_deactivate); 829 830 /** 831 * debug_object_destroy - debug checks when an object is destroyed 832 * @addr: address of the object 833 * @descr: pointer to an object specific debug description structure 834 */ 835 void debug_object_destroy(void *addr, const struct debug_obj_descr *descr) 836 { 837 struct debug_obj *obj, o; 838 struct debug_bucket *db; 839 unsigned long flags; 840 841 if (!debug_objects_enabled) 842 return; 843 844 db = get_bucket((unsigned long) addr); 845 846 raw_spin_lock_irqsave(&db->lock, flags); 847 848 obj = lookup_object(addr, db); 849 if (!obj) { 850 raw_spin_unlock_irqrestore(&db->lock, flags); 851 return; 852 } 853 854 switch (obj->state) { 855 case ODEBUG_STATE_ACTIVE: 856 case ODEBUG_STATE_DESTROYED: 857 break; 858 case ODEBUG_STATE_NONE: 859 case ODEBUG_STATE_INIT: 860 case ODEBUG_STATE_INACTIVE: 861 obj->state = ODEBUG_STATE_DESTROYED; 862 fallthrough; 863 default: 864 raw_spin_unlock_irqrestore(&db->lock, flags); 865 return; 866 } 867 868 o = *obj; 869 raw_spin_unlock_irqrestore(&db->lock, flags); 870 debug_print_object(&o, "destroy"); 871 872 if (o.state == ODEBUG_STATE_ACTIVE) 873 debug_object_fixup(descr->fixup_destroy, addr, o.state); 874 } 875 EXPORT_SYMBOL_GPL(debug_object_destroy); 876 877 /** 878 * debug_object_free - debug checks when an object is freed 879 * @addr: address of the object 880 * @descr: pointer to an object specific debug description structure 881 */ 882 void debug_object_free(void *addr, const struct debug_obj_descr *descr) 883 { 884 struct debug_obj *obj, o; 885 struct debug_bucket *db; 886 unsigned long flags; 887 888 if (!debug_objects_enabled) 889 return; 890 891 db = get_bucket((unsigned long) addr); 892 893 raw_spin_lock_irqsave(&db->lock, flags); 894 895 obj = lookup_object(addr, db); 896 if (!obj) { 897 raw_spin_unlock_irqrestore(&db->lock, flags); 898 return; 899 } 900 901 switch (obj->state) { 902 case ODEBUG_STATE_ACTIVE: 903 break; 904 default: 905 hlist_del(&obj->node); 906 raw_spin_unlock_irqrestore(&db->lock, flags); 907 free_object(obj); 908 return; 909 } 910 911 o = *obj; 912 raw_spin_unlock_irqrestore(&db->lock, flags); 913 debug_print_object(&o, "free"); 914 915 debug_object_fixup(descr->fixup_free, addr, o.state); 916 } 917 EXPORT_SYMBOL_GPL(debug_object_free); 918 919 /** 920 * debug_object_assert_init - debug checks when object should be init-ed 921 * @addr: address of the object 922 * @descr: pointer to an object specific debug description structure 923 */ 924 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) 925 { 926 struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; 927 struct debug_bucket *db; 928 struct debug_obj *obj; 929 unsigned long flags; 930 931 if (!debug_objects_enabled) 932 return; 933 934 debug_objects_fill_pool(); 935 936 db = get_bucket((unsigned long) addr); 937 938 raw_spin_lock_irqsave(&db->lock, flags); 939 obj = lookup_object_or_alloc(addr, db, descr, false, true); 940 raw_spin_unlock_irqrestore(&db->lock, flags); 941 if (likely(!IS_ERR_OR_NULL(obj))) 942 return; 943 944 /* If NULL the allocation has hit OOM */ 945 if (!obj) { 946 debug_objects_oom(); 947 return; 948 } 949 950 /* Object is neither tracked nor static. It's not initialized. */ 951 debug_print_object(&o, "assert_init"); 952 debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE); 953 } 954 EXPORT_SYMBOL_GPL(debug_object_assert_init); 955 956 /** 957 * debug_object_active_state - debug checks object usage state machine 958 * @addr: address of the object 959 * @descr: pointer to an object specific debug description structure 960 * @expect: expected state 961 * @next: state to move to if expected state is found 962 */ 963 void 964 debug_object_active_state(void *addr, const struct debug_obj_descr *descr, 965 unsigned int expect, unsigned int next) 966 { 967 struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; 968 struct debug_bucket *db; 969 struct debug_obj *obj; 970 unsigned long flags; 971 972 if (!debug_objects_enabled) 973 return; 974 975 db = get_bucket((unsigned long) addr); 976 977 raw_spin_lock_irqsave(&db->lock, flags); 978 979 obj = lookup_object(addr, db); 980 if (obj) { 981 switch (obj->state) { 982 case ODEBUG_STATE_ACTIVE: 983 if (obj->astate != expect) 984 break; 985 obj->astate = next; 986 raw_spin_unlock_irqrestore(&db->lock, flags); 987 return; 988 default: 989 break; 990 } 991 o = *obj; 992 } 993 994 raw_spin_unlock_irqrestore(&db->lock, flags); 995 debug_print_object(&o, "active_state"); 996 } 997 EXPORT_SYMBOL_GPL(debug_object_active_state); 998 999 #ifdef CONFIG_DEBUG_OBJECTS_FREE 1000 static void __debug_check_no_obj_freed(const void *address, unsigned long size) 1001 { 1002 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; 1003 int cnt, objs_checked = 0; 1004 struct debug_obj *obj, o; 1005 struct debug_bucket *db; 1006 struct hlist_node *tmp; 1007 1008 saddr = (unsigned long) address; 1009 eaddr = saddr + size; 1010 paddr = saddr & ODEBUG_CHUNK_MASK; 1011 chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); 1012 chunks >>= ODEBUG_CHUNK_SHIFT; 1013 1014 for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { 1015 db = get_bucket(paddr); 1016 1017 repeat: 1018 cnt = 0; 1019 raw_spin_lock_irqsave(&db->lock, flags); 1020 hlist_for_each_entry_safe(obj, tmp, &db->list, node) { 1021 cnt++; 1022 oaddr = (unsigned long) obj->object; 1023 if (oaddr < saddr || oaddr >= eaddr) 1024 continue; 1025 1026 switch (obj->state) { 1027 case ODEBUG_STATE_ACTIVE: 1028 o = *obj; 1029 raw_spin_unlock_irqrestore(&db->lock, flags); 1030 debug_print_object(&o, "free"); 1031 debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state); 1032 goto repeat; 1033 default: 1034 hlist_del(&obj->node); 1035 __free_object(obj); 1036 break; 1037 } 1038 } 1039 raw_spin_unlock_irqrestore(&db->lock, flags); 1040 1041 if (cnt > debug_objects_maxchain) 1042 debug_objects_maxchain = cnt; 1043 1044 objs_checked += cnt; 1045 } 1046 1047 if (objs_checked > debug_objects_maxchecked) 1048 debug_objects_maxchecked = objs_checked; 1049 1050 /* Schedule work to actually kmem_cache_free() objects */ 1051 if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) { 1052 WRITE_ONCE(obj_freeing, true); 1053 schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); 1054 } 1055 } 1056 1057 void debug_check_no_obj_freed(const void *address, unsigned long size) 1058 { 1059 if (debug_objects_enabled) 1060 __debug_check_no_obj_freed(address, size); 1061 } 1062 #endif 1063 1064 #ifdef CONFIG_DEBUG_FS 1065 1066 static int debug_stats_show(struct seq_file *m, void *v) 1067 { 1068 int cpu, obj_percpu_free = 0; 1069 1070 for_each_possible_cpu(cpu) 1071 obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu); 1072 1073 seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); 1074 seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked); 1075 seq_printf(m, "warnings :%d\n", debug_objects_warnings); 1076 seq_printf(m, "fixups :%d\n", debug_objects_fixups); 1077 seq_printf(m, "pool_free :%d\n", pool_count(&pool_global) + obj_percpu_free); 1078 seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free); 1079 seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); 1080 seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free); 1081 seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); 1082 seq_printf(m, "on_free_list :%d\n", pool_count(&pool_to_free)); 1083 seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); 1084 seq_printf(m, "objs_freed :%d\n", debug_objects_freed); 1085 return 0; 1086 } 1087 DEFINE_SHOW_ATTRIBUTE(debug_stats); 1088 1089 static int __init debug_objects_init_debugfs(void) 1090 { 1091 struct dentry *dbgdir; 1092 1093 if (!debug_objects_enabled) 1094 return 0; 1095 1096 dbgdir = debugfs_create_dir("debug_objects", NULL); 1097 1098 debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops); 1099 1100 return 0; 1101 } 1102 __initcall(debug_objects_init_debugfs); 1103 1104 #else 1105 static inline void debug_objects_init_debugfs(void) { } 1106 #endif 1107 1108 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST 1109 1110 /* Random data structure for the self test */ 1111 struct self_test { 1112 unsigned long dummy1[6]; 1113 int static_init; 1114 unsigned long dummy2[3]; 1115 }; 1116 1117 static __initconst const struct debug_obj_descr descr_type_test; 1118 1119 static bool __init is_static_object(void *addr) 1120 { 1121 struct self_test *obj = addr; 1122 1123 return obj->static_init; 1124 } 1125 1126 /* 1127 * fixup_init is called when: 1128 * - an active object is initialized 1129 */ 1130 static bool __init fixup_init(void *addr, enum debug_obj_state state) 1131 { 1132 struct self_test *obj = addr; 1133 1134 switch (state) { 1135 case ODEBUG_STATE_ACTIVE: 1136 debug_object_deactivate(obj, &descr_type_test); 1137 debug_object_init(obj, &descr_type_test); 1138 return true; 1139 default: 1140 return false; 1141 } 1142 } 1143 1144 /* 1145 * fixup_activate is called when: 1146 * - an active object is activated 1147 * - an unknown non-static object is activated 1148 */ 1149 static bool __init fixup_activate(void *addr, enum debug_obj_state state) 1150 { 1151 struct self_test *obj = addr; 1152 1153 switch (state) { 1154 case ODEBUG_STATE_NOTAVAILABLE: 1155 return true; 1156 case ODEBUG_STATE_ACTIVE: 1157 debug_object_deactivate(obj, &descr_type_test); 1158 debug_object_activate(obj, &descr_type_test); 1159 return true; 1160 1161 default: 1162 return false; 1163 } 1164 } 1165 1166 /* 1167 * fixup_destroy is called when: 1168 * - an active object is destroyed 1169 */ 1170 static bool __init fixup_destroy(void *addr, enum debug_obj_state state) 1171 { 1172 struct self_test *obj = addr; 1173 1174 switch (state) { 1175 case ODEBUG_STATE_ACTIVE: 1176 debug_object_deactivate(obj, &descr_type_test); 1177 debug_object_destroy(obj, &descr_type_test); 1178 return true; 1179 default: 1180 return false; 1181 } 1182 } 1183 1184 /* 1185 * fixup_free is called when: 1186 * - an active object is freed 1187 */ 1188 static bool __init fixup_free(void *addr, enum debug_obj_state state) 1189 { 1190 struct self_test *obj = addr; 1191 1192 switch (state) { 1193 case ODEBUG_STATE_ACTIVE: 1194 debug_object_deactivate(obj, &descr_type_test); 1195 debug_object_free(obj, &descr_type_test); 1196 return true; 1197 default: 1198 return false; 1199 } 1200 } 1201 1202 static int __init 1203 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) 1204 { 1205 struct debug_bucket *db; 1206 struct debug_obj *obj; 1207 unsigned long flags; 1208 int res = -EINVAL; 1209 1210 db = get_bucket((unsigned long) addr); 1211 1212 raw_spin_lock_irqsave(&db->lock, flags); 1213 1214 obj = lookup_object(addr, db); 1215 if (!obj && state != ODEBUG_STATE_NONE) { 1216 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n"); 1217 goto out; 1218 } 1219 if (obj && obj->state != state) { 1220 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", 1221 obj->state, state); 1222 goto out; 1223 } 1224 if (fixups != debug_objects_fixups) { 1225 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", 1226 fixups, debug_objects_fixups); 1227 goto out; 1228 } 1229 if (warnings != debug_objects_warnings) { 1230 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", 1231 warnings, debug_objects_warnings); 1232 goto out; 1233 } 1234 res = 0; 1235 out: 1236 raw_spin_unlock_irqrestore(&db->lock, flags); 1237 if (res) 1238 debug_objects_enabled = false; 1239 return res; 1240 } 1241 1242 static __initconst const struct debug_obj_descr descr_type_test = { 1243 .name = "selftest", 1244 .is_static_object = is_static_object, 1245 .fixup_init = fixup_init, 1246 .fixup_activate = fixup_activate, 1247 .fixup_destroy = fixup_destroy, 1248 .fixup_free = fixup_free, 1249 }; 1250 1251 static __initdata struct self_test obj = { .static_init = 0 }; 1252 1253 static bool __init debug_objects_selftest(void) 1254 { 1255 int fixups, oldfixups, warnings, oldwarnings; 1256 unsigned long flags; 1257 1258 local_irq_save(flags); 1259 1260 fixups = oldfixups = debug_objects_fixups; 1261 warnings = oldwarnings = debug_objects_warnings; 1262 descr_test = &descr_type_test; 1263 1264 debug_object_init(&obj, &descr_type_test); 1265 if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) 1266 goto out; 1267 debug_object_activate(&obj, &descr_type_test); 1268 if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) 1269 goto out; 1270 debug_object_activate(&obj, &descr_type_test); 1271 if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings)) 1272 goto out; 1273 debug_object_deactivate(&obj, &descr_type_test); 1274 if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings)) 1275 goto out; 1276 debug_object_destroy(&obj, &descr_type_test); 1277 if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings)) 1278 goto out; 1279 debug_object_init(&obj, &descr_type_test); 1280 if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) 1281 goto out; 1282 debug_object_activate(&obj, &descr_type_test); 1283 if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) 1284 goto out; 1285 debug_object_deactivate(&obj, &descr_type_test); 1286 if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) 1287 goto out; 1288 debug_object_free(&obj, &descr_type_test); 1289 if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) 1290 goto out; 1291 1292 obj.static_init = 1; 1293 debug_object_activate(&obj, &descr_type_test); 1294 if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) 1295 goto out; 1296 debug_object_init(&obj, &descr_type_test); 1297 if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings)) 1298 goto out; 1299 debug_object_free(&obj, &descr_type_test); 1300 if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) 1301 goto out; 1302 1303 #ifdef CONFIG_DEBUG_OBJECTS_FREE 1304 debug_object_init(&obj, &descr_type_test); 1305 if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) 1306 goto out; 1307 debug_object_activate(&obj, &descr_type_test); 1308 if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) 1309 goto out; 1310 __debug_check_no_obj_freed(&obj, sizeof(obj)); 1311 if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings)) 1312 goto out; 1313 #endif 1314 pr_info("selftest passed\n"); 1315 1316 out: 1317 debug_objects_fixups = oldfixups; 1318 debug_objects_warnings = oldwarnings; 1319 descr_test = NULL; 1320 1321 local_irq_restore(flags); 1322 return debug_objects_enabled; 1323 } 1324 #else 1325 static inline bool debug_objects_selftest(void) { return true; } 1326 #endif 1327 1328 /* 1329 * Called during early boot to initialize the hash buckets and link 1330 * the static object pool objects into the poll list. After this call 1331 * the object tracker is fully operational. 1332 */ 1333 void __init debug_objects_early_init(void) 1334 { 1335 int i; 1336 1337 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 1338 raw_spin_lock_init(&obj_hash[i].lock); 1339 1340 /* Keep early boot simple and add everything to the boot list */ 1341 for (i = 0; i < ODEBUG_POOL_SIZE; i++) 1342 hlist_add_head(&obj_static_pool[i].node, &pool_boot); 1343 } 1344 1345 /* 1346 * Convert the statically allocated objects to dynamic ones. 1347 * debug_objects_mem_init() is called early so only one CPU is up and 1348 * interrupts are disabled, which means it is safe to replace the active 1349 * object references. 1350 */ 1351 static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache) 1352 { 1353 struct debug_bucket *db = obj_hash; 1354 struct debug_obj *obj, *new; 1355 struct hlist_node *tmp; 1356 HLIST_HEAD(objects); 1357 int i; 1358 1359 for (i = 0; i < ODEBUG_POOL_SIZE; i++) { 1360 obj = kmem_cache_zalloc(cache, GFP_KERNEL); 1361 if (!obj) 1362 goto free; 1363 hlist_add_head(&obj->node, &objects); 1364 } 1365 1366 debug_objects_allocated = ODEBUG_POOL_SIZE; 1367 pool_global.cnt = ODEBUG_POOL_SIZE; 1368 1369 /* 1370 * Move the allocated objects to the global pool and disconnect the 1371 * boot pool. 1372 */ 1373 hlist_move_list(&objects, &pool_global.objects); 1374 pool_boot.first = NULL; 1375 1376 /* Replace the active object references */ 1377 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 1378 hlist_move_list(&db->list, &objects); 1379 1380 hlist_for_each_entry(obj, &objects, node) { 1381 new = hlist_entry(pool_global.objects.first, typeof(*obj), node); 1382 hlist_del(&new->node); 1383 pool_global.cnt--; 1384 /* copy object data */ 1385 *new = *obj; 1386 hlist_add_head(&new->node, &db->list); 1387 } 1388 } 1389 return true; 1390 free: 1391 /* Can't use free_object_list() as the cache is not populated yet */ 1392 hlist_for_each_entry_safe(obj, tmp, &objects, node) { 1393 hlist_del(&obj->node); 1394 kmem_cache_free(cache, obj); 1395 } 1396 return false; 1397 } 1398 1399 /* 1400 * Called after the kmem_caches are functional to setup a dedicated 1401 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag 1402 * prevents that the debug code is called on kmem_cache_free() for the 1403 * debug tracker objects to avoid recursive calls. 1404 */ 1405 void __init debug_objects_mem_init(void) 1406 { 1407 struct kmem_cache *cache; 1408 int extras; 1409 1410 if (!debug_objects_enabled) 1411 return; 1412 1413 if (!debug_objects_selftest()) 1414 return; 1415 1416 cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0, 1417 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL); 1418 1419 if (!cache || !debug_objects_replace_static_objects(cache)) { 1420 debug_objects_enabled = false; 1421 pr_warn("Out of memory.\n"); 1422 return; 1423 } 1424 1425 /* 1426 * Adjust the thresholds for allocating and freeing objects 1427 * according to the number of possible CPUs available in the 1428 * system. 1429 */ 1430 extras = num_possible_cpus() * ODEBUG_BATCH_SIZE; 1431 pool_global.max_cnt += extras; 1432 pool_global.min_cnt += extras; 1433 1434 /* Everything worked. Expose the cache */ 1435 obj_cache = cache; 1436 1437 #ifdef CONFIG_HOTPLUG_CPU 1438 cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL, 1439 object_cpu_offline); 1440 #endif 1441 return; 1442 } 1443