// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/cpu.h>
#include <linux/debugobjects.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/static_key.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
	unsigned int		min_cnt;
	unsigned int		max_cnt;
} ____cacheline_aligned;

static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool pool_global = {
	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
	.max_cnt	= ODEBUG_POOL_SIZE,
};

static struct obj_pool pool_to_free = {
	.max_cnt	= UINT_MAX,
};

static HLIST_HEAD(pool_boot);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int __data_racy	obj_pool_min_free = ODEBUG_POOL_SIZE;
static int		obj_pool_used;
static int __data_racy	obj_pool_max_used;
static bool		obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static const struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache		*obj_cache __ro_after_init;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy	debug_objects_allocated;
static int __data_racy	debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt;
}

static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt / 2;
}

static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
		return false;

	for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
		struct hlist_node *node = src->objects.first;

		WRITE_ONCE(src->cnt, src->cnt - 1);
		WRITE_ONCE(dst->cnt, dst->cnt + 1);

		hlist_del(node);
		hlist_add_head(node, &dst->objects);
	}
	return true;
}

static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
	if (!src->cnt)
		return false;

	for (int i = 0; src->cnt && i < ODEBUG_BATCH_SIZE; i++) {
		struct hlist_node *node = src->objects.first;

		WRITE_ONCE(src->cnt, src->cnt - 1);
		hlist_del(node);
		hlist_add_head(node, head);
	}
	return true;
}

static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj;

	if (unlikely(!list->first))
		return NULL;

	obj = hlist_entry(list->first, typeof(*obj), node);
	hlist_del(&obj->node);
	return obj;
}

static struct debug_obj *pcpu_alloc(void)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	for (;;) {
		struct debug_obj *obj = __alloc_object(&pcp->objects);

		if (likely(obj)) {
			pcp->cnt--;
			return obj;
		}

		guard(raw_spinlock)(&pool_lock);
		if (!pool_move_batch(pcp, &pool_to_free)) {
			if (!pool_move_batch(pcp, &pool_global))
				return NULL;
		}
		obj_pool_used += pcp->cnt;

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (pool_global.cnt < obj_pool_min_free)
			obj_pool_min_free = pool_global.cnt;
	}
}
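
/*
 * Sizing note, derived from the constants above: each per CPU pool caches
 * up to ODEBUG_POOL_PERCPU_SIZE (64) objects, i.e. four batches of
 * ODEBUG_BATCH_SIZE (16). Allocation is served from the per CPU pool
 * first; only when it runs dry is a whole batch pulled from pool_to_free
 * or pool_global under pool_lock, and the free path below drains one
 * batch back once the per CPU pool is full again.
 */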

static void pcpu_free(struct debug_obj *obj)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	hlist_add_head(&obj->node, &pcp->objects);
	pcp->cnt++;

	/* Pool full ? */
	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
		return;

	/* Remove a batch from the per CPU pool */
	guard(raw_spinlock)(&pool_lock);
	/* Try to fit the batch into the pool_global first */
	if (!pool_move_batch(&pool_global, pcp))
		pool_move_batch(&pool_to_free, pcp);
	obj_pool_used -= ODEBUG_BATCH_SIZE;
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;

	/*
	 * Reuse objs from the global obj_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag;
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	/* Avoid taking the lock when there is no work to do */
	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
		guard(raw_spinlock)(&pool_lock);
		/* Move a batch if possible */
		pool_move_batch(&pool_global, &pool_to_free);
	}
	clear_bit(0, &state);
}

static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *  - One other CPU is already allocating
	 *  - the global pool has not reached the critical level yet
	 */
	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_should_refill(&pool_global)) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
			if (!new)
				break;
			hlist_add_head(&new->node, &head);
			if (!last)
				last = new;
		}
		if (!cnt)
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		hlist_splice_init(&head, &last->node, &pool_global.objects);
		debug_objects_allocated += cnt;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
				      const struct debug_obj_descr *descr)
{
	struct debug_obj *obj;

	if (static_branch_likely(&obj_cache_enabled))
		obj = pcpu_alloc();
	else
		obj = __alloc_object(&pool_boot);

	if (likely(obj)) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	bool free = true;

	WRITE_ONCE(obj_freeing, false);

	if (!pool_count(&pool_to_free))
		return;

	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
		HLIST_HEAD(tofree);

		/* Acquire and drop the lock for each batch */
		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
			if (!pool_to_free.cnt)
				return;

			/* Refill the global pool if possible */
			if (pool_move_batch(&pool_global, &pool_to_free)) {
				/* Don't free as there seems to be demand */
				free = false;
			} else if (free) {
				pool_pop_batch(&tofree, &pool_to_free);
			} else {
				return;
			}
		}
		free_object_list(&tofree);
	}
}

static void __free_object(struct debug_obj *obj)
{
	guard(irqsave)();
	if (static_branch_likely(&obj_cache_enabled))
		pcpu_free(obj);
	else
		hlist_add_head(&obj->node, &pool_boot);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
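
/*
 * Illustration: with ODEBUG_CHUNK_SHIFT == PAGE_SHIFT, all addresses
 * within the same page-sized chunk hash to the same bucket. That is why
 * checking a freed memory range only requires walking the buckets which
 * cover the chunks of that range (see __debug_check_no_obj_freed()).
 */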

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	if (!static_branch_likely(&obj_cache_enabled))
		return;

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from obj_to_free_list */
	fill_pool_from_freelist();

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
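
/*
 * Usage sketch for the active-state machine (illustrative; the substate
 * constants and the my_obj names are hypothetical): while an object is
 * ODEBUG_STATE_ACTIVE its caller-defined astate can be stepped through
 * additional substates, and any mismatch is reported:
 *
 *	#define MY_OBJ_IDLE	0
 *	#define MY_OBJ_QUEUED	1
 *
 *	void my_obj_queue(struct my_obj *obj)
 *	{
 *		debug_object_active_state(obj, &my_obj_debug_descr,
 *					  MY_OBJ_IDLE, MY_OBJ_QUEUED);
 *	}
 */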

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
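
/*
 * Call-site sketch (illustrative; my_free_block() is hypothetical):
 * memory allocators pass the exact range being released so that any
 * still-active tracked object inside it is reported and fixed up before
 * the memory is reused:
 *
 *	static void my_free_block(void *addr, unsigned long size)
 *	{
 *		debug_check_no_obj_freed(addr, size);
 *		... hand the memory back to the allocator ...
 *	}
 */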

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long dummy1[6];
	int static_init;
	unsigned long dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct debug_obj *obj, *new;
	struct hlist_node *tmp;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated = ODEBUG_POOL_SIZE;
	pool_global.cnt = ODEBUG_POOL_SIZE;

	/*
	 * Move the allocated objects to the global pool and disconnect the
	 * boot pool.
	 */
	hlist_move_list(&objects, &pool_global.objects);
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
			hlist_del(&new->node);
			pool_global.cnt--;
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof(struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	pool_global.max_cnt += extras;
	pool_global.min_cnt += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;
	static_branch_enable(&obj_cache_enabled);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}