// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it frees at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
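
/*
 * Worked example of the limit above (illustrative): with
 * ODEBUG_BATCH_SIZE == 16, ODEBUG_FREE_WORK_MAX is 1024 / 16 == 64
 * batches, i.e. at most 64 * 16 == 1024 objects per work invocation.
 * With a delay of HZ / 10 jiffies between invocations, the freeing rate
 * is bounded at roughly 10 * 1024 == 10240 objects per second.
 */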

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
	unsigned int		min_cnt;
	unsigned int		max_cnt;
} ____cacheline_aligned;

static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool pool_global = {
	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
	.max_cnt	= ODEBUG_POOL_SIZE,
};

static struct obj_pool pool_to_free = {
	.max_cnt	= UINT_MAX,
};

static HLIST_HEAD(pool_boot);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int __data_racy	obj_pool_min_free = ODEBUG_POOL_SIZE;
static int		obj_pool_used;
static int __data_racy	obj_pool_max_used;
static bool		obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static const struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache		*obj_cache __ro_after_init;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy	debug_objects_allocated;
static int __data_racy	debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt;
}

static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt / 2;
}
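
/*
 * Move a batch of up to ODEBUG_BATCH_SIZE objects from @src to @dst.
 * Fails if the full batch would not fit into @dst or if @src is empty.
 * The caller must hold pool_lock.
 */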
static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
		return false;

	for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
		struct hlist_node *node = src->objects.first;

		WRITE_ONCE(src->cnt, src->cnt - 1);
		WRITE_ONCE(dst->cnt, dst->cnt + 1);

		hlist_del(node);
		hlist_add_head(node, &dst->objects);
	}
	return true;
}

static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
	if (!src->cnt)
		return false;

	for (int i = 0; src->cnt && i < ODEBUG_BATCH_SIZE; i++) {
		struct hlist_node *node = src->objects.first;

		WRITE_ONCE(src->cnt, src->cnt - 1);
		hlist_del(node);
		hlist_add_head(node, head);
	}
	return true;
}

static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj;

	if (unlikely(!list->first))
		return NULL;

	obj = hlist_entry(list->first, typeof(*obj), node);
	hlist_del(&obj->node);
	return obj;
}
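
/*
 * Allocate one object from the per-CPU pool. When the pool is empty,
 * refill it with a batch from pool_to_free or, failing that, from
 * pool_global under pool_lock and retry. Returns NULL once the global
 * pools are exhausted as well.
 */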
static struct debug_obj *pcpu_alloc(void)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	for (;;) {
		struct debug_obj *obj = __alloc_object(&pcp->objects);

		if (likely(obj)) {
			pcp->cnt--;
			return obj;
		}

		guard(raw_spinlock)(&pool_lock);
		if (!pool_move_batch(pcp, &pool_to_free)) {
			if (!pool_move_batch(pcp, &pool_global))
				return NULL;
		}
		obj_pool_used += pcp->cnt;

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (pool_global.cnt < obj_pool_min_free)
			obj_pool_min_free = pool_global.cnt;
	}
}

static void pcpu_free(struct debug_obj *obj)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	hlist_add_head(&obj->node, &pcp->objects);
	pcp->cnt++;

	/* Pool full? */
	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
		return;

	/* Remove a batch from the per CPU pool */
	guard(raw_spinlock)(&pool_lock);
	/* Try to fit the batch into the pool_global first */
	if (!pool_move_batch(&pool_global, pcp))
		pool_move_batch(&pool_to_free, pcp);
	obj_pool_used -= ODEBUG_BATCH_SIZE;
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;

	/*
	 * Reuse objs from the global pool_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	/* Avoid taking the lock when there is no work to do */
	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
		guard(raw_spinlock)(&pool_lock);
		/* Move a batch if possible */
		pool_move_batch(&pool_global, &pool_to_free);
	}
	clear_bit(0, &state);
}

static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *  - One other CPU is already allocating
	 *  - the global pool has not reached the critical level yet
	 */
	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_should_refill(&pool_global)) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
			if (!new)
				break;
			hlist_add_head(&new->node, &head);
			if (!last)
				last = new;
		}
		if (!cnt)
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		hlist_splice_init(&head, &last->node, &pool_global.objects);
		debug_objects_allocated += cnt;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
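
/*
 * Allocate a tracking object for @addr and add it to hash bucket @b.
 * Before obj_cache is set up, the static boot pool is used instead of
 * the per-CPU pools.
 */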
static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
				      const struct debug_obj_descr *descr)
{
	struct debug_obj *obj;

	if (likely(obj_cache))
		obj = pcpu_alloc();
	else
		obj = __alloc_object(&pool_boot);

	if (likely(obj)) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/* Workqueue function to free objects. */
static void free_obj_work(struct work_struct *work)
{
	bool free = true;

	WRITE_ONCE(obj_freeing, false);

	if (!pool_count(&pool_to_free))
		return;

	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
		HLIST_HEAD(tofree);

		/* Acquire and drop the lock for each batch */
		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
			if (!pool_to_free.cnt)
				return;

			/* Refill the global pool if possible */
			if (pool_move_batch(&pool_global, &pool_to_free)) {
				/* Don't free as there seems to be demand */
				free = false;
			} else if (free) {
				pool_pop_batch(&tofree, &pool_to_free);
			} else {
				return;
			}
		}
		free_object_list(&tofree);
	}
}

static void __free_object(struct debug_obj *obj)
{
	guard(irqsave)();
	if (likely(obj_cache))
		pcpu_free(obj);
	else
		hlist_add_head(&obj->node, &pool_boot);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
		     msg, obj_states[obj->state], obj->astate,
		     obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}
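
/*
 * Check whether the annotation of an object matches its actual
 * location: warn (limited to five reports) when an on-stack object was
 * not initialized with debug_object_init_on_stack() or vice versa.
 */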
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	if (unlikely(!obj_cache))
		return;

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from pool_to_free */
	fill_pool_from_freelist();

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
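
/*
 * Illustrative sketch (not part of this file): a subsystem typically
 * provides a struct debug_obj_descr and wraps the calls above and below
 * into its object lifecycle. The myobj_* names here are made up:
 *
 *	static bool myobj_fixup_free(void *addr, enum debug_obj_state state)
 *	{
 *		return false;
 *	}
 *
 *	static const struct debug_obj_descr myobj_debug_descr = {
 *		.name		= "myobj",
 *		.fixup_free	= myobj_fixup_free,
 *	};
 *
 *	setup:    debug_object_init(obj, &myobj_debug_descr);
 *	start:    debug_object_activate(obj, &myobj_debug_descr);
 *	stop:     debug_object_deactivate(obj, &myobj_debug_descr);
 *	teardown: debug_object_free(obj, &myobj_debug_descr);
 *
 * Real world users of this pattern are e.g. the timer and workqueue code.
 */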

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
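
/*
 * Usage note (illustrative): @expect/@next let a caller drive a private
 * sub-state machine while the object is ODEBUG_STATE_ACTIVE. The RCU
 * debug objects support (CONFIG_DEBUG_OBJECTS_RCU_HEAD) for instance
 * uses this to track whether an rcu_head is currently queued.
 */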

#ifdef CONFIG_DEBUG_OBJECTS_FREE
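/*
 * Scan all allocation chunks overlapping [address, address + size) and
 * deal with tracked objects inside the freed range: active objects are
 * reported and handed to the fixup_free() callback, everything else is
 * removed from the hash and released for reuse.
 */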
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
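
/*
 * Note: with debugfs mounted at the usual location the counters above
 * are readable from /sys/kernel/debug/debug_objects/stats.
 */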

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };
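
/*
 * Drive the object above through legal and illegal state transitions
 * and verify after each step, via check_results(), that the tracked
 * state as well as the fixup and warning counters match expectations.
 */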
static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct debug_obj *obj, *new;
	struct hlist_node *tmp;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated = ODEBUG_POOL_SIZE;
	pool_global.cnt = ODEBUG_POOL_SIZE;

	/*
	 * Move the allocated objects to the global pool and disconnect the
	 * boot pool.
	 */
	hlist_move_list(&objects, &pool_global.objects);
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
			hlist_del(&new->node);
			pool_global.cnt--;
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which would lead to recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	pool_global.max_cnt += extras;
	pool_global.min_cnt += extras;
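
	/*
	 * Example (illustrative): on a machine with 8 possible CPUs and
	 * ODEBUG_BATCH_SIZE == 16, extras == 128, which raises the global
	 * pool limits from 1024/256 to 1152 (max_cnt) and 384 (min_cnt).
	 */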

	/* Everything worked. Expose the cache */
	obj_cache = cache;

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
}