xref: /linux-6.15/lib/debugobjects.c (revision a3b9e191)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic infrastructure for lifetime debugging of objects.
4  *
5  * Copyright (C) 2008, Thomas Gleixner <[email protected]>
6  */
7 
8 #define pr_fmt(fmt) "ODEBUG: " fmt
9 
10 #include <linux/debugobjects.h>
11 #include <linux/interrupt.h>
12 #include <linux/sched.h>
13 #include <linux/sched/task_stack.h>
14 #include <linux/seq_file.h>
15 #include <linux/debugfs.h>
16 #include <linux/slab.h>
17 #include <linux/hash.h>
18 #include <linux/kmemleak.h>
19 #include <linux/cpu.h>
20 
21 #define ODEBUG_HASH_BITS	14
22 #define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
23 
24 #define ODEBUG_POOL_SIZE	1024
25 #define ODEBUG_POOL_MIN_LEVEL	256
26 #define ODEBUG_POOL_PERCPU_SIZE	64
27 #define ODEBUG_BATCH_SIZE	16
28 
29 #define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
30 #define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
31 #define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
32 
33 /*
34  * The freeing of debug objects via the workqueue is limited to a maximum
35  * frequency of 10Hz and at most about 1024 objects per freeing operation,
36  * i.e. at most 10k debug objects are freed per second.
37  */
38 #define ODEBUG_FREE_WORK_MAX	1024
39 #define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
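
/*
 * Illustrative note: ODEBUG_FREE_WORK_DELAY is DIV_ROUND_UP(HZ, 10) ticks,
 * i.e. the work runs at most about once per 100ms. With up to
 * ODEBUG_FREE_WORK_MAX (1024) objects per run, that caps freeing at
 * roughly 1024 * 10 = 10240 objects per second.
 */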
40 
41 struct debug_bucket {
42 	struct hlist_head	list;
43 	raw_spinlock_t		lock;
44 };
45 
46 struct obj_pool {
47 	struct hlist_head	objects;
48 	unsigned int		cnt;
49 	unsigned int		min_cnt;
50 	unsigned int		max_cnt;
51 } ____cacheline_aligned;
52 
53 
54 static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu)  = {
55 	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
56 };
57 
58 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
59 
60 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
61 
62 static DEFINE_RAW_SPINLOCK(pool_lock);
63 
64 static struct obj_pool pool_global = {
65 	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
66 	.max_cnt	= ODEBUG_POOL_SIZE,
67 };
68 
69 static struct obj_pool pool_to_free = {
70 	.max_cnt	= UINT_MAX,
71 };
72 
73 static HLIST_HEAD(pool_boot);
74 
75 /*
76  * Because of the presence of percpu free pools, the global pool free count
77  * under-counts the objects sitting in the percpu free pools. Similarly,
78  * obj_pool_used over-counts them. Adjustments are made in
79  * debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
80  * can be off.
81  */
82 static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
83 static int			obj_pool_used;
84 static int __data_racy		obj_pool_max_used;
85 static bool			obj_freeing;
86 
87 static int __data_racy			debug_objects_maxchain __read_mostly;
88 static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
89 static int __data_racy			debug_objects_fixups __read_mostly;
90 static int __data_racy			debug_objects_warnings __read_mostly;
91 static bool __data_racy			debug_objects_enabled __read_mostly
92 					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
93 
94 static const struct debug_obj_descr	*descr_test  __read_mostly;
95 static struct kmem_cache		*obj_cache __ro_after_init;
96 
97 /*
98  * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
99  */
100 static int __data_racy		debug_objects_allocated;
101 static int __data_racy		debug_objects_freed;
102 
103 static void free_obj_work(struct work_struct *work);
104 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
105 
106 static int __init enable_object_debug(char *str)
107 {
108 	debug_objects_enabled = true;
109 	return 0;
110 }
111 early_param("debug_objects", enable_object_debug);
112 
113 static int __init disable_object_debug(char *str)
114 {
115 	debug_objects_enabled = false;
116 	return 0;
117 }
118 early_param("no_debug_objects", disable_object_debug);
119 
120 static const char *obj_states[ODEBUG_STATE_MAX] = {
121 	[ODEBUG_STATE_NONE]		= "none",
122 	[ODEBUG_STATE_INIT]		= "initialized",
123 	[ODEBUG_STATE_INACTIVE]		= "inactive",
124 	[ODEBUG_STATE_ACTIVE]		= "active",
125 	[ODEBUG_STATE_DESTROYED]	= "destroyed",
126 	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
127 };
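
/*
 * Simplified sketch of the state machine enforced below (not exhaustive):
 *
 *	NONE --init--> INIT --activate--> ACTIVE --deactivate--> INACTIVE
 *	NONE/INIT/INACTIVE --destroy--> DESTROYED
 *	any state except ACTIVE --free--> object removed from tracking
 *
 * Violations (e.g. activating an ACTIVE or DESTROYED object, or freeing an
 * ACTIVE one) are reported via debug_print_object() and, where possible,
 * handed to the descriptor's fixup callbacks.
 */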
128 
129 static __always_inline unsigned int pool_count(struct obj_pool *pool)
130 {
131 	return READ_ONCE(pool->cnt);
132 }
133 
134 static __always_inline bool pool_should_refill(struct obj_pool *pool)
135 {
136 	return pool_count(pool) < pool->min_cnt;
137 }
138 
139 static __always_inline bool pool_must_refill(struct obj_pool *pool)
140 {
141 	return pool_count(pool) < pool->min_cnt / 2;
142 }
143 
144 static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
145 {
146 	if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
147 		return false;
148 
149 	for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
150 		struct hlist_node *node = src->objects.first;
151 
152 		WRITE_ONCE(src->cnt, src->cnt - 1);
153 		WRITE_ONCE(dst->cnt, dst->cnt + 1);
154 
155 		hlist_del(node);
156 		hlist_add_head(node, &dst->objects);
157 	}
158 	return true;
159 }
160 
161 static struct debug_obj *__alloc_object(struct hlist_head *list)
162 {
163 	struct debug_obj *obj;
164 
165 	if (unlikely(!list->first))
166 		return NULL;
167 
168 	obj = hlist_entry(list->first, typeof(*obj), node);
169 	hlist_del(&obj->node);
170 	return obj;
171 }
172 
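/*
 * Allocate a tracking object from the per CPU pool. When the per CPU pool
 * is empty, a batch is pulled under pool_lock from pool_to_free first
 * (reusing objects queued for freeing) and only then from pool_global,
 * updating the global usage statistics along the way.
 */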
173 static struct debug_obj *pcpu_alloc(void)
174 {
175 	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
176 
177 	lockdep_assert_irqs_disabled();
178 
179 	for (;;) {
180 		struct debug_obj *obj = __alloc_object(&pcp->objects);
181 
182 		if (likely(obj)) {
183 			pcp->cnt--;
184 			return obj;
185 		}
186 
187 		guard(raw_spinlock)(&pool_lock);
188 		if (!pool_move_batch(pcp, &pool_to_free)) {
189 			if (!pool_move_batch(pcp, &pool_global))
190 				return NULL;
191 		}
192 		obj_pool_used += pcp->cnt;
193 
194 		if (obj_pool_used > obj_pool_max_used)
195 			obj_pool_max_used = obj_pool_used;
196 
197 		if (pool_global.cnt < obj_pool_min_free)
198 			obj_pool_min_free = pool_global.cnt;
199 	}
200 }
201 
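/*
 * Return a tracking object to the per CPU pool. Once the per CPU pool is
 * full, a batch is moved under pool_lock back to pool_global or, if that
 * is full too, to pool_to_free for later release by the workqueue.
 */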
202 static void pcpu_free(struct debug_obj *obj)
203 {
204 	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
205 
206 	lockdep_assert_irqs_disabled();
207 
208 	hlist_add_head(&obj->node, &pcp->objects);
209 	pcp->cnt++;
210 
211 	/* Pool full ? */
212 	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
213 		return;
214 
215 	/* Remove a batch from the per CPU pool */
216 	guard(raw_spinlock)(&pool_lock);
217 	/* Try to fit the batch into the pool_global first */
218 	if (!pool_move_batch(&pool_global, pcp))
219 		pool_move_batch(&pool_to_free, pcp);
220 	obj_pool_used -= ODEBUG_BATCH_SIZE;
221 }
222 
223 static void free_object_list(struct hlist_head *head)
224 {
225 	struct hlist_node *tmp;
226 	struct debug_obj *obj;
227 	int cnt = 0;
228 
229 	hlist_for_each_entry_safe(obj, tmp, head, node) {
230 		hlist_del(&obj->node);
231 		kmem_cache_free(obj_cache, obj);
232 		cnt++;
233 	}
234 	debug_objects_freed += cnt;
235 }
236 
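/*
 * Opportunistically move batches from pool_to_free back into pool_global.
 * The 'state' bit ensures that only one context performs the refill at a
 * time, keeping contention on pool_lock low.
 */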
237 static void fill_pool_from_freelist(void)
238 {
239 	static unsigned long state;
240 
241 	/*
242 	 * Reuse objects from the global pool_to_free list; they will be
243 	 * reinitialized when allocated.
244 	 */
245 	if (!pool_count(&pool_to_free))
246 		return;
247 
248 	/*
249 	 * Prevent the context from being scheduled or interrupted after
250 	 * setting the state flag.
251 	 */
252 	guard(irqsave)();
253 
254 	/*
255 	 * Avoid lock contention on &pool_lock and avoid making the cache
256 	 * line exclusive by testing the bit before attempting to set it.
257 	 */
258 	if (test_bit(0, &state) || test_and_set_bit(0, &state))
259 		return;
260 
261 	/* Avoid taking the lock when there is no work to do */
262 	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
263 		guard(raw_spinlock)(&pool_lock);
264 		/* Move a batch if possible */
265 		pool_move_batch(&pool_global, &pool_to_free);
266 	}
267 	clear_bit(0, &state);
268 }
269 
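/*
 * Refill pool_global directly from the slab cache in batches of
 * ODEBUG_BATCH_SIZE. Unless the pool is critically low, the refill is
 * skipped when another CPU is already allocating. The GFP flags make the
 * allocation non-sleeping and suppress allocation failure warnings.
 */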
270 static void fill_pool(void)
271 {
272 	static atomic_t cpus_allocating;
273 
274 	/*
275 	 * Avoid allocation and lock contention when:
276 	 *   - another CPU is already allocating
277 	 *   - the global pool has not reached the critical level yet
278 	 */
279 	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
280 		return;
281 
282 	atomic_inc(&cpus_allocating);
283 	while (pool_should_refill(&pool_global)) {
284 		struct debug_obj *new, *last = NULL;
285 		HLIST_HEAD(head);
286 		int cnt;
287 
288 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
289 			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
290 			if (!new)
291 				break;
292 			hlist_add_head(&new->node, &head);
293 			if (!last)
294 				last = new;
295 		}
296 		if (!cnt)
297 			break;
298 
299 		guard(raw_spinlock_irqsave)(&pool_lock);
300 		hlist_splice_init(&head, &last->node, &pool_global.objects);
301 		debug_objects_allocated += cnt;
302 		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
303 	}
304 	atomic_dec(&cpus_allocating);
305 }
306 
307 /*
308  * Lookup an object in the hash bucket.
309  */
310 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
311 {
312 	struct debug_obj *obj;
313 	int cnt = 0;
314 
315 	hlist_for_each_entry(obj, &b->list, node) {
316 		cnt++;
317 		if (obj->object == addr)
318 			return obj;
319 	}
320 	if (cnt > debug_objects_maxchain)
321 		debug_objects_maxchain = cnt;
322 
323 	return NULL;
324 }
325 
326 static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
327 				      const struct debug_obj_descr *descr)
328 {
329 	struct debug_obj *obj;
330 
331 	if (likely(obj_cache))
332 		obj = pcpu_alloc();
333 	else
334 		obj = __alloc_object(&pool_boot);
335 
336 	if (likely(obj)) {
337 		obj->object = addr;
338 		obj->descr  = descr;
339 		obj->state  = ODEBUG_STATE_NONE;
340 		obj->astate = 0;
341 		hlist_add_head(&obj->node, &b->list);
342 	}
343 	return obj;
344 }
345 
346 /*
347  * workqueue function to free objects.
348  *
349  * To reduce contention on the global pool_lock, the actual freeing of
350  * debug objects will be delayed if the pool_lock is busy.
351  */
352 static void free_obj_work(struct work_struct *work)
353 {
354 	struct debug_obj *obj;
355 	unsigned long flags;
356 	HLIST_HEAD(tofree);
357 
358 	WRITE_ONCE(obj_freeing, false);
359 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
360 		return;
361 
362 	if (pool_global.cnt >= pool_global.max_cnt)
363 		goto free_objs;
364 
365 	/*
366 	 * Objects might have been allocated from the pool list before this
367 	 * work runs, so recheck whether the pool list is full. If it is not,
368 	 * refill it from the global free list. As a workload is likely
369 	 * gearing up to use more and more objects, don't free any of them
370 	 * until the next round.
371 	 */
372 	while (pool_to_free.cnt && pool_global.cnt < pool_global.max_cnt) {
373 		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
374 		hlist_del(&obj->node);
375 		hlist_add_head(&obj->node, &pool_global.objects);
376 		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
377 		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
378 	}
379 	raw_spin_unlock_irqrestore(&pool_lock, flags);
380 	return;
381 
382 free_objs:
383 	/*
384 	 * Pool list is already full and there are still objs on the free
385 	 * list. Move remaining free objs to a temporary list to free the
386 	 * memory outside the pool_lock held region.
387 	 */
388 	if (pool_to_free.cnt) {
389 		hlist_move_list(&pool_to_free.objects, &tofree);
390 		WRITE_ONCE(pool_to_free.cnt, 0);
391 	}
392 	raw_spin_unlock_irqrestore(&pool_lock, flags);
393 
394 	free_object_list(&tofree);
395 }
396 
397 static void __free_object(struct debug_obj *obj)
398 {
399 	guard(irqsave)();
400 	if (likely(obj_cache))
401 		pcpu_free(obj);
402 	else
403 		hlist_add_head(&obj->node, &pool_boot);
404 }
405 
406 /*
407  * Put the object back into the pool and schedule work to free objects
408  * if necessary.
409  */
410 static void free_object(struct debug_obj *obj)
411 {
412 	__free_object(obj);
413 	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
414 		WRITE_ONCE(obj_freeing, true);
415 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
416 	}
417 }
418 
419 static void put_objects(struct hlist_head *list)
420 {
421 	struct hlist_node *tmp;
422 	struct debug_obj *obj;
423 
424 	/*
425 	 * Using free_object() makes the objects available for reuse or
426 	 * schedules them for freeing and it gets all the accounting correct.
427 	 */
428 	hlist_for_each_entry_safe(obj, tmp, list, node) {
429 		hlist_del(&obj->node);
430 		free_object(obj);
431 	}
432 }
433 
434 #ifdef CONFIG_HOTPLUG_CPU
435 static int object_cpu_offline(unsigned int cpu)
436 {
437 	/* Remote access is safe as the CPU is dead already */
438 	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);
439 
440 	put_objects(&pcp->objects);
441 	pcp->cnt = 0;
442 	return 0;
443 }
444 #endif
445 
446 /* Out of memory. Free all objects from hash */
447 static void debug_objects_oom(void)
448 {
449 	struct debug_bucket *db = obj_hash;
450 	HLIST_HEAD(freelist);
451 
452 	pr_warn("Out of memory. ODEBUG disabled\n");
453 
454 	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
455 		scoped_guard(raw_spinlock_irqsave, &db->lock)
456 			hlist_move_list(&db->list, &freelist);
457 
458 		put_objects(&freelist);
459 	}
460 }
461 
462 /*
463  * We use the pfn of the address for the hash. That way we can check
464  * for freed objects simply by checking the affected bucket.
465  */
466 static struct debug_bucket *get_bucket(unsigned long addr)
467 {
468 	unsigned long hash;
469 
470 	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
471 	return &obj_hash[hash];
472 }
473 
474 static void debug_print_object(struct debug_obj *obj, char *msg)
475 {
476 	const struct debug_obj_descr *descr = obj->descr;
477 	static int limit;
478 
479 	/*
480 	 * Don't report if lookup_object_or_alloc() by the current thread
481 	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
482 	 * concurrent thread turned off debug_objects_enabled and cleared
483 	 * the hash buckets.
484 	 */
485 	if (!debug_objects_enabled)
486 		return;
487 
488 	if (limit < 5 && descr != descr_test) {
489 		void *hint = descr->debug_hint ?
490 			descr->debug_hint(obj->object) : NULL;
491 		limit++;
492 		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
493 				 "object: %p object type: %s hint: %pS\n",
494 			msg, obj_states[obj->state], obj->astate,
495 			obj->object, descr->name, hint);
496 	}
497 	debug_objects_warnings++;
498 }
499 
500 /*
501  * Try to repair the damage, so we have a better chance to get useful
502  * debug output.
503  */
504 static bool
505 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
506 		   void * addr, enum debug_obj_state state)
507 {
508 	if (fixup && fixup(addr, state)) {
509 		debug_objects_fixups++;
510 		return true;
511 	}
512 	return false;
513 }
514 
515 static void debug_object_is_on_stack(void *addr, int onstack)
516 {
517 	int is_on_stack;
518 	static int limit;
519 
520 	if (limit > 4)
521 		return;
522 
523 	is_on_stack = object_is_on_stack(addr);
524 	if (is_on_stack == onstack)
525 		return;
526 
527 	limit++;
528 	if (is_on_stack)
529 		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
530 			 task_stack_page(current));
531 	else
532 		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
533 			 task_stack_page(current));
534 
535 	WARN_ON(1);
536 }
537 
538 static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
539 						const struct debug_obj_descr *descr,
540 						bool onstack, bool alloc_ifstatic)
541 {
542 	struct debug_obj *obj = lookup_object(addr, b);
543 	enum debug_obj_state state = ODEBUG_STATE_NONE;
544 
545 	if (likely(obj))
546 		return obj;
547 
548 	/*
549 	 * debug_object_init() unconditionally allocates untracked
550 	 * objects. It does not matter whether it is a static object or
551 	 * not.
552 	 *
553 	 * debug_object_assert_init() and debug_object_activate() allow
554 	 * allocation only if the descriptor callback confirms that the
555 	 * object is static and considered initialized. For non-static
556 	 * objects the allocation needs to be done from the fixup callback.
557 	 */
558 	if (unlikely(alloc_ifstatic)) {
559 		if (!descr->is_static_object || !descr->is_static_object(addr))
560 			return ERR_PTR(-ENOENT);
561 		/* Statically allocated objects are considered initialized */
562 		state = ODEBUG_STATE_INIT;
563 	}
564 
565 	obj = alloc_object(addr, b, descr);
566 	if (likely(obj)) {
567 		obj->state = state;
568 		debug_object_is_on_stack(addr, onstack);
569 		return obj;
570 	}
571 
572 	/* Out of memory. Do the cleanup outside of the locked region */
573 	debug_objects_enabled = false;
574 	return NULL;
575 }
576 
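/*
 * Called before taking a hash bucket lock by the entry points which may
 * need to allocate a tracking object: top up pool_global from pool_to_free
 * and, if it is still below the refill threshold, from the slab cache,
 * observing the PREEMPT_RT constraints documented below.
 */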
577 static void debug_objects_fill_pool(void)
578 {
579 	if (unlikely(!obj_cache))
580 		return;
581 
582 	if (likely(!pool_should_refill(&pool_global)))
583 		return;
584 
585 	/* Try reusing objects from the pool_to_free list */
586 	fill_pool_from_freelist();
587 
588 	if (likely(!pool_should_refill(&pool_global)))
589 		return;
590 
591 	/*
592 	 * On RT enabled kernels the pool refill must happen in preemptible
593 	 * context -- for !RT kernels we rely on the fact that spinlock_t and
594 	 * raw_spinlock_t are basically the same type and this lock-type
595 	 * inversion works just fine.
596 	 */
597 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
598 		/*
599 		 * Annotate away the spinlock_t inside raw_spinlock_t warning
600 		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
601 		 * the preemptible() condition above.
602 		 */
603 		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
604 		lock_map_acquire_try(&fill_pool_map);
605 		fill_pool();
606 		lock_map_release(&fill_pool_map);
607 	}
608 }
609 
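/*
 * Core of debug_object_init(): look up or allocate the tracking object for
 * @addr and switch it to ODEBUG_STATE_INIT. Re-initializing an ACTIVE
 * object is reported and handed to the descriptor's fixup_init() callback.
 */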
610 static void
611 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
612 {
613 	struct debug_obj *obj, o;
614 	struct debug_bucket *db;
615 	unsigned long flags;
616 
617 	debug_objects_fill_pool();
618 
619 	db = get_bucket((unsigned long) addr);
620 
621 	raw_spin_lock_irqsave(&db->lock, flags);
622 
623 	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
624 	if (unlikely(!obj)) {
625 		raw_spin_unlock_irqrestore(&db->lock, flags);
626 		debug_objects_oom();
627 		return;
628 	}
629 
630 	switch (obj->state) {
631 	case ODEBUG_STATE_NONE:
632 	case ODEBUG_STATE_INIT:
633 	case ODEBUG_STATE_INACTIVE:
634 		obj->state = ODEBUG_STATE_INIT;
635 		raw_spin_unlock_irqrestore(&db->lock, flags);
636 		return;
637 	default:
638 		break;
639 	}
640 
641 	o = *obj;
642 	raw_spin_unlock_irqrestore(&db->lock, flags);
643 	debug_print_object(&o, "init");
644 
645 	if (o.state == ODEBUG_STATE_ACTIVE)
646 		debug_object_fixup(descr->fixup_init, addr, o.state);
647 }
648 
649 /**
650  * debug_object_init - debug checks when an object is initialized
651  * @addr:	address of the object
652  * @descr:	pointer to an object specific debug description structure
653  */
654 void debug_object_init(void *addr, const struct debug_obj_descr *descr)
655 {
656 	if (!debug_objects_enabled)
657 		return;
658 
659 	__debug_object_init(addr, descr, 0);
660 }
661 EXPORT_SYMBOL_GPL(debug_object_init);
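
/*
 * Minimal usage sketch (hypothetical "struct foo" user, not part of this
 * file): a subsystem provides a descriptor for its object type and calls
 * the debug_object_*() hooks from its own init/activate/free paths.
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_release(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *	}
 */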
662 
663 /**
664  * debug_object_init_on_stack - debug checks when an object on stack is
665  *				initialized
666  * @addr:	address of the object
667  * @descr:	pointer to an object specific debug description structure
668  */
669 void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
670 {
671 	if (!debug_objects_enabled)
672 		return;
673 
674 	__debug_object_init(addr, descr, 1);
675 }
676 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
677 
678 /**
679  * debug_object_activate - debug checks when an object is activated
680  * @addr:	address of the object
681  * @descr:	pointer to an object specific debug description structure
682  * Returns 0 for success, -EINVAL if the check failed.
683  */
684 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
685 {
686 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
687 	struct debug_bucket *db;
688 	struct debug_obj *obj;
689 	unsigned long flags;
690 
691 	if (!debug_objects_enabled)
692 		return 0;
693 
694 	debug_objects_fill_pool();
695 
696 	db = get_bucket((unsigned long) addr);
697 
698 	raw_spin_lock_irqsave(&db->lock, flags);
699 
700 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
701 	if (unlikely(!obj)) {
702 		raw_spin_unlock_irqrestore(&db->lock, flags);
703 		debug_objects_oom();
704 		return 0;
705 	} else if (likely(!IS_ERR(obj))) {
706 		switch (obj->state) {
707 		case ODEBUG_STATE_ACTIVE:
708 		case ODEBUG_STATE_DESTROYED:
709 			o = *obj;
710 			break;
711 		case ODEBUG_STATE_INIT:
712 		case ODEBUG_STATE_INACTIVE:
713 			obj->state = ODEBUG_STATE_ACTIVE;
714 			fallthrough;
715 		default:
716 			raw_spin_unlock_irqrestore(&db->lock, flags);
717 			return 0;
718 		}
719 	}
720 
721 	raw_spin_unlock_irqrestore(&db->lock, flags);
722 	debug_print_object(&o, "activate");
723 
724 	switch (o.state) {
725 	case ODEBUG_STATE_ACTIVE:
726 	case ODEBUG_STATE_NOTAVAILABLE:
727 		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
728 			return 0;
729 		fallthrough;
730 	default:
731 		return -EINVAL;
732 	}
733 }
734 EXPORT_SYMBOL_GPL(debug_object_activate);
735 
736 /**
737  * debug_object_deactivate - debug checks when an object is deactivated
738  * @addr:	address of the object
739  * @descr:	pointer to an object specific debug description structure
740  */
741 void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
742 {
743 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
744 	struct debug_bucket *db;
745 	struct debug_obj *obj;
746 	unsigned long flags;
747 
748 	if (!debug_objects_enabled)
749 		return;
750 
751 	db = get_bucket((unsigned long) addr);
752 
753 	raw_spin_lock_irqsave(&db->lock, flags);
754 
755 	obj = lookup_object(addr, db);
756 	if (obj) {
757 		switch (obj->state) {
758 		case ODEBUG_STATE_DESTROYED:
759 			break;
760 		case ODEBUG_STATE_INIT:
761 		case ODEBUG_STATE_INACTIVE:
762 		case ODEBUG_STATE_ACTIVE:
763 			if (obj->astate)
764 				break;
765 			obj->state = ODEBUG_STATE_INACTIVE;
766 			fallthrough;
767 		default:
768 			raw_spin_unlock_irqrestore(&db->lock, flags);
769 			return;
770 		}
771 		o = *obj;
772 	}
773 
774 	raw_spin_unlock_irqrestore(&db->lock, flags);
775 	debug_print_object(&o, "deactivate");
776 }
777 EXPORT_SYMBOL_GPL(debug_object_deactivate);
778 
779 /**
780  * debug_object_destroy - debug checks when an object is destroyed
781  * @addr:	address of the object
782  * @descr:	pointer to an object specific debug description structure
783  */
784 void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
785 {
786 	struct debug_obj *obj, o;
787 	struct debug_bucket *db;
788 	unsigned long flags;
789 
790 	if (!debug_objects_enabled)
791 		return;
792 
793 	db = get_bucket((unsigned long) addr);
794 
795 	raw_spin_lock_irqsave(&db->lock, flags);
796 
797 	obj = lookup_object(addr, db);
798 	if (!obj) {
799 		raw_spin_unlock_irqrestore(&db->lock, flags);
800 		return;
801 	}
802 
803 	switch (obj->state) {
804 	case ODEBUG_STATE_ACTIVE:
805 	case ODEBUG_STATE_DESTROYED:
806 		break;
807 	case ODEBUG_STATE_NONE:
808 	case ODEBUG_STATE_INIT:
809 	case ODEBUG_STATE_INACTIVE:
810 		obj->state = ODEBUG_STATE_DESTROYED;
811 		fallthrough;
812 	default:
813 		raw_spin_unlock_irqrestore(&db->lock, flags);
814 		return;
815 	}
816 
817 	o = *obj;
818 	raw_spin_unlock_irqrestore(&db->lock, flags);
819 	debug_print_object(&o, "destroy");
820 
821 	if (o.state == ODEBUG_STATE_ACTIVE)
822 		debug_object_fixup(descr->fixup_destroy, addr, o.state);
823 }
824 EXPORT_SYMBOL_GPL(debug_object_destroy);
825 
826 /**
827  * debug_object_free - debug checks when an object is freed
828  * @addr:	address of the object
829  * @descr:	pointer to an object specific debug description structure
830  */
831 void debug_object_free(void *addr, const struct debug_obj_descr *descr)
832 {
833 	struct debug_obj *obj, o;
834 	struct debug_bucket *db;
835 	unsigned long flags;
836 
837 	if (!debug_objects_enabled)
838 		return;
839 
840 	db = get_bucket((unsigned long) addr);
841 
842 	raw_spin_lock_irqsave(&db->lock, flags);
843 
844 	obj = lookup_object(addr, db);
845 	if (!obj) {
846 		raw_spin_unlock_irqrestore(&db->lock, flags);
847 		return;
848 	}
849 
850 	switch (obj->state) {
851 	case ODEBUG_STATE_ACTIVE:
852 		break;
853 	default:
854 		hlist_del(&obj->node);
855 		raw_spin_unlock_irqrestore(&db->lock, flags);
856 		free_object(obj);
857 		return;
858 	}
859 
860 	o = *obj;
861 	raw_spin_unlock_irqrestore(&db->lock, flags);
862 	debug_print_object(&o, "free");
863 
864 	debug_object_fixup(descr->fixup_free, addr, o.state);
865 }
866 EXPORT_SYMBOL_GPL(debug_object_free);
867 
868 /**
869  * debug_object_assert_init - debug checks when an object should be initialized
870  * @addr:	address of the object
871  * @descr:	pointer to an object specific debug description structure
872  */
873 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
874 {
875 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
876 	struct debug_bucket *db;
877 	struct debug_obj *obj;
878 	unsigned long flags;
879 
880 	if (!debug_objects_enabled)
881 		return;
882 
883 	debug_objects_fill_pool();
884 
885 	db = get_bucket((unsigned long) addr);
886 
887 	raw_spin_lock_irqsave(&db->lock, flags);
888 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
889 	raw_spin_unlock_irqrestore(&db->lock, flags);
890 	if (likely(!IS_ERR_OR_NULL(obj)))
891 		return;
892 
893 	/* If NULL the allocation has hit OOM */
894 	if (!obj) {
895 		debug_objects_oom();
896 		return;
897 	}
898 
899 	/* Object is neither tracked nor static. It's not initialized. */
900 	debug_print_object(&o, "assert_init");
901 	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
902 }
903 EXPORT_SYMBOL_GPL(debug_object_assert_init);
904 
905 /**
906  * debug_object_active_state - debug checks object usage state machine
907  * @addr:	address of the object
908  * @descr:	pointer to an object specific debug description structure
909  * @expect:	expected state
910  * @next:	state to move to if expected state is found
911  */
912 void
913 debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
914 			  unsigned int expect, unsigned int next)
915 {
916 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
917 	struct debug_bucket *db;
918 	struct debug_obj *obj;
919 	unsigned long flags;
920 
921 	if (!debug_objects_enabled)
922 		return;
923 
924 	db = get_bucket((unsigned long) addr);
925 
926 	raw_spin_lock_irqsave(&db->lock, flags);
927 
928 	obj = lookup_object(addr, db);
929 	if (obj) {
930 		switch (obj->state) {
931 		case ODEBUG_STATE_ACTIVE:
932 			if (obj->astate != expect)
933 				break;
934 			obj->astate = next;
935 			raw_spin_unlock_irqrestore(&db->lock, flags);
936 			return;
937 		default:
938 			break;
939 		}
940 		o = *obj;
941 	}
942 
943 	raw_spin_unlock_irqrestore(&db->lock, flags);
944 	debug_print_object(&o, "active_state");
945 }
946 EXPORT_SYMBOL_GPL(debug_object_active_state);
947 
948 #ifdef CONFIG_DEBUG_OBJECTS_FREE
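/*
 * Walk all hash buckets that can contain objects inside the freed memory
 * range [address, address + size) and drop them from tracking. Objects
 * still in ODEBUG_STATE_ACTIVE are reported and passed to the descriptor's
 * fixup_free() callback before the bucket is rescanned.
 */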
949 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
950 {
951 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
952 	int cnt, objs_checked = 0;
953 	struct debug_obj *obj, o;
954 	struct debug_bucket *db;
955 	struct hlist_node *tmp;
956 
957 	saddr = (unsigned long) address;
958 	eaddr = saddr + size;
959 	paddr = saddr & ODEBUG_CHUNK_MASK;
960 	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
961 	chunks >>= ODEBUG_CHUNK_SHIFT;
962 
963 	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
964 		db = get_bucket(paddr);
965 
966 repeat:
967 		cnt = 0;
968 		raw_spin_lock_irqsave(&db->lock, flags);
969 		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
970 			cnt++;
971 			oaddr = (unsigned long) obj->object;
972 			if (oaddr < saddr || oaddr >= eaddr)
973 				continue;
974 
975 			switch (obj->state) {
976 			case ODEBUG_STATE_ACTIVE:
977 				o = *obj;
978 				raw_spin_unlock_irqrestore(&db->lock, flags);
979 				debug_print_object(&o, "free");
980 				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
981 				goto repeat;
982 			default:
983 				hlist_del(&obj->node);
984 				__free_object(obj);
985 				break;
986 			}
987 		}
988 		raw_spin_unlock_irqrestore(&db->lock, flags);
989 
990 		if (cnt > debug_objects_maxchain)
991 			debug_objects_maxchain = cnt;
992 
993 		objs_checked += cnt;
994 	}
995 
996 	if (objs_checked > debug_objects_maxchecked)
997 		debug_objects_maxchecked = objs_checked;
998 
999 	/* Schedule work to actually kmem_cache_free() objects */
1000 	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
1001 		WRITE_ONCE(obj_freeing, true);
1002 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1003 	}
1004 }
1005 
1006 void debug_check_no_obj_freed(const void *address, unsigned long size)
1007 {
1008 	if (debug_objects_enabled)
1009 		__debug_check_no_obj_freed(address, size);
1010 }
1011 #endif
1012 
1013 #ifdef CONFIG_DEBUG_FS
1014 
1015 static int debug_stats_show(struct seq_file *m, void *v)
1016 {
1017 	int cpu, obj_percpu_free = 0;
1018 
1019 	for_each_possible_cpu(cpu)
1020 		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);
1021 
1022 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1023 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1024 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1025 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1026 	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
1027 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1028 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1029 	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1030 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1031 	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
1032 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1033 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1034 	return 0;
1035 }
1036 DEFINE_SHOW_ATTRIBUTE(debug_stats);
1037 
1038 static int __init debug_objects_init_debugfs(void)
1039 {
1040 	struct dentry *dbgdir;
1041 
1042 	if (!debug_objects_enabled)
1043 		return 0;
1044 
1045 	dbgdir = debugfs_create_dir("debug_objects", NULL);
1046 
1047 	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1048 
1049 	return 0;
1050 }
1051 __initcall(debug_objects_init_debugfs);
1052 
1053 #else
1054 static inline void debug_objects_init_debugfs(void) { }
1055 #endif
1056 
1057 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1058 
1059 /* Random data structure for the self test */
1060 struct self_test {
1061 	unsigned long	dummy1[6];
1062 	int		static_init;
1063 	unsigned long	dummy2[3];
1064 };
1065 
1066 static __initconst const struct debug_obj_descr descr_type_test;
1067 
1068 static bool __init is_static_object(void *addr)
1069 {
1070 	struct self_test *obj = addr;
1071 
1072 	return obj->static_init;
1073 }
1074 
1075 /*
1076  * fixup_init is called when:
1077  * - an active object is initialized
1078  */
1079 static bool __init fixup_init(void *addr, enum debug_obj_state state)
1080 {
1081 	struct self_test *obj = addr;
1082 
1083 	switch (state) {
1084 	case ODEBUG_STATE_ACTIVE:
1085 		debug_object_deactivate(obj, &descr_type_test);
1086 		debug_object_init(obj, &descr_type_test);
1087 		return true;
1088 	default:
1089 		return false;
1090 	}
1091 }
1092 
1093 /*
1094  * fixup_activate is called when:
1095  * - an active object is activated
1096  * - an unknown non-static object is activated
1097  */
1098 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1099 {
1100 	struct self_test *obj = addr;
1101 
1102 	switch (state) {
1103 	case ODEBUG_STATE_NOTAVAILABLE:
1104 		return true;
1105 	case ODEBUG_STATE_ACTIVE:
1106 		debug_object_deactivate(obj, &descr_type_test);
1107 		debug_object_activate(obj, &descr_type_test);
1108 		return true;
1109 
1110 	default:
1111 		return false;
1112 	}
1113 }
1114 
1115 /*
1116  * fixup_destroy is called when:
1117  * - an active object is destroyed
1118  */
1119 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1120 {
1121 	struct self_test *obj = addr;
1122 
1123 	switch (state) {
1124 	case ODEBUG_STATE_ACTIVE:
1125 		debug_object_deactivate(obj, &descr_type_test);
1126 		debug_object_destroy(obj, &descr_type_test);
1127 		return true;
1128 	default:
1129 		return false;
1130 	}
1131 }
1132 
1133 /*
1134  * fixup_free is called when:
1135  * - an active object is freed
1136  */
1137 static bool __init fixup_free(void *addr, enum debug_obj_state state)
1138 {
1139 	struct self_test *obj = addr;
1140 
1141 	switch (state) {
1142 	case ODEBUG_STATE_ACTIVE:
1143 		debug_object_deactivate(obj, &descr_type_test);
1144 		debug_object_free(obj, &descr_type_test);
1145 		return true;
1146 	default:
1147 		return false;
1148 	}
1149 }
1150 
1151 static int __init
1152 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1153 {
1154 	struct debug_bucket *db;
1155 	struct debug_obj *obj;
1156 	unsigned long flags;
1157 	int res = -EINVAL;
1158 
1159 	db = get_bucket((unsigned long) addr);
1160 
1161 	raw_spin_lock_irqsave(&db->lock, flags);
1162 
1163 	obj = lookup_object(addr, db);
1164 	if (!obj && state != ODEBUG_STATE_NONE) {
1165 		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1166 		goto out;
1167 	}
1168 	if (obj && obj->state != state) {
1169 		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1170 		       obj->state, state);
1171 		goto out;
1172 	}
1173 	if (fixups != debug_objects_fixups) {
1174 		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1175 		       fixups, debug_objects_fixups);
1176 		goto out;
1177 	}
1178 	if (warnings != debug_objects_warnings) {
1179 		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1180 		       warnings, debug_objects_warnings);
1181 		goto out;
1182 	}
1183 	res = 0;
1184 out:
1185 	raw_spin_unlock_irqrestore(&db->lock, flags);
1186 	if (res)
1187 		debug_objects_enabled = false;
1188 	return res;
1189 }
1190 
1191 static __initconst const struct debug_obj_descr descr_type_test = {
1192 	.name			= "selftest",
1193 	.is_static_object	= is_static_object,
1194 	.fixup_init		= fixup_init,
1195 	.fixup_activate		= fixup_activate,
1196 	.fixup_destroy		= fixup_destroy,
1197 	.fixup_free		= fixup_free,
1198 };
1199 
1200 static __initdata struct self_test obj = { .static_init = 0 };
1201 
1202 static bool __init debug_objects_selftest(void)
1203 {
1204 	int fixups, oldfixups, warnings, oldwarnings;
1205 	unsigned long flags;
1206 
1207 	local_irq_save(flags);
1208 
1209 	fixups = oldfixups = debug_objects_fixups;
1210 	warnings = oldwarnings = debug_objects_warnings;
1211 	descr_test = &descr_type_test;
1212 
1213 	debug_object_init(&obj, &descr_type_test);
1214 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1215 		goto out;
1216 	debug_object_activate(&obj, &descr_type_test);
1217 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1218 		goto out;
1219 	debug_object_activate(&obj, &descr_type_test);
1220 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1221 		goto out;
1222 	debug_object_deactivate(&obj, &descr_type_test);
1223 	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1224 		goto out;
1225 	debug_object_destroy(&obj, &descr_type_test);
1226 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1227 		goto out;
1228 	debug_object_init(&obj, &descr_type_test);
1229 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1230 		goto out;
1231 	debug_object_activate(&obj, &descr_type_test);
1232 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1233 		goto out;
1234 	debug_object_deactivate(&obj, &descr_type_test);
1235 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1236 		goto out;
1237 	debug_object_free(&obj, &descr_type_test);
1238 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1239 		goto out;
1240 
1241 	obj.static_init = 1;
1242 	debug_object_activate(&obj, &descr_type_test);
1243 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1244 		goto out;
1245 	debug_object_init(&obj, &descr_type_test);
1246 	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1247 		goto out;
1248 	debug_object_free(&obj, &descr_type_test);
1249 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1250 		goto out;
1251 
1252 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1253 	debug_object_init(&obj, &descr_type_test);
1254 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1255 		goto out;
1256 	debug_object_activate(&obj, &descr_type_test);
1257 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1258 		goto out;
1259 	__debug_check_no_obj_freed(&obj, sizeof(obj));
1260 	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1261 		goto out;
1262 #endif
1263 	pr_info("selftest passed\n");
1264 
1265 out:
1266 	debug_objects_fixups = oldfixups;
1267 	debug_objects_warnings = oldwarnings;
1268 	descr_test = NULL;
1269 
1270 	local_irq_restore(flags);
1271 	return debug_objects_enabled;
1272 }
1273 #else
1274 static inline bool debug_objects_selftest(void) { return true; }
1275 #endif
1276 
1277 /*
1278  * Called during early boot to initialize the hash buckets and link
1279  * the static object pool objects into the boot pool list. After this call
1280  * the object tracker is fully operational.
1281  */
1282 void __init debug_objects_early_init(void)
1283 {
1284 	int i;
1285 
1286 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1287 		raw_spin_lock_init(&obj_hash[i].lock);
1288 
1289 	/* Keep early boot simple and add everything to the boot list */
1290 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1291 		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
1292 }
1293 
1294 /*
1295  * Convert the statically allocated objects to dynamic ones.
1296  * debug_objects_mem_init() is called early, while only one CPU is up and
1297  * interrupts are disabled, which means it is safe to replace the active
1298  * object references.
1299  */
1300 static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
1301 {
1302 	struct debug_bucket *db = obj_hash;
1303 	struct debug_obj *obj, *new;
1304 	struct hlist_node *tmp;
1305 	HLIST_HEAD(objects);
1306 	int i;
1307 
1308 	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1309 		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
1310 		if (!obj)
1311 			goto free;
1312 		hlist_add_head(&obj->node, &objects);
1313 	}
1314 
1315 	debug_objects_allocated = ODEBUG_POOL_SIZE;
1316 	pool_global.cnt = ODEBUG_POOL_SIZE;
1317 
1318 	/*
1319 	 * Move the allocated objects to the global pool and disconnect the
1320 	 * boot pool.
1321 	 */
1322 	hlist_move_list(&objects, &pool_global.objects);
1323 	pool_boot.first = NULL;
1324 
1325 	/* Replace the active object references */
1326 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1327 		hlist_move_list(&db->list, &objects);
1328 
1329 		hlist_for_each_entry(obj, &objects, node) {
1330 			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
1331 			hlist_del(&new->node);
1332 			pool_global.cnt--;
1333 			/* copy object data */
1334 			*new = *obj;
1335 			hlist_add_head(&new->node, &db->list);
1336 		}
1337 	}
1338 	return true;
1339 free:
1340 	/* Can't use free_object_list() as obj_cache is not set up yet */
1341 	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1342 		hlist_del(&obj->node);
1343 		kmem_cache_free(cache, obj);
1344 	}
1345 	return false;
1346 }
1347 
1348 /*
1349  * Called after the kmem_caches are functional to set up a dedicated
1350  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1351  * prevents the debug code from being called on kmem_cache_free() for
1352  * the debug tracker objects, avoiding recursive calls.
1353  */
1354 void __init debug_objects_mem_init(void)
1355 {
1356 	struct kmem_cache *cache;
1357 	int extras;
1358 
1359 	if (!debug_objects_enabled)
1360 		return;
1361 
1362 	if (!debug_objects_selftest())
1363 		return;
1364 
1365 	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
1366 				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
1367 
1368 	if (!cache || !debug_objects_replace_static_objects(cache)) {
1369 		debug_objects_enabled = false;
1370 		pr_warn("Out of memory.\n");
1371 		return;
1372 	}
1373 
1374 	/*
1375 	 * Adjust the thresholds for allocating and freeing objects
1376 	 * according to the number of possible CPUs available in the
1377 	 * system.
1378 	 */
1379 	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1380 	pool_global.max_cnt += extras;
1381 	pool_global.min_cnt += extras;
1382 
1383 	/* Everything worked. Expose the cache */
1384 	obj_cache = cache;
1385 
1386 #ifdef CONFIG_HOTPLUG_CPU
1387 	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1388 				  object_cpu_offline);
1389 #endif
1390 	return;
1391 }
1392