xref: /linux-6.15/lib/debugobjects.c (revision 74fe1ad4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic infrastructure for lifetime debugging of objects.
4  *
5  * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
6  */
7 
8 #define pr_fmt(fmt) "ODEBUG: " fmt
9 
10 #include <linux/cpu.h>
11 #include <linux/debugobjects.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/kmemleak.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <linux/seq_file.h>
18 #include <linux/slab.h>
19 #include <linux/static_key.h>
20 
21 #define ODEBUG_HASH_BITS	14
22 #define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
23 
24 /* Must be power of two */
25 #define ODEBUG_BATCH_SIZE	16
26 
27 /* Initial values. Must all be a multiple of batch size */
28 #define ODEBUG_POOL_SIZE	(64 * ODEBUG_BATCH_SIZE)
29 #define ODEBUG_POOL_MIN_LEVEL	(ODEBUG_POOL_SIZE / 4)
30 
31 #define ODEBUG_POOL_PERCPU_SIZE	(4 * ODEBUG_BATCH_SIZE)
32 
33 #define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
34 #define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
35 #define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
36 
37 /*
38  * Freeing of debug objects via the workqueue is limited to a maximum
39  * frequency of 10Hz and about 1024 objects per freeing operation, so
40  * at most roughly 10k debug objects are freed per second.
41  */
42 #define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
43 #define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
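/*
 * With ODEBUG_BATCH_SIZE = 16 this evaluates to 1024 / 16 = 64 batches,
 * i.e. 64 * 16 = 1024 objects per work invocation, and the work is
 * scheduled with a delay of about HZ / 10 ticks, i.e. at most roughly
 * 10 invocations per second, which gives the ~10k objects per second above.
 */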
44 
45 struct debug_bucket {
46 	struct hlist_head	list;
47 	raw_spinlock_t		lock;
48 };
49 
50 struct obj_pool {
51 	struct hlist_head	objects;
52 	unsigned int		cnt;
53 	unsigned int		min_cnt;
54 	unsigned int		max_cnt;
55 } ____cacheline_aligned;
56 
57 
58 static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
59 	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
60 };
61 
62 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
63 
64 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
65 
66 static DEFINE_RAW_SPINLOCK(pool_lock);
67 
68 static struct obj_pool pool_global = {
69 	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
70 	.max_cnt	= ODEBUG_POOL_SIZE,
71 };
72 
73 static struct obj_pool pool_to_free = {
74 	.max_cnt	= UINT_MAX,
75 };
76 
77 static HLIST_HEAD(pool_boot);
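/*
 * Object pool hierarchy:
 *
 *  pool_boot    - static boot-time pool, used until obj_cache is set up
 *  pool_pcpu    - per-CPU front end pools for fast allocation and free
 *  pool_global  - global refill pool backed by the slab cache
 *  pool_to_free - surplus objects queued for release by the workqueue
 *
 * Per-CPU pools are refilled in batches from pool_to_free or pool_global;
 * full per-CPU pools push batches back the same way.
 */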
78 
79 /*
80  * Because of the presence of percpu free pools, the global pool count
81  * (reported as pool_free) under-counts the objects sitting in the percpu
82  * free pools, while obj_pool_used over-counts them. Adjustments are made
83  * in debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
84  * can be off.
85  */
86 static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
87 static int			obj_pool_used;
88 static int __data_racy		obj_pool_max_used;
89 static bool			obj_freeing;
90 
91 static int __data_racy			debug_objects_maxchain __read_mostly;
92 static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
93 static int __data_racy			debug_objects_fixups __read_mostly;
94 static int __data_racy			debug_objects_warnings __read_mostly;
95 static bool __data_racy			debug_objects_enabled __read_mostly
96 					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
97 
98 static const struct debug_obj_descr	*descr_test  __read_mostly;
99 static struct kmem_cache		*obj_cache __ro_after_init;
100 
101 /*
102  * Track numbers of kmem_cache_alloc()/free() calls done.
103  */
104 static int __data_racy		debug_objects_allocated;
105 static int __data_racy		debug_objects_freed;
106 
107 static void free_obj_work(struct work_struct *work);
108 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
109 
110 static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);
111 
112 static int __init enable_object_debug(char *str)
113 {
114 	debug_objects_enabled = true;
115 	return 0;
116 }
117 early_param("debug_objects", enable_object_debug);
118 
119 static int __init disable_object_debug(char *str)
120 {
121 	debug_objects_enabled = false;
122 	return 0;
123 }
124 early_param("no_debug_objects", disable_object_debug);
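/*
 * The default set by CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT can be overridden
 * on the kernel command line with the "debug_objects" and
 * "no_debug_objects" boot parameters handled above.
 */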
125 
126 static const char *obj_states[ODEBUG_STATE_MAX] = {
127 	[ODEBUG_STATE_NONE]		= "none",
128 	[ODEBUG_STATE_INIT]		= "initialized",
129 	[ODEBUG_STATE_INACTIVE]		= "inactive",
130 	[ODEBUG_STATE_ACTIVE]		= "active",
131 	[ODEBUG_STATE_DESTROYED]	= "destroyed",
132 	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
133 };
134 
135 static __always_inline unsigned int pool_count(struct obj_pool *pool)
136 {
137 	return READ_ONCE(pool->cnt);
138 }
139 
140 static __always_inline bool pool_should_refill(struct obj_pool *pool)
141 {
142 	return pool_count(pool) < pool->min_cnt;
143 }
144 
145 static __always_inline bool pool_must_refill(struct obj_pool *pool)
146 {
147 	return pool_count(pool) < pool->min_cnt / 2;
148 }
149 
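/*
 * Move up to one batch of objects from @src to @dst. Fails when @dst has
 * no room for a full batch or @src is empty. The counts are updated with
 * WRITE_ONCE() as they are read locklessly by the refill heuristics.
 */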
150 static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
151 {
152 	if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
153 		return false;
154 
155 	for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
156 		struct hlist_node *node = src->objects.first;
157 
158 		WRITE_ONCE(src->cnt, src->cnt - 1);
159 		WRITE_ONCE(dst->cnt, dst->cnt + 1);
160 
161 		hlist_del(node);
162 		hlist_add_head(node, &dst->objects);
163 	}
164 	return true;
165 }
166 
167 static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
168 {
169 	if (!src->cnt)
170 		return false;
171 
172 	for (int i = 0; src->cnt && i < ODEBUG_BATCH_SIZE; i++) {
173 		struct hlist_node *node = src->objects.first;
174 
175 		WRITE_ONCE(src->cnt, src->cnt - 1);
176 		hlist_del(node);
177 		hlist_add_head(node, head);
178 	}
179 	return true;
180 }
181 
182 static struct debug_obj *__alloc_object(struct hlist_head *list)
183 {
184 	struct debug_obj *obj;
185 
186 	if (unlikely(!list->first))
187 		return NULL;
188 
189 	obj = hlist_entry(list->first, typeof(*obj), node);
190 	hlist_del(&obj->node);
191 	return obj;
192 }
193 
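/*
 * Allocate an object from the per-CPU pool. An empty pool is refilled
 * with a batch, preferring objects parked on pool_to_free over
 * pool_global so surplus objects are reused first. Returns NULL when
 * both backing pools are exhausted.
 */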
194 static struct debug_obj *pcpu_alloc(void)
195 {
196 	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
197 
198 	lockdep_assert_irqs_disabled();
199 
200 	for (;;) {
201 		struct debug_obj *obj = __alloc_object(&pcp->objects);
202 
203 		if (likely(obj)) {
204 			pcp->cnt--;
205 			return obj;
206 		}
207 
208 		guard(raw_spinlock)(&pool_lock);
209 		if (!pool_move_batch(pcp, &pool_to_free)) {
210 			if (!pool_move_batch(pcp, &pool_global))
211 				return NULL;
212 		}
213 		obj_pool_used += pcp->cnt;
214 
215 		if (obj_pool_used > obj_pool_max_used)
216 			obj_pool_max_used = obj_pool_used;
217 
218 		if (pool_global.cnt < obj_pool_min_free)
219 			obj_pool_min_free = pool_global.cnt;
220 	}
221 }
222 
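/*
 * Return an object to the per-CPU pool. Once the pool reaches
 * ODEBUG_POOL_PERCPU_SIZE, a batch is pushed back, preferably into
 * pool_global and otherwise onto pool_to_free for the workqueue to
 * release.
 */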
223 static void pcpu_free(struct debug_obj *obj)
224 {
225 	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
226 
227 	lockdep_assert_irqs_disabled();
228 
229 	hlist_add_head(&obj->node, &pcp->objects);
230 	pcp->cnt++;
231 
232 	/* Pool full ? */
233 	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
234 		return;
235 
236 	/* Remove a batch from the per CPU pool */
237 	guard(raw_spinlock)(&pool_lock);
238 	/* Try to fit the batch into the pool_global first */
239 	if (!pool_move_batch(&pool_global, pcp))
240 		pool_move_batch(&pool_to_free, pcp);
241 	obj_pool_used -= ODEBUG_BATCH_SIZE;
242 }
243 
244 static void free_object_list(struct hlist_head *head)
245 {
246 	struct hlist_node *tmp;
247 	struct debug_obj *obj;
248 	int cnt = 0;
249 
250 	hlist_for_each_entry_safe(obj, tmp, head, node) {
251 		hlist_del(&obj->node);
252 		kmem_cache_free(obj_cache, obj);
253 		cnt++;
254 	}
255 	debug_objects_freed += cnt;
256 }
257 
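/*
 * Move batches of objects parked on pool_to_free back into pool_global
 * without touching the slab allocator. A single state bit serializes
 * concurrent refillers.
 */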
258 static void fill_pool_from_freelist(void)
259 {
260 	static unsigned long state;
261 
262 	/*
263 	 * Reuse objects from the global pool_to_free list; they will be
264 	 * reinitialized when allocated.
265 	 */
266 	if (!pool_count(&pool_to_free))
267 		return;
268 
269 	/*
270 	 * Prevent the context from being scheduled or interrupted after
271 	 * setting the state flag.
272 	 */
273 	guard(irqsave)();
274 
275 	/*
276 	 * Avoid lock contention on &pool_lock and avoid making the cache
277 	 * line exclusive by testing the bit before attempting to set it.
278 	 */
279 	if (test_bit(0, &state) || test_and_set_bit(0, &state))
280 		return;
281 
282 	/* Avoid taking the lock when there is no work to do */
283 	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
284 		guard(raw_spinlock)(&pool_lock);
285 		/* Move a batch if possible */
286 		pool_move_batch(&pool_global, &pool_to_free);
287 	}
288 	clear_bit(0, &state);
289 }
290 
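/*
 * Refill pool_global from the slab cache in batches of ODEBUG_BATCH_SIZE.
 * The allocations use __GFP_HIGH | __GFP_NOWARN, i.e. no direct reclaim
 * and no failure warnings. cpus_allocating avoids piling onto the
 * allocator when another CPU is already refilling, unless the pool has
 * dropped below the critical level.
 */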
291 static void fill_pool(void)
292 {
293 	static atomic_t cpus_allocating;
294 
295 	/*
296 	 * Avoid allocation and lock contention when:
297 	 *   - One other CPU is already allocating
298 	 *   - the global pool has not reached the critical level yet
299 	 */
300 	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
301 		return;
302 
303 	atomic_inc(&cpus_allocating);
304 	while (pool_should_refill(&pool_global)) {
305 		struct debug_obj *new, *last = NULL;
306 		HLIST_HEAD(head);
307 		int cnt;
308 
309 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
310 			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
311 			if (!new)
312 				break;
313 			hlist_add_head(&new->node, &head);
314 			if (!last)
315 				last = new;
316 		}
317 		if (!cnt)
318 			break;
319 
320 		guard(raw_spinlock_irqsave)(&pool_lock);
321 		hlist_splice_init(&head, &last->node, &pool_global.objects);
322 		debug_objects_allocated += cnt;
323 		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
324 	}
325 	atomic_dec(&cpus_allocating);
326 }
327 
328 /*
329  * Lookup an object in the hash bucket.
330  */
331 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
332 {
333 	struct debug_obj *obj;
334 	int cnt = 0;
335 
336 	hlist_for_each_entry(obj, &b->list, node) {
337 		cnt++;
338 		if (obj->object == addr)
339 			return obj;
340 	}
341 	if (cnt > debug_objects_maxchain)
342 		debug_objects_maxchain = cnt;
343 
344 	return NULL;
345 }
346 
347 static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
348 				      const struct debug_obj_descr *descr)
349 {
350 	struct debug_obj *obj;
351 
352 	if (static_branch_likely(&obj_cache_enabled))
353 		obj = pcpu_alloc();
354 	else
355 		obj = __alloc_object(&pool_boot);
356 
357 	if (likely(obj)) {
358 		obj->object = addr;
359 		obj->descr  = descr;
360 		obj->state  = ODEBUG_STATE_NONE;
361 		obj->astate = 0;
362 		hlist_add_head(&obj->node, &b->list);
363 	}
364 	return obj;
365 }
366 
367 /* workqueue function to free objects. */
368 static void free_obj_work(struct work_struct *work)
369 {
370 	bool free = true;
371 
372 	WRITE_ONCE(obj_freeing, false);
373 
374 	if (!pool_count(&pool_to_free))
375 		return;
376 
377 	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
378 		HLIST_HEAD(tofree);
379 
380 		/* Acquire and drop the lock for each batch */
381 		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
382 			if (!pool_to_free.cnt)
383 				return;
384 
385 			/* Refill the global pool if possible */
386 			if (pool_move_batch(&pool_global, &pool_to_free)) {
387 				/* Don't free as there seems to be demand */
388 				free = false;
389 			} else if (free) {
390 				pool_pop_batch(&tofree, &pool_to_free);
391 			} else {
392 				return;
393 			}
394 		}
395 		free_object_list(&tofree);
396 	}
397 }
398 
399 static void __free_object(struct debug_obj *obj)
400 {
401 	guard(irqsave)();
402 	if (static_branch_likely(&obj_cache_enabled))
403 		pcpu_free(obj);
404 	else
405 		hlist_add_head(&obj->node, &pool_boot);
406 }
407 
408 /*
409  * Put the object back into the pool and schedule work to free objects
410  * if necessary.
411  */
412 static void free_object(struct debug_obj *obj)
413 {
414 	__free_object(obj);
415 	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
416 		WRITE_ONCE(obj_freeing, true);
417 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
418 	}
419 }
420 
421 static void put_objects(struct hlist_head *list)
422 {
423 	struct hlist_node *tmp;
424 	struct debug_obj *obj;
425 
426 	/*
427 	 * Using free_object() puts the objects into reuse or schedules
428 	 * them for freeing and it gets all the accounting correct.
429 	 */
430 	hlist_for_each_entry_safe(obj, tmp, list, node) {
431 		hlist_del(&obj->node);
432 		free_object(obj);
433 	}
434 }
435 
436 #ifdef CONFIG_HOTPLUG_CPU
437 static int object_cpu_offline(unsigned int cpu)
438 {
439 	/* Remote access is safe as the CPU is dead already */
440 	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);
441 
442 	put_objects(&pcp->objects);
443 	pcp->cnt = 0;
444 	return 0;
445 }
446 #endif
447 
448 /* Out of memory. Free all objects from hash */
449 static void debug_objects_oom(void)
450 {
451 	struct debug_bucket *db = obj_hash;
452 	HLIST_HEAD(freelist);
453 
454 	pr_warn("Out of memory. ODEBUG disabled\n");
455 
456 	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
457 		scoped_guard(raw_spinlock_irqsave, &db->lock)
458 			hlist_move_list(&db->list, &freelist);
459 
460 		put_objects(&freelist);
461 	}
462 }
463 
464 /*
465  * We use the page-sized chunk index of the address for the hash. That way
466  * we can check for freed objects simply by checking the affected bucket.
467  */
468 static struct debug_bucket *get_bucket(unsigned long addr)
469 {
470 	unsigned long hash;
471 
472 	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
473 	return &obj_hash[hash];
474 }
475 
476 static void debug_print_object(struct debug_obj *obj, char *msg)
477 {
478 	const struct debug_obj_descr *descr = obj->descr;
479 	static int limit;
480 
481 	/*
482 	 * Don't report if lookup_object_or_alloc() by the current thread
483 	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
484 	 * concurrent thread turned off debug_objects_enabled and cleared
485 	 * the hash buckets.
486 	 */
487 	if (!debug_objects_enabled)
488 		return;
489 
490 	if (limit < 5 && descr != descr_test) {
491 		void *hint = descr->debug_hint ?
492 			descr->debug_hint(obj->object) : NULL;
493 		limit++;
494 		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
495 				 "object: %p object type: %s hint: %pS\n",
496 			msg, obj_states[obj->state], obj->astate,
497 			obj->object, descr->name, hint);
498 	}
499 	debug_objects_warnings++;
500 }
501 
502 /*
503  * Try to repair the damage, so we have a better chance to get useful
504  * debug output.
505  */
506 static bool
507 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
508 		   void * addr, enum debug_obj_state state)
509 {
510 	if (fixup && fixup(addr, state)) {
511 		debug_objects_fixups++;
512 		return true;
513 	}
514 	return false;
515 }
516 
517 static void debug_object_is_on_stack(void *addr, int onstack)
518 {
519 	int is_on_stack;
520 	static int limit;
521 
522 	if (limit > 4)
523 		return;
524 
525 	is_on_stack = object_is_on_stack(addr);
526 	if (is_on_stack == onstack)
527 		return;
528 
529 	limit++;
530 	if (is_on_stack)
531 		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
532 			 task_stack_page(current));
533 	else
534 		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
535 			 task_stack_page(current));
536 
537 	WARN_ON(1);
538 }
539 
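/*
 * Returns the tracking object if it is found or could be allocated,
 * ERR_PTR(-ENOENT) when allocation is restricted to static objects and
 * the object is not static, or NULL when the object pools are exhausted.
 * The caller handles the NULL (OOM) case.
 */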
540 static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
541 						const struct debug_obj_descr *descr,
542 						bool onstack, bool alloc_ifstatic)
543 {
544 	struct debug_obj *obj = lookup_object(addr, b);
545 	enum debug_obj_state state = ODEBUG_STATE_NONE;
546 
547 	if (likely(obj))
548 		return obj;
549 
550 	/*
551 	 * debug_object_init() unconditionally allocates untracked
552 	 * objects. It does not matter whether it is a static object or
553 	 * not.
554 	 *
555 	 * debug_object_assert_init() and debug_object_activate() allow
556 	 * allocation only if the descriptor callback confirms that the
557 	 * object is static and considered initialized. For non-static
558 	 * objects the allocation needs to be done from the fixup callback.
559 	 */
560 	if (unlikely(alloc_ifstatic)) {
561 		if (!descr->is_static_object || !descr->is_static_object(addr))
562 			return ERR_PTR(-ENOENT);
563 		/* Statically allocated objects are considered initialized */
564 		state = ODEBUG_STATE_INIT;
565 	}
566 
567 	obj = alloc_object(addr, b, descr);
568 	if (likely(obj)) {
569 		obj->state = state;
570 		debug_object_is_on_stack(addr, onstack);
571 		return obj;
572 	}
573 
574 	/* Out of memory. Do the cleanup outside of the locked region */
575 	debug_objects_enabled = false;
576 	return NULL;
577 }
578 
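/*
 * Keep pool_global filled ahead of demand: first recycle objects parked
 * on pool_to_free and only fall back to allocating new ones when that is
 * not sufficient and the context permits it.
 */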
579 static void debug_objects_fill_pool(void)
580 {
581 	if (!static_branch_likely(&obj_cache_enabled))
582 		return;
583 
584 	if (likely(!pool_should_refill(&pool_global)))
585 		return;
586 
587 	/* Try reusing objects from the pool_to_free list */
588 	fill_pool_from_freelist();
589 
590 	if (likely(!pool_should_refill(&pool_global)))
591 		return;
592 
593 	/*
594 	 * On RT enabled kernels the pool refill must happen in preemptible
595 	 * context -- for !RT kernels we rely on the fact that spinlock_t and
596 	 * raw_spinlock_t are basically the same type and this lock-type
597 	 * inversion works just fine.
598 	 */
599 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
600 		/*
601 		 * Annotate away the spinlock_t inside raw_spinlock_t warning
602 		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
603 		 * the preemptible() condition above.
604 		 */
605 		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
606 		lock_map_acquire_try(&fill_pool_map);
607 		fill_pool();
608 		lock_map_release(&fill_pool_map);
609 	}
610 }
611 
612 static void
613 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
614 {
615 	struct debug_obj *obj, o;
616 	struct debug_bucket *db;
617 	unsigned long flags;
618 
619 	debug_objects_fill_pool();
620 
621 	db = get_bucket((unsigned long) addr);
622 
623 	raw_spin_lock_irqsave(&db->lock, flags);
624 
625 	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
626 	if (unlikely(!obj)) {
627 		raw_spin_unlock_irqrestore(&db->lock, flags);
628 		debug_objects_oom();
629 		return;
630 	}
631 
632 	switch (obj->state) {
633 	case ODEBUG_STATE_NONE:
634 	case ODEBUG_STATE_INIT:
635 	case ODEBUG_STATE_INACTIVE:
636 		obj->state = ODEBUG_STATE_INIT;
637 		raw_spin_unlock_irqrestore(&db->lock, flags);
638 		return;
639 	default:
640 		break;
641 	}
642 
643 	o = *obj;
644 	raw_spin_unlock_irqrestore(&db->lock, flags);
645 	debug_print_object(&o, "init");
646 
647 	if (o.state == ODEBUG_STATE_ACTIVE)
648 		debug_object_fixup(descr->fixup_init, addr, o.state);
649 }
650 
651 /**
652  * debug_object_init - debug checks when an object is initialized
653  * @addr:	address of the object
654  * @descr:	pointer to an object specific debug description structure
655  */
656 void debug_object_init(void *addr, const struct debug_obj_descr *descr)
657 {
658 	if (!debug_objects_enabled)
659 		return;
660 
661 	__debug_object_init(addr, descr, 0);
662 }
663 EXPORT_SYMBOL_GPL(debug_object_init);
664 
665 /**
666  * debug_object_init_on_stack - debug checks when an object on stack is
667  *				initialized
668  * @addr:	address of the object
669  * @descr:	pointer to an object specific debug description structure
670  */
671 void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
672 {
673 	if (!debug_objects_enabled)
674 		return;
675 
676 	__debug_object_init(addr, descr, 1);
677 }
678 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
679 
680 /**
681  * debug_object_activate - debug checks when an object is activated
682  * @addr:	address of the object
683  * @descr:	pointer to an object specific debug description structure
684  * Returns 0 on success, -EINVAL if the check failed.
685  */
686 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
687 {
688 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
689 	struct debug_bucket *db;
690 	struct debug_obj *obj;
691 	unsigned long flags;
692 
693 	if (!debug_objects_enabled)
694 		return 0;
695 
696 	debug_objects_fill_pool();
697 
698 	db = get_bucket((unsigned long) addr);
699 
700 	raw_spin_lock_irqsave(&db->lock, flags);
701 
702 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
703 	if (unlikely(!obj)) {
704 		raw_spin_unlock_irqrestore(&db->lock, flags);
705 		debug_objects_oom();
706 		return 0;
707 	} else if (likely(!IS_ERR(obj))) {
708 		switch (obj->state) {
709 		case ODEBUG_STATE_ACTIVE:
710 		case ODEBUG_STATE_DESTROYED:
711 			o = *obj;
712 			break;
713 		case ODEBUG_STATE_INIT:
714 		case ODEBUG_STATE_INACTIVE:
715 			obj->state = ODEBUG_STATE_ACTIVE;
716 			fallthrough;
717 		default:
718 			raw_spin_unlock_irqrestore(&db->lock, flags);
719 			return 0;
720 		}
721 	}
722 
723 	raw_spin_unlock_irqrestore(&db->lock, flags);
724 	debug_print_object(&o, "activate");
725 
726 	switch (o.state) {
727 	case ODEBUG_STATE_ACTIVE:
728 	case ODEBUG_STATE_NOTAVAILABLE:
729 		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
730 			return 0;
731 		fallthrough;
732 	default:
733 		return -EINVAL;
734 	}
735 }
736 EXPORT_SYMBOL_GPL(debug_object_activate);
737 
738 /**
739  * debug_object_deactivate - debug checks when an object is deactivated
740  * @addr:	address of the object
741  * @descr:	pointer to an object specific debug description structure
742  */
743 void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
744 {
745 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
746 	struct debug_bucket *db;
747 	struct debug_obj *obj;
748 	unsigned long flags;
749 
750 	if (!debug_objects_enabled)
751 		return;
752 
753 	db = get_bucket((unsigned long) addr);
754 
755 	raw_spin_lock_irqsave(&db->lock, flags);
756 
757 	obj = lookup_object(addr, db);
758 	if (obj) {
759 		switch (obj->state) {
760 		case ODEBUG_STATE_DESTROYED:
761 			break;
762 		case ODEBUG_STATE_INIT:
763 		case ODEBUG_STATE_INACTIVE:
764 		case ODEBUG_STATE_ACTIVE:
765 			if (obj->astate)
766 				break;
767 			obj->state = ODEBUG_STATE_INACTIVE;
768 			fallthrough;
769 		default:
770 			raw_spin_unlock_irqrestore(&db->lock, flags);
771 			return;
772 		}
773 		o = *obj;
774 	}
775 
776 	raw_spin_unlock_irqrestore(&db->lock, flags);
777 	debug_print_object(&o, "deactivate");
778 }
779 EXPORT_SYMBOL_GPL(debug_object_deactivate);
780 
781 /**
782  * debug_object_destroy - debug checks when an object is destroyed
783  * @addr:	address of the object
784  * @descr:	pointer to an object specific debug description structure
785  */
786 void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
787 {
788 	struct debug_obj *obj, o;
789 	struct debug_bucket *db;
790 	unsigned long flags;
791 
792 	if (!debug_objects_enabled)
793 		return;
794 
795 	db = get_bucket((unsigned long) addr);
796 
797 	raw_spin_lock_irqsave(&db->lock, flags);
798 
799 	obj = lookup_object(addr, db);
800 	if (!obj) {
801 		raw_spin_unlock_irqrestore(&db->lock, flags);
802 		return;
803 	}
804 
805 	switch (obj->state) {
806 	case ODEBUG_STATE_ACTIVE:
807 	case ODEBUG_STATE_DESTROYED:
808 		break;
809 	case ODEBUG_STATE_NONE:
810 	case ODEBUG_STATE_INIT:
811 	case ODEBUG_STATE_INACTIVE:
812 		obj->state = ODEBUG_STATE_DESTROYED;
813 		fallthrough;
814 	default:
815 		raw_spin_unlock_irqrestore(&db->lock, flags);
816 		return;
817 	}
818 
819 	o = *obj;
820 	raw_spin_unlock_irqrestore(&db->lock, flags);
821 	debug_print_object(&o, "destroy");
822 
823 	if (o.state == ODEBUG_STATE_ACTIVE)
824 		debug_object_fixup(descr->fixup_destroy, addr, o.state);
825 }
826 EXPORT_SYMBOL_GPL(debug_object_destroy);
827 
828 /**
829  * debug_object_free - debug checks when an object is freed
830  * @addr:	address of the object
831  * @descr:	pointer to an object specific debug description structure
832  */
833 void debug_object_free(void *addr, const struct debug_obj_descr *descr)
834 {
835 	struct debug_obj *obj, o;
836 	struct debug_bucket *db;
837 	unsigned long flags;
838 
839 	if (!debug_objects_enabled)
840 		return;
841 
842 	db = get_bucket((unsigned long) addr);
843 
844 	raw_spin_lock_irqsave(&db->lock, flags);
845 
846 	obj = lookup_object(addr, db);
847 	if (!obj) {
848 		raw_spin_unlock_irqrestore(&db->lock, flags);
849 		return;
850 	}
851 
852 	switch (obj->state) {
853 	case ODEBUG_STATE_ACTIVE:
854 		break;
855 	default:
856 		hlist_del(&obj->node);
857 		raw_spin_unlock_irqrestore(&db->lock, flags);
858 		free_object(obj);
859 		return;
860 	}
861 
862 	o = *obj;
863 	raw_spin_unlock_irqrestore(&db->lock, flags);
864 	debug_print_object(&o, "free");
865 
866 	debug_object_fixup(descr->fixup_free, addr, o.state);
867 }
868 EXPORT_SYMBOL_GPL(debug_object_free);
869 
870 /**
871  * debug_object_assert_init - debug checks when object should be init-ed
872  * @addr:	address of the object
873  * @descr:	pointer to an object specific debug description structure
874  */
875 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
876 {
877 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
878 	struct debug_bucket *db;
879 	struct debug_obj *obj;
880 	unsigned long flags;
881 
882 	if (!debug_objects_enabled)
883 		return;
884 
885 	debug_objects_fill_pool();
886 
887 	db = get_bucket((unsigned long) addr);
888 
889 	raw_spin_lock_irqsave(&db->lock, flags);
890 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
891 	raw_spin_unlock_irqrestore(&db->lock, flags);
892 	if (likely(!IS_ERR_OR_NULL(obj)))
893 		return;
894 
895 	/* If NULL the allocation has hit OOM */
896 	if (!obj) {
897 		debug_objects_oom();
898 		return;
899 	}
900 
901 	/* Object is neither tracked nor static. It's not initialized. */
902 	debug_print_object(&o, "assert_init");
903 	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
904 }
905 EXPORT_SYMBOL_GPL(debug_object_assert_init);
906 
907 /**
908  * debug_object_active_state - debug checks object usage state machine
909  * @addr:	address of the object
910  * @descr:	pointer to an object specific debug description structure
911  * @expect:	expected state
912  * @next:	state to move to if expected state is found
913  */
914 void
915 debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
916 			  unsigned int expect, unsigned int next)
917 {
918 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
919 	struct debug_bucket *db;
920 	struct debug_obj *obj;
921 	unsigned long flags;
922 
923 	if (!debug_objects_enabled)
924 		return;
925 
926 	db = get_bucket((unsigned long) addr);
927 
928 	raw_spin_lock_irqsave(&db->lock, flags);
929 
930 	obj = lookup_object(addr, db);
931 	if (obj) {
932 		switch (obj->state) {
933 		case ODEBUG_STATE_ACTIVE:
934 			if (obj->astate != expect)
935 				break;
936 			obj->astate = next;
937 			raw_spin_unlock_irqrestore(&db->lock, flags);
938 			return;
939 		default:
940 			break;
941 		}
942 		o = *obj;
943 	}
944 
945 	raw_spin_unlock_irqrestore(&db->lock, flags);
946 	debug_print_object(&o, "active_state");
947 }
948 EXPORT_SYMBOL_GPL(debug_object_active_state);
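/*
 * Illustrative sketch of how a subsystem typically hooks into this API.
 * The names below (struct foo, foo_debug_descr, foo_*()) are made up for
 * the example and are not taken from an in-tree user:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name	= "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_stop(struct foo *f)
 *	{
 *		debug_object_deactivate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_release(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *	}
 *
 * Optional is_static_object() and fixup_*() callbacks can be added to the
 * descriptor; the selftest code below demonstrates all of them.
 */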
949 
950 #ifdef CONFIG_DEBUG_OBJECTS_FREE
951 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
952 {
953 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
954 	int cnt, objs_checked = 0;
955 	struct debug_obj *obj, o;
956 	struct debug_bucket *db;
957 	struct hlist_node *tmp;
958 
959 	saddr = (unsigned long) address;
960 	eaddr = saddr + size;
961 	paddr = saddr & ODEBUG_CHUNK_MASK;
962 	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
963 	chunks >>= ODEBUG_CHUNK_SHIFT;
964 
965 	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
966 		db = get_bucket(paddr);
967 
968 repeat:
969 		cnt = 0;
970 		raw_spin_lock_irqsave(&db->lock, flags);
971 		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
972 			cnt++;
973 			oaddr = (unsigned long) obj->object;
974 			if (oaddr < saddr || oaddr >= eaddr)
975 				continue;
976 
977 			switch (obj->state) {
978 			case ODEBUG_STATE_ACTIVE:
979 				o = *obj;
980 				raw_spin_unlock_irqrestore(&db->lock, flags);
981 				debug_print_object(&o, "free");
982 				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
983 				goto repeat;
984 			default:
985 				hlist_del(&obj->node);
986 				__free_object(obj);
987 				break;
988 			}
989 		}
990 		raw_spin_unlock_irqrestore(&db->lock, flags);
991 
992 		if (cnt > debug_objects_maxchain)
993 			debug_objects_maxchain = cnt;
994 
995 		objs_checked += cnt;
996 	}
997 
998 	if (objs_checked > debug_objects_maxchecked)
999 		debug_objects_maxchecked = objs_checked;
1000 
1001 	/* Schedule work to actually kmem_cache_free() objects */
1002 	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
1003 		WRITE_ONCE(obj_freeing, true);
1004 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1005 	}
1006 }
1007 
1008 void debug_check_no_obj_freed(const void *address, unsigned long size)
1009 {
1010 	if (debug_objects_enabled)
1011 		__debug_check_no_obj_freed(address, size);
1012 }
1013 #endif
1014 
1015 #ifdef CONFIG_DEBUG_FS
1016 
1017 static int debug_stats_show(struct seq_file *m, void *v)
1018 {
1019 	int cpu, obj_percpu_free = 0;
1020 
1021 	for_each_possible_cpu(cpu)
1022 		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);
1023 
1024 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1025 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1026 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1027 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1028 	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
1029 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1030 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1031 	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1032 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1033 	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
1034 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1035 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1036 	return 0;
1037 }
1038 DEFINE_SHOW_ATTRIBUTE(debug_stats);
1039 
1040 static int __init debug_objects_init_debugfs(void)
1041 {
1042 	struct dentry *dbgdir;
1043 
1044 	if (!debug_objects_enabled)
1045 		return 0;
1046 
1047 	dbgdir = debugfs_create_dir("debug_objects", NULL);
1048 
1049 	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1050 
1051 	return 0;
1052 }
1053 __initcall(debug_objects_init_debugfs);
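/*
 * With debugfs mounted at the usual location the statistics above are
 * readable via /sys/kernel/debug/debug_objects/stats, one "name:value"
 * pair per line.
 */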
1054 
1055 #else
1056 static inline void debug_objects_init_debugfs(void) { }
1057 #endif
1058 
1059 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1060 
1061 /* Random data structure for the self test */
1062 struct self_test {
1063 	unsigned long	dummy1[6];
1064 	int		static_init;
1065 	unsigned long	dummy2[3];
1066 };
1067 
1068 static __initconst const struct debug_obj_descr descr_type_test;
1069 
1070 static bool __init is_static_object(void *addr)
1071 {
1072 	struct self_test *obj = addr;
1073 
1074 	return obj->static_init;
1075 }
1076 
1077 /*
1078  * fixup_init is called when:
1079  * - an active object is initialized
1080  */
1081 static bool __init fixup_init(void *addr, enum debug_obj_state state)
1082 {
1083 	struct self_test *obj = addr;
1084 
1085 	switch (state) {
1086 	case ODEBUG_STATE_ACTIVE:
1087 		debug_object_deactivate(obj, &descr_type_test);
1088 		debug_object_init(obj, &descr_type_test);
1089 		return true;
1090 	default:
1091 		return false;
1092 	}
1093 }
1094 
1095 /*
1096  * fixup_activate is called when:
1097  * - an active object is activated
1098  * - an unknown non-static object is activated
1099  */
1100 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1101 {
1102 	struct self_test *obj = addr;
1103 
1104 	switch (state) {
1105 	case ODEBUG_STATE_NOTAVAILABLE:
1106 		return true;
1107 	case ODEBUG_STATE_ACTIVE:
1108 		debug_object_deactivate(obj, &descr_type_test);
1109 		debug_object_activate(obj, &descr_type_test);
1110 		return true;
1111 
1112 	default:
1113 		return false;
1114 	}
1115 }
1116 
1117 /*
1118  * fixup_destroy is called when:
1119  * - an active object is destroyed
1120  */
1121 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1122 {
1123 	struct self_test *obj = addr;
1124 
1125 	switch (state) {
1126 	case ODEBUG_STATE_ACTIVE:
1127 		debug_object_deactivate(obj, &descr_type_test);
1128 		debug_object_destroy(obj, &descr_type_test);
1129 		return true;
1130 	default:
1131 		return false;
1132 	}
1133 }
1134 
1135 /*
1136  * fixup_free is called when:
1137  * - an active object is freed
1138  */
1139 static bool __init fixup_free(void *addr, enum debug_obj_state state)
1140 {
1141 	struct self_test *obj = addr;
1142 
1143 	switch (state) {
1144 	case ODEBUG_STATE_ACTIVE:
1145 		debug_object_deactivate(obj, &descr_type_test);
1146 		debug_object_free(obj, &descr_type_test);
1147 		return true;
1148 	default:
1149 		return false;
1150 	}
1151 }
1152 
1153 static int __init
1154 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1155 {
1156 	struct debug_bucket *db;
1157 	struct debug_obj *obj;
1158 	unsigned long flags;
1159 	int res = -EINVAL;
1160 
1161 	db = get_bucket((unsigned long) addr);
1162 
1163 	raw_spin_lock_irqsave(&db->lock, flags);
1164 
1165 	obj = lookup_object(addr, db);
1166 	if (!obj && state != ODEBUG_STATE_NONE) {
1167 		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1168 		goto out;
1169 	}
1170 	if (obj && obj->state != state) {
1171 		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1172 		       obj->state, state);
1173 		goto out;
1174 	}
1175 	if (fixups != debug_objects_fixups) {
1176 		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1177 		       fixups, debug_objects_fixups);
1178 		goto out;
1179 	}
1180 	if (warnings != debug_objects_warnings) {
1181 		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1182 		       warnings, debug_objects_warnings);
1183 		goto out;
1184 	}
1185 	res = 0;
1186 out:
1187 	raw_spin_unlock_irqrestore(&db->lock, flags);
1188 	if (res)
1189 		debug_objects_enabled = false;
1190 	return res;
1191 }
1192 
1193 static __initconst const struct debug_obj_descr descr_type_test = {
1194 	.name			= "selftest",
1195 	.is_static_object	= is_static_object,
1196 	.fixup_init		= fixup_init,
1197 	.fixup_activate		= fixup_activate,
1198 	.fixup_destroy		= fixup_destroy,
1199 	.fixup_free		= fixup_free,
1200 };
1201 
1202 static __initdata struct self_test obj = { .static_init = 0 };
1203 
1204 static bool __init debug_objects_selftest(void)
1205 {
1206 	int fixups, oldfixups, warnings, oldwarnings;
1207 	unsigned long flags;
1208 
1209 	local_irq_save(flags);
1210 
1211 	fixups = oldfixups = debug_objects_fixups;
1212 	warnings = oldwarnings = debug_objects_warnings;
1213 	descr_test = &descr_type_test;
1214 
1215 	debug_object_init(&obj, &descr_type_test);
1216 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1217 		goto out;
1218 	debug_object_activate(&obj, &descr_type_test);
1219 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1220 		goto out;
1221 	debug_object_activate(&obj, &descr_type_test);
1222 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1223 		goto out;
1224 	debug_object_deactivate(&obj, &descr_type_test);
1225 	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1226 		goto out;
1227 	debug_object_destroy(&obj, &descr_type_test);
1228 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1229 		goto out;
1230 	debug_object_init(&obj, &descr_type_test);
1231 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1232 		goto out;
1233 	debug_object_activate(&obj, &descr_type_test);
1234 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1235 		goto out;
1236 	debug_object_deactivate(&obj, &descr_type_test);
1237 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1238 		goto out;
1239 	debug_object_free(&obj, &descr_type_test);
1240 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1241 		goto out;
1242 
1243 	obj.static_init = 1;
1244 	debug_object_activate(&obj, &descr_type_test);
1245 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1246 		goto out;
1247 	debug_object_init(&obj, &descr_type_test);
1248 	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1249 		goto out;
1250 	debug_object_free(&obj, &descr_type_test);
1251 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1252 		goto out;
1253 
1254 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1255 	debug_object_init(&obj, &descr_type_test);
1256 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1257 		goto out;
1258 	debug_object_activate(&obj, &descr_type_test);
1259 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1260 		goto out;
1261 	__debug_check_no_obj_freed(&obj, sizeof(obj));
1262 	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1263 		goto out;
1264 #endif
1265 	pr_info("selftest passed\n");
1266 
1267 out:
1268 	debug_objects_fixups = oldfixups;
1269 	debug_objects_warnings = oldwarnings;
1270 	descr_test = NULL;
1271 
1272 	local_irq_restore(flags);
1273 	return debug_objects_enabled;
1274 }
1275 #else
1276 static inline bool debug_objects_selftest(void) { return true; }
1277 #endif
1278 
1279 /*
1280  * Called during early boot to initialize the hash buckets and link
1281  * the static object pool objects into the boot pool list. After this call
1282  * the object tracker is fully operational.
1283  */
1284 void __init debug_objects_early_init(void)
1285 {
1286 	int i;
1287 
1288 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1289 		raw_spin_lock_init(&obj_hash[i].lock);
1290 
1291 	/* Keep early boot simple and add everything to the boot list */
1292 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1293 		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
1294 }
1295 
1296 /*
1297  * Convert the statically allocated objects to dynamic ones.
1298  * debug_objects_mem_init() is called early so only one CPU is up and
1299  * interrupts are disabled, which means it is safe to replace the active
1300  * object references.
1301  */
1302 static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
1303 {
1304 	struct debug_bucket *db = obj_hash;
1305 	struct debug_obj *obj, *new;
1306 	struct hlist_node *tmp;
1307 	HLIST_HEAD(objects);
1308 	int i;
1309 
1310 	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1311 		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
1312 		if (!obj)
1313 			goto free;
1314 		hlist_add_head(&obj->node, &objects);
1315 	}
1316 
1317 	debug_objects_allocated = ODEBUG_POOL_SIZE;
1318 	pool_global.cnt = ODEBUG_POOL_SIZE;
1319 
1320 	/*
1321 	 * Move the allocated objects to the global pool and disconnect the
1322 	 * boot pool.
1323 	 */
1324 	hlist_move_list(&objects, &pool_global.objects);
1325 	pool_boot.first = NULL;
1326 
1327 	/* Replace the active object references */
1328 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1329 		hlist_move_list(&db->list, &objects);
1330 
1331 		hlist_for_each_entry(obj, &objects, node) {
1332 			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
1333 			hlist_del(&new->node);
1334 			pool_global.cnt--;
1335 			/* copy object data */
1336 			*new = *obj;
1337 			hlist_add_head(&new->node, &db->list);
1338 		}
1339 	}
1340 	return true;
1341 free:
1342 	/* Can't use free_object_list() as the cache is not populated yet */
1343 	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1344 		hlist_del(&obj->node);
1345 		kmem_cache_free(cache, obj);
1346 	}
1347 	return false;
1348 }
1349 
1350 /*
1351  * Called after the kmem_caches are functional to set up a dedicated
1352  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1353  * prevents the debug code from being called on kmem_cache_free() for
1354  * the debug tracker objects, avoiding recursive calls.
1355  */
1356 void __init debug_objects_mem_init(void)
1357 {
1358 	struct kmem_cache *cache;
1359 	int extras;
1360 
1361 	if (!debug_objects_enabled)
1362 		return;
1363 
1364 	if (!debug_objects_selftest())
1365 		return;
1366 
1367 	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
1368 				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
1369 
1370 	if (!cache || !debug_objects_replace_static_objects(cache)) {
1371 		debug_objects_enabled = false;
1372 		pr_warn("Out of memory.\n");
1373 		return;
1374 	}
1375 
1376 	/*
1377 	 * Adjust the thresholds for allocating and freeing objects
1378 	 * according to the number of possible CPUs available in the
1379 	 * system.
1380 	 */
1381 	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1382 	pool_global.max_cnt += extras;
1383 	pool_global.min_cnt += extras;
1384 
1385 	/* Everything worked. Expose the cache */
1386 	obj_cache = cache;
1387 	static_branch_enable(&obj_cache_enabled);
1388 
1389 #ifdef CONFIG_HOTPLUG_CPU
1390 	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1391 				  object_cpu_offline);
1392 #endif
1393 	return;
1394 }
1395