1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic infrastructure for lifetime debugging of objects.
4  *
5  * Copyright (C) 2008, Thomas Gleixner <[email protected]>
6  */
7 
8 #define pr_fmt(fmt) "ODEBUG: " fmt
9 
10 #include <linux/cpu.h>
11 #include <linux/debugobjects.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/kmemleak.h>
15 #include <linux/sched.h>
16 #include <linux/sched/task_stack.h>
17 #include <linux/seq_file.h>
18 #include <linux/slab.h>
19 #include <linux/static_key.h>
20 
21 #define ODEBUG_HASH_BITS	14
22 #define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
23 
24 /* Must be power of two */
25 #define ODEBUG_BATCH_SIZE	16
26 
27 /* Initial values. Must all be multiples of the batch size */
28 #define ODEBUG_POOL_SIZE	(64 * ODEBUG_BATCH_SIZE)
29 #define ODEBUG_POOL_MIN_LEVEL	(ODEBUG_POOL_SIZE / 4)
30 
31 #define ODEBUG_POOL_PERCPU_SIZE	(4 * ODEBUG_BATCH_SIZE)
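/*
 * With the values above the initial global pool holds 1024 objects, the
 * refill threshold is 256 objects and each per CPU pool caches up to 64
 * objects.
 */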
32 
33 #define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
34 #define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
35 #define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
36 
37 /*
38  * Freeing of debug objects via the workqueue is limited to a maximum
39  * frequency of 10Hz and about 1024 objects per freeing operation,
40  * i.e. at most 10k debug objects are freed per second.
41  */
42 #define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
43 #define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
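/*
 * With a batch size of 16 that is 64 batches, i.e. up to 1024 objects,
 * freed per work invocation, and invocations are spaced HZ/10 jiffies
 * (~100ms) apart.
 */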
44 
45 struct debug_bucket {
46 	struct hlist_head	list;
47 	raw_spinlock_t		lock;
48 };
49 
50 struct obj_pool {
51 	struct hlist_head	objects;
52 	unsigned int		cnt;
53 	unsigned int		min_cnt;
54 	unsigned int		max_cnt;
55 } ____cacheline_aligned;
56 
57 
58 static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu)  = {
59 	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
60 };
61 
62 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
63 
64 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
65 
66 static DEFINE_RAW_SPINLOCK(pool_lock);
67 
68 static struct obj_pool pool_global = {
69 	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
70 	.max_cnt	= ODEBUG_POOL_SIZE,
71 };
72 
73 static struct obj_pool pool_to_free = {
74 	.max_cnt	= UINT_MAX,
75 };
76 
77 static HLIST_HEAD(pool_boot);
78 
79 /*
80  * Because of the presence of percpu free pools, obj_pool_free will
81  * under-count those in the percpu free pools. Similarly, obj_pool_used
82  * will over-count those in the percpu free pools. Adjustments will be
83  * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
84  * can be off.
85  */
86 static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
87 static int			obj_pool_used;
88 static int __data_racy		obj_pool_max_used;
89 static bool			obj_freeing;
90 
91 static int __data_racy			debug_objects_maxchain __read_mostly;
92 static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
93 static int __data_racy			debug_objects_fixups __read_mostly;
94 static int __data_racy			debug_objects_warnings __read_mostly;
95 static bool __data_racy			debug_objects_enabled __read_mostly
96 					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
97 
98 static const struct debug_obj_descr	*descr_test  __read_mostly;
99 static struct kmem_cache		*obj_cache __ro_after_init;
100 
101 /*
102  * Track the number of kmem_cache_alloc() and kmem_cache_free() calls made.
103  */
104 static int __data_racy		debug_objects_allocated;
105 static int __data_racy		debug_objects_freed;
106 
107 static void free_obj_work(struct work_struct *work);
108 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
109 
110 static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);
111 
112 static int __init enable_object_debug(char *str)
113 {
114 	debug_objects_enabled = true;
115 	return 0;
116 }
117 early_param("debug_objects", enable_object_debug);
118 
119 static int __init disable_object_debug(char *str)
120 {
121 	debug_objects_enabled = false;
122 	return 0;
123 }
124 early_param("no_debug_objects", disable_object_debug);
125 
126 static const char *obj_states[ODEBUG_STATE_MAX] = {
127 	[ODEBUG_STATE_NONE]		= "none",
128 	[ODEBUG_STATE_INIT]		= "initialized",
129 	[ODEBUG_STATE_INACTIVE]		= "inactive",
130 	[ODEBUG_STATE_ACTIVE]		= "active",
131 	[ODEBUG_STATE_DESTROYED]	= "destroyed",
132 	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
133 };
134 
135 static __always_inline unsigned int pool_count(struct obj_pool *pool)
136 {
137 	return READ_ONCE(pool->cnt);
138 }
139 
140 static __always_inline bool pool_should_refill(struct obj_pool *pool)
141 {
142 	return pool_count(pool) < pool->min_cnt;
143 }
144 
145 static __always_inline bool pool_must_refill(struct obj_pool *pool)
146 {
147 	return pool_count(pool) < pool->min_cnt / 2;
148 }
149 
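/*
 * Move one batch of up to ODEBUG_BATCH_SIZE objects from @src to @dst.
 * Fails when @dst cannot take a full batch or when @src is empty.
 */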
150 static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
151 {
152 	if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
153 		return false;
154 
155 	for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
156 		struct hlist_node *node = src->objects.first;
157 
158 		WRITE_ONCE(src->cnt, src->cnt - 1);
159 		WRITE_ONCE(dst->cnt, dst->cnt + 1);
160 
161 		hlist_del(node);
162 		hlist_add_head(node, &dst->objects);
163 	}
164 	return true;
165 }
166 
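/*
 * Splice a complete batch from @head into @dst in one go. The first
 * object on @head carries the list tail of the batch in obj->batch_last,
 * which allows the O(1) splice. Fails when @dst is already at its
 * maximum fill level.
 */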
167 static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
168 {
169 	struct hlist_node *last;
170 	struct debug_obj *obj;
171 
172 	if (dst->cnt >= dst->max_cnt)
173 		return false;
174 
175 	obj = hlist_entry(head->first, typeof(*obj), node);
176 	last = obj->batch_last;
177 
178 	hlist_splice_init(head, last, &dst->objects);
179 	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
180 	return true;
181 }
182 
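/*
 * Pop up to one batch of objects from @src onto @head. Fails only when
 * @src is empty.
 */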
183 static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
184 {
185 	if (!src->cnt)
186 		return false;
187 
188 	for (int i = 0; src->cnt && i < ODEBUG_BATCH_SIZE; i++) {
189 		struct hlist_node *node = src->objects.first;
190 
191 		WRITE_ONCE(src->cnt, src->cnt - 1);
192 		hlist_del(node);
193 		hlist_add_head(node, head);
194 	}
195 	return true;
196 }
197 
198 static struct debug_obj *__alloc_object(struct hlist_head *list)
199 {
200 	struct debug_obj *obj;
201 
202 	if (unlikely(!list->first))
203 		return NULL;
204 
205 	obj = hlist_entry(list->first, typeof(*obj), node);
206 	hlist_del(&obj->node);
207 	return obj;
208 }
209 
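/*
 * Allocate an object from the per CPU pool. When the pool is empty,
 * refill it with one batch from pool_to_free or pool_global under
 * pool_lock, update the usage statistics and retry.
 */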
210 static struct debug_obj *pcpu_alloc(void)
211 {
212 	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
213 
214 	lockdep_assert_irqs_disabled();
215 
216 	for (;;) {
217 		struct debug_obj *obj = __alloc_object(&pcp->objects);
218 
219 		if (likely(obj)) {
220 			pcp->cnt--;
221 			return obj;
222 		}
223 
224 		guard(raw_spinlock)(&pool_lock);
225 		if (!pool_move_batch(pcp, &pool_to_free)) {
226 			if (!pool_move_batch(pcp, &pool_global))
227 				return NULL;
228 		}
229 		obj_pool_used += pcp->cnt;
230 
231 		if (obj_pool_used > obj_pool_max_used)
232 			obj_pool_max_used = obj_pool_used;
233 
234 		if (pool_global.cnt < obj_pool_min_free)
235 			obj_pool_min_free = pool_global.cnt;
236 	}
237 }
238 
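/*
 * Return an object to the per CPU pool. When the pool has reached
 * ODEBUG_POOL_PERCPU_SIZE, hand one batch back to pool_global or, if
 * that is full, to pool_to_free.
 */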
239 static void pcpu_free(struct debug_obj *obj)
240 {
241 	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
242 
243 	lockdep_assert_irqs_disabled();
244 
245 	hlist_add_head(&obj->node, &pcp->objects);
246 	pcp->cnt++;
247 
248 	/* Pool full ? */
249 	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
250 		return;
251 
252 	/* Remove a batch from the per CPU pool */
253 	guard(raw_spinlock)(&pool_lock);
254 	/* Try to fit the batch into the pool_global first */
255 	if (!pool_move_batch(&pool_global, pcp))
256 		pool_move_batch(&pool_to_free, pcp);
257 	obj_pool_used -= ODEBUG_BATCH_SIZE;
258 }
259 
260 static void free_object_list(struct hlist_head *head)
261 {
262 	struct hlist_node *tmp;
263 	struct debug_obj *obj;
264 	int cnt = 0;
265 
266 	hlist_for_each_entry_safe(obj, tmp, head, node) {
267 		hlist_del(&obj->node);
268 		kmem_cache_free(obj_cache, obj);
269 		cnt++;
270 	}
271 	debug_objects_freed += cnt;
272 }
273 
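/*
 * Refill pool_global with batches from pool_to_free. Bit 0 of the static
 * 'state' variable serves as a simple lock so that only one context does
 * the refill at a time.
 */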
274 static void fill_pool_from_freelist(void)
275 {
276 	static unsigned long state;
277 
278 	/*
279 	 * Reuse objects from the global pool_to_free list; they will be
280 	 * reinitialized when they are allocated again.
281 	 */
282 	if (!pool_count(&pool_to_free))
283 		return;
284 
285 	/*
286 	 * Prevent the context from being scheduled or interrupted after
287 	 * setting the state flag.
288 	 */
289 	guard(irqsave)();
290 
291 	/*
292 	 * Avoid lock contention on &pool_lock and avoid making the cache
293 	 * line exclusive by testing the bit before attempting to set it.
294 	 */
295 	if (test_bit(0, &state) || test_and_set_bit(0, &state))
296 		return;
297 
298 	/* Avoid taking the lock when there is no work to do */
299 	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
300 		guard(raw_spinlock)(&pool_lock);
301 		/* Move a batch if possible */
302 		pool_move_batch(&pool_global, &pool_to_free);
303 	}
304 	clear_bit(0, &state);
305 }
306 
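/*
 * Allocate one batch of objects from @cache onto @head. Every object's
 * batch_last pointer is set to the node which ends up at the tail of the
 * list, so that pool_push_batch() can later splice the batch in one go.
 * On allocation failure the partially built batch is freed again.
 */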
307 static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
308 {
309 	struct hlist_node *last = NULL;
310 	struct debug_obj *obj;
311 
312 	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
313 		obj = kmem_cache_zalloc(cache, gfp);
314 		if (!obj) {
315 			free_object_list(head);
316 			return false;
317 		}
318 		debug_objects_allocated++;
319 
320 		if (!last)
321 			last = &obj->node;
322 		obj->batch_last = last;
323 
324 		hlist_add_head(&obj->node, head);
325 	}
326 	return true;
327 }
328 
329 static void fill_pool(void)
330 {
331 	static atomic_t cpus_allocating;
332 
333 	/*
334 	 * Avoid allocation and lock contention when:
335 	 *   - another CPU is already allocating
336 	 *   - the global pool has not yet reached the critical level
337 	 */
338 	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
339 		return;
340 
341 	atomic_inc(&cpus_allocating);
342 	while (pool_should_refill(&pool_global)) {
343 		HLIST_HEAD(head);
344 
345 		if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
346 			break;
347 
348 		guard(raw_spinlock_irqsave)(&pool_lock);
349 		if (!pool_push_batch(&pool_global, &head))
350 			pool_push_batch(&pool_to_free, &head);
351 	}
352 	atomic_dec(&cpus_allocating);
353 }
354 
355 /*
356  * Lookup an object in the hash bucket.
357  */
358 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
359 {
360 	struct debug_obj *obj;
361 	int cnt = 0;
362 
363 	hlist_for_each_entry(obj, &b->list, node) {
364 		cnt++;
365 		if (obj->object == addr)
366 			return obj;
367 	}
368 	if (cnt > debug_objects_maxchain)
369 		debug_objects_maxchain = cnt;
370 
371 	return NULL;
372 }
373 
374 static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
375 				      const struct debug_obj_descr *descr)
376 {
377 	struct debug_obj *obj;
378 
379 	if (static_branch_likely(&obj_cache_enabled))
380 		obj = pcpu_alloc();
381 	else
382 		obj = __alloc_object(&pool_boot);
383 
384 	if (likely(obj)) {
385 		obj->object = addr;
386 		obj->descr  = descr;
387 		obj->state  = ODEBUG_STATE_NONE;
388 		obj->astate = 0;
389 		hlist_add_head(&obj->node, &b->list);
390 	}
391 	return obj;
392 }
393 
394 /* Workqueue function to free surplus objects from pool_to_free in batches. */
395 static void free_obj_work(struct work_struct *work)
396 {
397 	bool free = true;
398 
399 	WRITE_ONCE(obj_freeing, false);
400 
401 	if (!pool_count(&pool_to_free))
402 		return;
403 
404 	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
405 		HLIST_HEAD(tofree);
406 
407 		/* Acquire and drop the lock for each batch */
408 		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
409 			if (!pool_to_free.cnt)
410 				return;
411 
412 			/* Refill the global pool if possible */
413 			if (pool_move_batch(&pool_global, &pool_to_free)) {
414 				/* Don't free as there seems to be demand */
415 				free = false;
416 			} else if (free) {
417 				pool_pop_batch(&tofree, &pool_to_free);
418 			} else {
419 				return;
420 			}
421 		}
422 		free_object_list(&tofree);
423 	}
424 }
425 
426 static void __free_object(struct debug_obj *obj)
427 {
428 	guard(irqsave)();
429 	if (static_branch_likely(&obj_cache_enabled))
430 		pcpu_free(obj);
431 	else
432 		hlist_add_head(&obj->node, &pool_boot);
433 }
434 
435 /*
436  * Put the object back into the pool and schedule work to free objects
437  * if necessary.
438  */
439 static void free_object(struct debug_obj *obj)
440 {
441 	__free_object(obj);
442 	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
443 		WRITE_ONCE(obj_freeing, true);
444 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
445 	}
446 }
447 
448 static void put_objects(struct hlist_head *list)
449 {
450 	struct hlist_node *tmp;
451 	struct debug_obj *obj;
452 
453 	/*
454 	 * Using free_object() makes the objects available for reuse or
455 	 * schedules them for freeing, and it gets all the accounting right.
456 	 */
457 	hlist_for_each_entry_safe(obj, tmp, list, node) {
458 		hlist_del(&obj->node);
459 		free_object(obj);
460 	}
461 }
462 
463 #ifdef CONFIG_HOTPLUG_CPU
464 static int object_cpu_offline(unsigned int cpu)
465 {
466 	/* Remote access is safe as the CPU is dead already */
467 	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);
468 
469 	put_objects(&pcp->objects);
470 	pcp->cnt = 0;
471 	return 0;
472 }
473 #endif
474 
475 /* Out of memory. Free all objects from hash */
476 static void debug_objects_oom(void)
477 {
478 	struct debug_bucket *db = obj_hash;
479 	HLIST_HEAD(freelist);
480 
481 	pr_warn("Out of memory. ODEBUG disabled\n");
482 
483 	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
484 		scoped_guard(raw_spinlock_irqsave, &db->lock)
485 			hlist_move_list(&db->list, &freelist);
486 
487 		put_objects(&freelist);
488 	}
489 }
490 
491 /*
492  * The hash is based on the page sized chunk which contains the address,
493  * so freed objects can be found by checking only the affected buckets.
494  */
495 static struct debug_bucket *get_bucket(unsigned long addr)
496 {
497 	unsigned long hash;
498 
499 	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
500 	return &obj_hash[hash];
501 }
502 
503 static void debug_print_object(struct debug_obj *obj, char *msg)
504 {
505 	const struct debug_obj_descr *descr = obj->descr;
506 	static int limit;
507 
508 	/*
509 	 * Don't report if lookup_object_or_alloc() by the current thread
510 	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
511 	 * concurrent thread turned off debug_objects_enabled and cleared
512 	 * the hash buckets.
513 	 */
514 	if (!debug_objects_enabled)
515 		return;
516 
517 	if (limit < 5 && descr != descr_test) {
518 		void *hint = descr->debug_hint ?
519 			descr->debug_hint(obj->object) : NULL;
520 		limit++;
521 		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
522 				 "object: %p object type: %s hint: %pS\n",
523 			msg, obj_states[obj->state], obj->astate,
524 			obj->object, descr->name, hint);
525 	}
526 	debug_objects_warnings++;
527 }
528 
529 /*
530  * Try to repair the damage, so we have a better chance to get useful
531  * debug output.
532  */
533 static bool
534 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
535 		   void * addr, enum debug_obj_state state)
536 {
537 	if (fixup && fixup(addr, state)) {
538 		debug_objects_fixups++;
539 		return true;
540 	}
541 	return false;
542 }
543 
544 static void debug_object_is_on_stack(void *addr, int onstack)
545 {
546 	int is_on_stack;
547 	static int limit;
548 
549 	if (limit > 4)
550 		return;
551 
552 	is_on_stack = object_is_on_stack(addr);
553 	if (is_on_stack == onstack)
554 		return;
555 
556 	limit++;
557 	if (is_on_stack)
558 		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
559 			 task_stack_page(current));
560 	else
561 		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
562 			 task_stack_page(current));
563 
564 	WARN_ON(1);
565 }
566 
567 static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
568 						const struct debug_obj_descr *descr,
569 						bool onstack, bool alloc_ifstatic)
570 {
571 	struct debug_obj *obj = lookup_object(addr, b);
572 	enum debug_obj_state state = ODEBUG_STATE_NONE;
573 
574 	if (likely(obj))
575 		return obj;
576 
577 	/*
578 	 * debug_object_init() unconditionally allocates untracked
579 	 * objects. It does not matter whether it is a static object or
580 	 * not.
581 	 *
582 	 * debug_object_assert_init() and debug_object_activate() allow
583 	 * allocation only if the descriptor callback confirms that the
584 	 * object is static and considered initialized. For non-static
585 	 * objects the allocation needs to be done from the fixup callback.
586 	 */
587 	if (unlikely(alloc_ifstatic)) {
588 		if (!descr->is_static_object || !descr->is_static_object(addr))
589 			return ERR_PTR(-ENOENT);
590 		/* Statically allocated objects are considered initialized */
591 		state = ODEBUG_STATE_INIT;
592 	}
593 
594 	obj = alloc_object(addr, b, descr);
595 	if (likely(obj)) {
596 		obj->state = state;
597 		debug_object_is_on_stack(addr, onstack);
598 		return obj;
599 	}
600 
601 	/* Out of memory. Do the cleanup outside of the locked region */
602 	debug_objects_enabled = false;
603 	return NULL;
604 }
605 
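/*
 * Ensure that spare objects are available before an object is tracked.
 * Objects are reused from pool_to_free first; the slab cache is only
 * used when the global pool is still below its minimum fill level and
 * the context permits it (preemptible on PREEMPT_RT).
 */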
606 static void debug_objects_fill_pool(void)
607 {
608 	if (!static_branch_likely(&obj_cache_enabled))
609 		return;
610 
611 	if (likely(!pool_should_refill(&pool_global)))
612 		return;
613 
614 	/* Try reusing objects from pool_to_free first */
615 	fill_pool_from_freelist();
616 
617 	if (likely(!pool_should_refill(&pool_global)))
618 		return;
619 
620 	/*
621 	 * On RT enabled kernels the pool refill must happen in preemptible
622 	 * context -- for !RT kernels we rely on the fact that spinlock_t and
623 	 * raw_spinlock_t are basically the same type and this lock-type
624 	 * inversion works just fine.
625 	 */
626 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
627 		/*
628 		 * Annotate away the spinlock_t inside raw_spinlock_t warning
629 		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
630 		 * the preemptible() condition above.
631 		 */
632 		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
633 		lock_map_acquire_try(&fill_pool_map);
634 		fill_pool();
635 		lock_map_release(&fill_pool_map);
636 	}
637 }
638 
639 static void
640 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
641 {
642 	struct debug_obj *obj, o;
643 	struct debug_bucket *db;
644 	unsigned long flags;
645 
646 	debug_objects_fill_pool();
647 
648 	db = get_bucket((unsigned long) addr);
649 
650 	raw_spin_lock_irqsave(&db->lock, flags);
651 
652 	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
653 	if (unlikely(!obj)) {
654 		raw_spin_unlock_irqrestore(&db->lock, flags);
655 		debug_objects_oom();
656 		return;
657 	}
658 
659 	switch (obj->state) {
660 	case ODEBUG_STATE_NONE:
661 	case ODEBUG_STATE_INIT:
662 	case ODEBUG_STATE_INACTIVE:
663 		obj->state = ODEBUG_STATE_INIT;
664 		raw_spin_unlock_irqrestore(&db->lock, flags);
665 		return;
666 	default:
667 		break;
668 	}
669 
670 	o = *obj;
671 	raw_spin_unlock_irqrestore(&db->lock, flags);
672 	debug_print_object(&o, "init");
673 
674 	if (o.state == ODEBUG_STATE_ACTIVE)
675 		debug_object_fixup(descr->fixup_init, addr, o.state);
676 }
677 
678 /**
679  * debug_object_init - debug checks when an object is initialized
680  * @addr:	address of the object
681  * @descr:	pointer to an object specific debug description structure
682  */
683 void debug_object_init(void *addr, const struct debug_obj_descr *descr)
684 {
685 	if (!debug_objects_enabled)
686 		return;
687 
688 	__debug_object_init(addr, descr, 0);
689 }
690 EXPORT_SYMBOL_GPL(debug_object_init);
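
/*
 * Illustrative usage sketch (the names below are made up, not taken from
 * a real subsystem): a user wires up a descriptor and brackets the life
 * time of its objects with the init/activate/deactivate/free calls:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo_struct",
 *	};
 *
 *	void foo_init(struct foo_struct *foo)
 *	{
 *		debug_object_init(foo, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo_struct *foo)
 *	{
 *		debug_object_activate(foo, &foo_debug_descr);
 *	}
 *
 *	void foo_stop(struct foo_struct *foo)
 *	{
 *		debug_object_deactivate(foo, &foo_debug_descr);
 *	}
 *
 *	void foo_release(struct foo_struct *foo)
 *	{
 *		debug_object_free(foo, &foo_debug_descr);
 *	}
 *
 * Objects living on the stack are announced with
 * debug_object_init_on_stack() instead of debug_object_init().
 */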
691 
692 /**
693  * debug_object_init_on_stack - debug checks when an object on the stack is
694  *				initialized
695  * @addr:	address of the object
696  * @descr:	pointer to an object specific debug description structure
697  */
698 void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
699 {
700 	if (!debug_objects_enabled)
701 		return;
702 
703 	__debug_object_init(addr, descr, 1);
704 }
705 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
706 
707 /**
708  * debug_object_activate - debug checks when an object is activated
709  * @addr:	address of the object
710  * @descr:	pointer to an object specific debug description structure
711  * Returns 0 on success, -EINVAL if the check failed.
712  */
713 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
714 {
715 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
716 	struct debug_bucket *db;
717 	struct debug_obj *obj;
718 	unsigned long flags;
719 
720 	if (!debug_objects_enabled)
721 		return 0;
722 
723 	debug_objects_fill_pool();
724 
725 	db = get_bucket((unsigned long) addr);
726 
727 	raw_spin_lock_irqsave(&db->lock, flags);
728 
729 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
730 	if (unlikely(!obj)) {
731 		raw_spin_unlock_irqrestore(&db->lock, flags);
732 		debug_objects_oom();
733 		return 0;
734 	} else if (likely(!IS_ERR(obj))) {
735 		switch (obj->state) {
736 		case ODEBUG_STATE_ACTIVE:
737 		case ODEBUG_STATE_DESTROYED:
738 			o = *obj;
739 			break;
740 		case ODEBUG_STATE_INIT:
741 		case ODEBUG_STATE_INACTIVE:
742 			obj->state = ODEBUG_STATE_ACTIVE;
743 			fallthrough;
744 		default:
745 			raw_spin_unlock_irqrestore(&db->lock, flags);
746 			return 0;
747 		}
748 	}
749 
750 	raw_spin_unlock_irqrestore(&db->lock, flags);
751 	debug_print_object(&o, "activate");
752 
753 	switch (o.state) {
754 	case ODEBUG_STATE_ACTIVE:
755 	case ODEBUG_STATE_NOTAVAILABLE:
756 		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
757 			return 0;
758 		fallthrough;
759 	default:
760 		return -EINVAL;
761 	}
762 }
763 EXPORT_SYMBOL_GPL(debug_object_activate);
764 
765 /**
766  * debug_object_deactivate - debug checks when an object is deactivated
767  * @addr:	address of the object
768  * @descr:	pointer to an object specific debug description structure
769  */
770 void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
771 {
772 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
773 	struct debug_bucket *db;
774 	struct debug_obj *obj;
775 	unsigned long flags;
776 
777 	if (!debug_objects_enabled)
778 		return;
779 
780 	db = get_bucket((unsigned long) addr);
781 
782 	raw_spin_lock_irqsave(&db->lock, flags);
783 
784 	obj = lookup_object(addr, db);
785 	if (obj) {
786 		switch (obj->state) {
787 		case ODEBUG_STATE_DESTROYED:
788 			break;
789 		case ODEBUG_STATE_INIT:
790 		case ODEBUG_STATE_INACTIVE:
791 		case ODEBUG_STATE_ACTIVE:
792 			if (obj->astate)
793 				break;
794 			obj->state = ODEBUG_STATE_INACTIVE;
795 			fallthrough;
796 		default:
797 			raw_spin_unlock_irqrestore(&db->lock, flags);
798 			return;
799 		}
800 		o = *obj;
801 	}
802 
803 	raw_spin_unlock_irqrestore(&db->lock, flags);
804 	debug_print_object(&o, "deactivate");
805 }
806 EXPORT_SYMBOL_GPL(debug_object_deactivate);
807 
808 /**
809  * debug_object_destroy - debug checks when an object is destroyed
810  * @addr:	address of the object
811  * @descr:	pointer to an object specific debug description structure
812  */
813 void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
814 {
815 	struct debug_obj *obj, o;
816 	struct debug_bucket *db;
817 	unsigned long flags;
818 
819 	if (!debug_objects_enabled)
820 		return;
821 
822 	db = get_bucket((unsigned long) addr);
823 
824 	raw_spin_lock_irqsave(&db->lock, flags);
825 
826 	obj = lookup_object(addr, db);
827 	if (!obj) {
828 		raw_spin_unlock_irqrestore(&db->lock, flags);
829 		return;
830 	}
831 
832 	switch (obj->state) {
833 	case ODEBUG_STATE_ACTIVE:
834 	case ODEBUG_STATE_DESTROYED:
835 		break;
836 	case ODEBUG_STATE_NONE:
837 	case ODEBUG_STATE_INIT:
838 	case ODEBUG_STATE_INACTIVE:
839 		obj->state = ODEBUG_STATE_DESTROYED;
840 		fallthrough;
841 	default:
842 		raw_spin_unlock_irqrestore(&db->lock, flags);
843 		return;
844 	}
845 
846 	o = *obj;
847 	raw_spin_unlock_irqrestore(&db->lock, flags);
848 	debug_print_object(&o, "destroy");
849 
850 	if (o.state == ODEBUG_STATE_ACTIVE)
851 		debug_object_fixup(descr->fixup_destroy, addr, o.state);
852 }
853 EXPORT_SYMBOL_GPL(debug_object_destroy);
854 
855 /**
856  * debug_object_free - debug checks when an object is freed
857  * @addr:	address of the object
858  * @descr:	pointer to an object specific debug description structure
859  */
860 void debug_object_free(void *addr, const struct debug_obj_descr *descr)
861 {
862 	struct debug_obj *obj, o;
863 	struct debug_bucket *db;
864 	unsigned long flags;
865 
866 	if (!debug_objects_enabled)
867 		return;
868 
869 	db = get_bucket((unsigned long) addr);
870 
871 	raw_spin_lock_irqsave(&db->lock, flags);
872 
873 	obj = lookup_object(addr, db);
874 	if (!obj) {
875 		raw_spin_unlock_irqrestore(&db->lock, flags);
876 		return;
877 	}
878 
879 	switch (obj->state) {
880 	case ODEBUG_STATE_ACTIVE:
881 		break;
882 	default:
883 		hlist_del(&obj->node);
884 		raw_spin_unlock_irqrestore(&db->lock, flags);
885 		free_object(obj);
886 		return;
887 	}
888 
889 	o = *obj;
890 	raw_spin_unlock_irqrestore(&db->lock, flags);
891 	debug_print_object(&o, "free");
892 
893 	debug_object_fixup(descr->fixup_free, addr, o.state);
894 }
895 EXPORT_SYMBOL_GPL(debug_object_free);
896 
897 /**
898  * debug_object_assert_init - debug checks when an object should be initialized
899  * @addr:	address of the object
900  * @descr:	pointer to an object specific debug description structure
901  */
902 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
903 {
904 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
905 	struct debug_bucket *db;
906 	struct debug_obj *obj;
907 	unsigned long flags;
908 
909 	if (!debug_objects_enabled)
910 		return;
911 
912 	debug_objects_fill_pool();
913 
914 	db = get_bucket((unsigned long) addr);
915 
916 	raw_spin_lock_irqsave(&db->lock, flags);
917 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
918 	raw_spin_unlock_irqrestore(&db->lock, flags);
919 	if (likely(!IS_ERR_OR_NULL(obj)))
920 		return;
921 
922 	/* If NULL the allocation has hit OOM */
923 	if (!obj) {
924 		debug_objects_oom();
925 		return;
926 	}
927 
928 	/* Object is neither tracked nor static. It's not initialized. */
929 	debug_print_object(&o, "assert_init");
930 	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
931 }
932 EXPORT_SYMBOL_GPL(debug_object_assert_init);
933 
934 /**
935  * debug_object_active_state - debug checks object usage state machine
936  * @addr:	address of the object
937  * @descr:	pointer to an object specific debug description structure
938  * @expect:	expected state
939  * @next:	state to move to if expected state is found
940  */
941 void
942 debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
943 			  unsigned int expect, unsigned int next)
944 {
945 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
946 	struct debug_bucket *db;
947 	struct debug_obj *obj;
948 	unsigned long flags;
949 
950 	if (!debug_objects_enabled)
951 		return;
952 
953 	db = get_bucket((unsigned long) addr);
954 
955 	raw_spin_lock_irqsave(&db->lock, flags);
956 
957 	obj = lookup_object(addr, db);
958 	if (obj) {
959 		switch (obj->state) {
960 		case ODEBUG_STATE_ACTIVE:
961 			if (obj->astate != expect)
962 				break;
963 			obj->astate = next;
964 			raw_spin_unlock_irqrestore(&db->lock, flags);
965 			return;
966 		default:
967 			break;
968 		}
969 		o = *obj;
970 	}
971 
972 	raw_spin_unlock_irqrestore(&db->lock, flags);
973 	debug_print_object(&o, "active_state");
974 }
975 EXPORT_SYMBOL_GPL(debug_object_active_state);
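/*
 * Illustrative sketch (hypothetical states, not from a real user): a
 * subsystem which tracks a "queued" sub-state of an active object could
 * verify its transitions like this:
 *
 *	enum { FOO_IDLE, FOO_QUEUED };
 *
 *	debug_object_active_state(foo, &foo_debug_descr, FOO_IDLE, FOO_QUEUED);
 *	...
 *	debug_object_active_state(foo, &foo_debug_descr, FOO_QUEUED, FOO_IDLE);
 *
 * A mismatch between the expected and the recorded state is reported
 * via debug_print_object().
 */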
976 
977 #ifdef CONFIG_DEBUG_OBJECTS_FREE
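/*
 * Walk all hash buckets which cover the chunks of the freed memory
 * range. Active objects in the range are reported and handed to the
 * fixup_free() callback, all other tracked objects are removed silently.
 */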
978 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
979 {
980 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
981 	int cnt, objs_checked = 0;
982 	struct debug_obj *obj, o;
983 	struct debug_bucket *db;
984 	struct hlist_node *tmp;
985 
986 	saddr = (unsigned long) address;
987 	eaddr = saddr + size;
988 	paddr = saddr & ODEBUG_CHUNK_MASK;
989 	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
990 	chunks >>= ODEBUG_CHUNK_SHIFT;
991 
992 	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
993 		db = get_bucket(paddr);
994 
995 repeat:
996 		cnt = 0;
997 		raw_spin_lock_irqsave(&db->lock, flags);
998 		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
999 			cnt++;
1000 			oaddr = (unsigned long) obj->object;
1001 			if (oaddr < saddr || oaddr >= eaddr)
1002 				continue;
1003 
1004 			switch (obj->state) {
1005 			case ODEBUG_STATE_ACTIVE:
1006 				o = *obj;
1007 				raw_spin_unlock_irqrestore(&db->lock, flags);
1008 				debug_print_object(&o, "free");
1009 				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
1010 				goto repeat;
1011 			default:
1012 				hlist_del(&obj->node);
1013 				__free_object(obj);
1014 				break;
1015 			}
1016 		}
1017 		raw_spin_unlock_irqrestore(&db->lock, flags);
1018 
1019 		if (cnt > debug_objects_maxchain)
1020 			debug_objects_maxchain = cnt;
1021 
1022 		objs_checked += cnt;
1023 	}
1024 
1025 	if (objs_checked > debug_objects_maxchecked)
1026 		debug_objects_maxchecked = objs_checked;
1027 
1028 	/* Schedule work to actually kmem_cache_free() objects */
1029 	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
1030 		WRITE_ONCE(obj_freeing, true);
1031 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1032 	}
1033 }
1034 
1035 void debug_check_no_obj_freed(const void *address, unsigned long size)
1036 {
1037 	if (debug_objects_enabled)
1038 		__debug_check_no_obj_freed(address, size);
1039 }
1040 #endif
1041 
1042 #ifdef CONFIG_DEBUG_FS
1043 
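/*
 * As noted at the counter definitions above, objects sitting in the per
 * CPU free pools are accounted as used in obj_pool_used and are not part
 * of pool_global. Fold them back in for the debugfs output.
 */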
1044 static int debug_stats_show(struct seq_file *m, void *v)
1045 {
1046 	int cpu, obj_percpu_free = 0;
1047 
1048 	for_each_possible_cpu(cpu)
1049 		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);
1050 
1051 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1052 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1053 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1054 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1055 	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
1056 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1057 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1058 	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1059 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1060 	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
1061 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1062 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1063 	return 0;
1064 }
1065 DEFINE_SHOW_ATTRIBUTE(debug_stats);
1066 
1067 static int __init debug_objects_init_debugfs(void)
1068 {
1069 	struct dentry *dbgdir;
1070 
1071 	if (!debug_objects_enabled)
1072 		return 0;
1073 
1074 	dbgdir = debugfs_create_dir("debug_objects", NULL);
1075 
1076 	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1077 
1078 	return 0;
1079 }
1080 __initcall(debug_objects_init_debugfs);
1081 
1082 #else
1083 static inline void debug_objects_init_debugfs(void) { }
1084 #endif
1085 
1086 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1087 
1088 /* Random data structure for the self test */
1089 struct self_test {
1090 	unsigned long	dummy1[6];
1091 	int		static_init;
1092 	unsigned long	dummy2[3];
1093 };
1094 
1095 static __initconst const struct debug_obj_descr descr_type_test;
1096 
1097 static bool __init is_static_object(void *addr)
1098 {
1099 	struct self_test *obj = addr;
1100 
1101 	return obj->static_init;
1102 }
1103 
1104 /*
1105  * fixup_init is called when:
1106  * - an active object is initialized
1107  */
1108 static bool __init fixup_init(void *addr, enum debug_obj_state state)
1109 {
1110 	struct self_test *obj = addr;
1111 
1112 	switch (state) {
1113 	case ODEBUG_STATE_ACTIVE:
1114 		debug_object_deactivate(obj, &descr_type_test);
1115 		debug_object_init(obj, &descr_type_test);
1116 		return true;
1117 	default:
1118 		return false;
1119 	}
1120 }
1121 
1122 /*
1123  * fixup_activate is called when:
1124  * - an active object is activated
1125  * - an unknown non-static object is activated
1126  */
1127 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1128 {
1129 	struct self_test *obj = addr;
1130 
1131 	switch (state) {
1132 	case ODEBUG_STATE_NOTAVAILABLE:
1133 		return true;
1134 	case ODEBUG_STATE_ACTIVE:
1135 		debug_object_deactivate(obj, &descr_type_test);
1136 		debug_object_activate(obj, &descr_type_test);
1137 		return true;
1138 
1139 	default:
1140 		return false;
1141 	}
1142 }
1143 
1144 /*
1145  * fixup_destroy is called when:
1146  * - an active object is destroyed
1147  */
1148 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1149 {
1150 	struct self_test *obj = addr;
1151 
1152 	switch (state) {
1153 	case ODEBUG_STATE_ACTIVE:
1154 		debug_object_deactivate(obj, &descr_type_test);
1155 		debug_object_destroy(obj, &descr_type_test);
1156 		return true;
1157 	default:
1158 		return false;
1159 	}
1160 }
1161 
1162 /*
1163  * fixup_free is called when:
1164  * - an active object is freed
1165  */
1166 static bool __init fixup_free(void *addr, enum debug_obj_state state)
1167 {
1168 	struct self_test *obj = addr;
1169 
1170 	switch (state) {
1171 	case ODEBUG_STATE_ACTIVE:
1172 		debug_object_deactivate(obj, &descr_type_test);
1173 		debug_object_free(obj, &descr_type_test);
1174 		return true;
1175 	default:
1176 		return false;
1177 	}
1178 }
1179 
1180 static int __init
1181 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1182 {
1183 	struct debug_bucket *db;
1184 	struct debug_obj *obj;
1185 	unsigned long flags;
1186 	int res = -EINVAL;
1187 
1188 	db = get_bucket((unsigned long) addr);
1189 
1190 	raw_spin_lock_irqsave(&db->lock, flags);
1191 
1192 	obj = lookup_object(addr, db);
1193 	if (!obj && state != ODEBUG_STATE_NONE) {
1194 		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1195 		goto out;
1196 	}
1197 	if (obj && obj->state != state) {
1198 		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1199 		       obj->state, state);
1200 		goto out;
1201 	}
1202 	if (fixups != debug_objects_fixups) {
1203 		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1204 		       fixups, debug_objects_fixups);
1205 		goto out;
1206 	}
1207 	if (warnings != debug_objects_warnings) {
1208 		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1209 		       warnings, debug_objects_warnings);
1210 		goto out;
1211 	}
1212 	res = 0;
1213 out:
1214 	raw_spin_unlock_irqrestore(&db->lock, flags);
1215 	if (res)
1216 		debug_objects_enabled = false;
1217 	return res;
1218 }
1219 
1220 static __initconst const struct debug_obj_descr descr_type_test = {
1221 	.name			= "selftest",
1222 	.is_static_object	= is_static_object,
1223 	.fixup_init		= fixup_init,
1224 	.fixup_activate		= fixup_activate,
1225 	.fixup_destroy		= fixup_destroy,
1226 	.fixup_free		= fixup_free,
1227 };
1228 
1229 static __initdata struct self_test obj = { .static_init = 0 };
1230 
1231 static bool __init debug_objects_selftest(void)
1232 {
1233 	int fixups, oldfixups, warnings, oldwarnings;
1234 	unsigned long flags;
1235 
1236 	local_irq_save(flags);
1237 
1238 	fixups = oldfixups = debug_objects_fixups;
1239 	warnings = oldwarnings = debug_objects_warnings;
1240 	descr_test = &descr_type_test;
1241 
1242 	debug_object_init(&obj, &descr_type_test);
1243 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1244 		goto out;
1245 	debug_object_activate(&obj, &descr_type_test);
1246 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1247 		goto out;
1248 	debug_object_activate(&obj, &descr_type_test);
1249 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1250 		goto out;
1251 	debug_object_deactivate(&obj, &descr_type_test);
1252 	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1253 		goto out;
1254 	debug_object_destroy(&obj, &descr_type_test);
1255 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1256 		goto out;
1257 	debug_object_init(&obj, &descr_type_test);
1258 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1259 		goto out;
1260 	debug_object_activate(&obj, &descr_type_test);
1261 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1262 		goto out;
1263 	debug_object_deactivate(&obj, &descr_type_test);
1264 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1265 		goto out;
1266 	debug_object_free(&obj, &descr_type_test);
1267 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1268 		goto out;
1269 
1270 	obj.static_init = 1;
1271 	debug_object_activate(&obj, &descr_type_test);
1272 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1273 		goto out;
1274 	debug_object_init(&obj, &descr_type_test);
1275 	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1276 		goto out;
1277 	debug_object_free(&obj, &descr_type_test);
1278 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1279 		goto out;
1280 
1281 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1282 	debug_object_init(&obj, &descr_type_test);
1283 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1284 		goto out;
1285 	debug_object_activate(&obj, &descr_type_test);
1286 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1287 		goto out;
1288 	__debug_check_no_obj_freed(&obj, sizeof(obj));
1289 	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1290 		goto out;
1291 #endif
1292 	pr_info("selftest passed\n");
1293 
1294 out:
1295 	debug_objects_fixups = oldfixups;
1296 	debug_objects_warnings = oldwarnings;
1297 	descr_test = NULL;
1298 
1299 	local_irq_restore(flags);
1300 	return debug_objects_enabled;
1301 }
1302 #else
1303 static inline bool debug_objects_selftest(void) { return true; }
1304 #endif
1305 
1306 /*
1307  * Called during early boot to initialize the hash buckets and link
1308  * the static object pool objects into the boot pool list. After this call
1309  * the object tracker is fully operational.
1310  */
1311 void __init debug_objects_early_init(void)
1312 {
1313 	int i;
1314 
1315 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1316 		raw_spin_lock_init(&obj_hash[i].lock);
1317 
1318 	/* Keep early boot simple and add everything to the boot list */
1319 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1320 		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
1321 }
1322 
1323 /*
1324  * Convert the statically allocated objects to dynamic ones.
1325  * debug_objects_mem_init() is called early so only one CPU is up and
1326  * interrupts are disabled, which means it is safe to replace the active
1327  * object references.
1328  */
1329 static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
1330 {
1331 	struct debug_bucket *db = obj_hash;
1332 	struct hlist_node *tmp;
1333 	struct debug_obj *obj;
1334 	HLIST_HEAD(objects);
1335 	int i;
1336 
1337 	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
1338 		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
1339 			goto free;
1340 		pool_push_batch(&pool_global, &objects);
1341 	}
1342 
1343 	/* Disconnect the boot pool. */
1344 	pool_boot.first = NULL;
1345 
1346 	/* Replace the active object references */
1347 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1348 		hlist_move_list(&db->list, &objects);
1349 
1350 		hlist_for_each_entry(obj, &objects, node) {
1351 			struct debug_obj *new = pcpu_alloc();
1352 
1353 			/* copy object data */
1354 			*new = *obj;
1355 			hlist_add_head(&new->node, &db->list);
1356 		}
1357 	}
1358 	return true;
1359 free:
1360 	/* Can't use free_object_list() as the cache is not populated yet */
1361 	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
1362 		hlist_del(&obj->node);
1363 		kmem_cache_free(cache, obj);
1364 	}
1365 	return false;
1366 }
1367 
1368 /*
1369  * Called after the kmem_caches are functional to set up a dedicated
1370  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1371  * prevents the debug code from being called on kmem_cache_free() for
1372  * the debug tracker objects, which avoids recursive calls.
1373  */
1374 void __init debug_objects_mem_init(void)
1375 {
1376 	struct kmem_cache *cache;
1377 	int extras;
1378 
1379 	if (!debug_objects_enabled)
1380 		return;
1381 
1382 	if (!debug_objects_selftest())
1383 		return;
1384 
1385 	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
1386 				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
1387 
1388 	if (!cache || !debug_objects_replace_static_objects(cache)) {
1389 		debug_objects_enabled = false;
1390 		pr_warn("Out of memory.\n");
1391 		return;
1392 	}
1393 
1394 	/*
1395 	 * Adjust the thresholds for allocating and freeing objects
1396 	 * according to the number of possible CPUs available in the
1397 	 * system.
1398 	 */
1399 	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1400 	pool_global.max_cnt += extras;
1401 	pool_global.min_cnt += extras;
1402 
1403 	/* Everything worked. Expose the cache */
1404 	obj_cache = cache;
1405 	static_branch_enable(&obj_cache_enabled);
1406 
1407 #ifdef CONFIG_HOTPLUG_CPU
1408 	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1409 				  object_cpu_offline);
1410 #endif
1411 	return;
1412 }
1413