// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/cpu.h>
#include <linux/debugobjects.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/static_key.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Must be power of two */
#define ODEBUG_BATCH_SIZE	16

/* Initial values. Must all be a multiple of batch size */
#define ODEBUG_POOL_SIZE	(64 * ODEBUG_BATCH_SIZE)
#define ODEBUG_POOL_MIN_LEVEL	(ODEBUG_POOL_SIZE / 4)

#define ODEBUG_POOL_PERCPU_SIZE	(8 * ODEBUG_BATCH_SIZE)

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via workqueue is limited to a maximum
 * frequency of 10Hz with about 1024 objects (64 batches of 16) per
 * freeing operation, i.e. at most ~10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

struct pool_stats {
	unsigned int		cur_used;
	unsigned int		max_used;
	unsigned int		min_fill;
};

struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
	unsigned int		min_cnt;
	unsigned int		max_cnt;
	struct pool_stats	stats;
} ____cacheline_aligned;

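/*
 * Tracker objects live in one of three places: a small per CPU pool for
 * lock-free fast path operations, the global pool_global guarded by
 * pool_lock, and pool_to_free, which holds surplus objects until the
 * delayed work either recycles them into the global pool or frees them.
 */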
static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool pool_global = {
	.min_cnt		= ODEBUG_POOL_MIN_LEVEL,
	.max_cnt		= ODEBUG_POOL_SIZE,
	.stats			= {
		.min_fill	= ODEBUG_POOL_SIZE,
	},
};

static struct obj_pool pool_to_free = {
	.max_cnt	= UINT_MAX,
};

static HLIST_HEAD(pool_boot);

static bool			obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static const struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache		*obj_cache __ro_after_init;

/*
 * Track the number of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

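/*
 * Enabled in debug_objects_mem_init() once obj_cache is set up. Until
 * then allocations are served from the static boot pool (pool_boot).
 */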
static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt;
}

static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt / 2;
}

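/*
 * Objects are managed in batches of ODEBUG_BATCH_SIZE. On every pool list
 * the first object of a batch carries a pointer to the last node of its
 * batch in obj->batch_last, which allows a complete batch to be moved
 * from one pool to another without walking the list.
 */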
static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	struct hlist_node *last, *next_batch, *first_batch;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt || !src->cnt)
		return false;

	first_batch = src->objects.first;
	obj = hlist_entry(first_batch, typeof(*obj), node);
	last = obj->batch_last;
	next_batch = last->next;

	/* Move the next batch to the front of the source pool */
	src->objects.first = next_batch;
	if (next_batch)
		next_batch->pprev = &src->objects.first;

	/* Add the extracted batch to the destination pool */
	last->next = dst->objects.first;
	if (last->next)
		last->next->pprev = &last->next;
	first_batch->pprev = &dst->objects.first;
	dst->objects.first = first_batch;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}

static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
{
	struct hlist_node *last;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt)
		return false;

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;

	hlist_splice_init(head, last, &dst->objects);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}

static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
	struct hlist_node *last, *next;
	struct debug_obj *obj;

	if (!src->cnt)
		return false;

	/* Move the complete list to the head */
	hlist_move_list(&src->objects, head);

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;
	next = last->next;
	/* Disconnect the batch from the list */
	last->next = NULL;

	/* Move the node after last back to the source pool. */
	src->objects.first = next;
	if (next)
		next->pprev = &src->objects.first;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	return true;
}

static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj;

	if (unlikely(!list->first))
		return NULL;

	obj = hlist_entry(list->first, typeof(*obj), node);
	hlist_del(&obj->node);
	return obj;
}

static void pcpu_refill_stats(void)
{
	struct pool_stats *stats = &pool_global.stats;

	WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);

	if (stats->cur_used > stats->max_used)
		stats->max_used = stats->cur_used;

	if (pool_global.cnt < stats->min_fill)
		stats->min_fill = pool_global.cnt;
}

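/*
 * Allocate a tracker object from the per CPU pool. If the pool is empty,
 * it is refilled batch-wise from pool_to_free or, failing that, from the
 * global pool. On intermediate batch boundaries only pool_to_free is
 * used, so the global pool is not drained prematurely.
 */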
static struct debug_obj *pcpu_alloc(void)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	for (;;) {
		struct debug_obj *obj = __alloc_object(&pcp->objects);

		if (likely(obj)) {
			pcp->cnt--;
			/*
			 * If this emptied a batch try to refill from the
			 * free pool. Don't do that if this was the top-most
			 * batch as pcpu_free() expects the per CPU pool
			 * to be less than ODEBUG_POOL_PERCPU_SIZE.
			 */
			if (unlikely(pcp->cnt < (ODEBUG_POOL_PERCPU_SIZE - ODEBUG_BATCH_SIZE) &&
				     !(pcp->cnt % ODEBUG_BATCH_SIZE))) {
				/*
				 * Don't try to allocate from the regular pool here
				 * to not exhaust it prematurely.
				 */
				if (pool_count(&pool_to_free)) {
					guard(raw_spinlock)(&pool_lock);
					pool_move_batch(pcp, &pool_to_free);
					pcpu_refill_stats();
				}
			}
			return obj;
		}

		guard(raw_spinlock)(&pool_lock);
		if (!pool_move_batch(pcp, &pool_to_free)) {
			if (!pool_move_batch(pcp, &pool_global))
				return NULL;
		}
		pcpu_refill_stats();
	}
}

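/*
 * Return a tracker object to the per CPU pool, keeping the batch linkage
 * intact. Once the pool reaches ODEBUG_POOL_PERCPU_SIZE, one batch is
 * handed back to pool_global, or to pool_to_free if pool_global is full.
 */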
static void pcpu_free(struct debug_obj *obj)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
	struct debug_obj *first;

	lockdep_assert_irqs_disabled();

	if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
		obj->batch_last = &obj->node;
	} else {
		first = hlist_entry(pcp->objects.first, typeof(*first), node);
		obj->batch_last = first->batch_last;
	}
	hlist_add_head(&obj->node, &pcp->objects);
	pcp->cnt++;

	/* Pool full ? */
	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
		return;

	/* Remove a batch from the per CPU pool */
	guard(raw_spinlock)(&pool_lock);
	/* Try to fit the batch into the pool_global first */
	if (!pool_move_batch(&pool_global, pcp))
		pool_move_batch(&pool_to_free, pcp);
	WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;

	/*
	 * Reuse objects from pool_to_free; they will be reinitialized
	 * when allocated again.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	/* Avoid taking the lock when there is no work to do */
	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
		guard(raw_spinlock)(&pool_lock);
		/* Move a batch if possible */
		pool_move_batch(&pool_global, &pool_to_free);
	}
	clear_bit(0, &state);
}

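/*
 * Allocate a full batch of tracker objects from the slab cache and link
 * it onto @head. On allocation failure the partial batch is freed again.
 */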
static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
{
	struct hlist_node *last = NULL;
	struct debug_obj *obj;

	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
		obj = kmem_cache_zalloc(cache, gfp);
		if (!obj) {
			free_object_list(head);
			return false;
		}
		debug_objects_allocated++;

		if (!last)
			last = &obj->node;
		obj->batch_last = last;

		hlist_add_head(&obj->node, head);
	}
	return true;
}

static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *   - One other CPU is already allocating
	 *   - The global pool has not reached the critical level yet
	 */
	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_should_refill(&pool_global)) {
		HLIST_HEAD(head);

		if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		if (!pool_push_batch(&pool_global, &head))
			pool_push_batch(&pool_to_free, &head);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
				      const struct debug_obj_descr *descr)
{
	struct debug_obj *obj;

	if (static_branch_likely(&obj_cache_enabled))
		obj = pcpu_alloc();
	else
		obj = __alloc_object(&pool_boot);

	if (likely(obj)) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/* workqueue function to free objects. */
static void free_obj_work(struct work_struct *work)
{
	bool free = true;

	WRITE_ONCE(obj_freeing, false);

	if (!pool_count(&pool_to_free))
		return;

	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
		HLIST_HEAD(tofree);

		/* Acquire and drop the lock for each batch */
		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
			if (!pool_to_free.cnt)
				return;

			/* Refill the global pool if possible */
			if (pool_move_batch(&pool_global, &pool_to_free)) {
				/* Don't free as there seems to be demand */
				free = false;
			} else if (free) {
				pool_pop_batch(&tofree, &pool_to_free);
			} else {
				return;
			}
		}
		free_object_list(&tofree);
	}
}

static void __free_object(struct debug_obj *obj)
{
	guard(irqsave)();
	if (static_branch_likely(&obj_cache_enabled))
		pcpu_free(obj);
	else
		hlist_add_head(&obj->node, &pool_boot);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
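
/*
 * Worked example, assuming PAGE_SHIFT == 12: all objects inside the 4K
 * chunk starting at 0xffff888012345000 map via hash_long(0xffff888012345, 14)
 * to the same bucket, so debug_check_no_obj_freed() only has to scan the
 * buckets covering the freed range instead of the whole hash table.
 */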

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

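/*
 * Make sure the pool has a sufficient fill level before a tracking call
 * might need to allocate an object: first recycle objects from
 * pool_to_free, and only if that is not enough allocate from the slab
 * cache in fill_pool().
 */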
static void debug_objects_fill_pool(void)
{
	if (!static_branch_likely(&obj_cache_enabled))
		return;

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from pool_to_free */
	fill_pool_from_freelist();

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
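
/*
 * A minimal usage sketch (hypothetical code, not part of this file): a
 * subsystem declares one descriptor for its object type and brackets the
 * object's lifetime with the tracking calls. The type my_timer and the
 * functions below are illustrative only.
 *
 *	static const struct debug_obj_descr my_timer_descr = {
 *		.name = "my_timer",
 *	};
 *
 *	void my_timer_setup(struct my_timer *t)
 *	{
 *		debug_object_init(t, &my_timer_descr);
 *	}
 *
 *	void my_timer_start(struct my_timer *t)
 *	{
 *		debug_object_activate(t, &my_timer_descr);
 *	}
 *
 *	void my_timer_stop(struct my_timer *t)
 *	{
 *		debug_object_deactivate(t, &my_timer_descr);
 *	}
 *
 *	void my_timer_release(struct my_timer *t)
 *	{
 *		debug_object_free(t, &my_timer_descr);
 *	}
 *
 * debug_object_activate() then warns when a timer is started twice, and
 * debug_object_free() warns (and can repair via .fixup_free) when a still
 * active timer is released.
 */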

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns: 0 on success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
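	/* Round up so every chunk overlapping [saddr, eaddr) is covered */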
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	unsigned int cpu, pool_used, pcp_free = 0;

	/*
	 * pool_global.stats.cur_used is the number of batches currently
	 * handed out to per CPU pools. Convert it to number of objects
	 * and subtract the number of free objects in the per CPU pools.
	 * As this is lockless the number is an estimate.
	 */
	for_each_possible_cpu(cpu)
		pcp_free += per_cpu(pool_pcpu.cnt, cpu);

	pool_used = data_race(pool_global.stats.cur_used);
	pcp_free = min(pool_used, pcp_free);
	pool_used -= pcp_free;

	seq_printf(m, "max_chain     : %d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   : %d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      : %d\n", debug_objects_warnings);
	seq_printf(m, "fixups        : %d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     : %u\n", pool_count(&pool_global) + pcp_free);
	seq_printf(m, "pool_pcp_free : %u\n", pcp_free);
	seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
	seq_printf(m, "pool_used     : %u\n", pool_used);
	seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
	seq_printf(m, "on_free_list  : %u\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated: %d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    : %d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
			goto free;
		pool_push_batch(&pool_global, &objects);
	}

	/* Disconnect the boot pool. */
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			struct debug_obj *new = pcpu_alloc();

			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which would cause recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	pool_global.max_cnt += extras;
	pool_global.min_cnt += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;
	static_branch_enable(&obj_cache_enabled);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}
1479