// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/cpu.h>
#include <linux/debugobjects.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/static_key.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Must be power of two */
#define ODEBUG_BATCH_SIZE	16

/* Initial values. Must all be a multiple of batch size */
#define ODEBUG_POOL_SIZE	(64 * ODEBUG_BATCH_SIZE)
#define ODEBUG_POOL_MIN_LEVEL	(ODEBUG_POOL_SIZE / 4)

#define ODEBUG_POOL_PERCPU_SIZE	(4 * ODEBUG_BATCH_SIZE)

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via the workqueue is limited to a
 * maximum frequency of 10Hz and about 1024 objects per freeing
 * operation, i.e. at most about 10k debug objects are freed per
 * second.
 */
#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
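
/*
 * Worked example of the cap above, assuming the defaults in this file:
 * with ODEBUG_BATCH_SIZE = 16, ODEBUG_FREE_WORK_MAX is 1024 / 16 = 64
 * batches, i.e. 64 * 16 = 1024 objects per work invocation.
 * ODEBUG_FREE_WORK_DELAY is DIV_ROUND_UP(HZ, 10) ticks, i.e. roughly
 * 100ms regardless of CONFIG_HZ, so at most about 10 * 1024 = 10240
 * objects are freed per second.
 */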

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

struct pool_stats {
	unsigned int		cur_used;
	unsigned int		max_used;
	unsigned int		min_fill;
};

struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
	unsigned int		min_cnt;
	unsigned int		max_cnt;
	struct pool_stats	stats;
} ____cacheline_aligned;

static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool pool_global = {
	.min_cnt		= ODEBUG_POOL_MIN_LEVEL,
	.max_cnt		= ODEBUG_POOL_SIZE,
	.stats			= {
		.min_fill	= ODEBUG_POOL_SIZE,
	},
};

static struct obj_pool pool_to_free = {
	.max_cnt	= UINT_MAX,
};

static HLIST_HEAD(pool_boot);

static bool			obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static const struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache		*obj_cache __ro_after_init;
/*
 * Track the number of kmem_cache_alloc() and kmem_cache_free() calls.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt;
}

static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt / 2;
}

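/*
 * The pool lists are maintained in batches of ODEBUG_BATCH_SIZE
 * objects. The first object of a batch carries a batch_last pointer
 * to the last hlist node of its batch so that a complete batch can be
 * spliced in or out in O(1). Illustrative sketch of a pool list
 * holding two batches (B == ODEBUG_BATCH_SIZE):
 *
 *  objects.first -> obj[0] -> obj[1] -> ... -> obj[B-1] -> obj[B] -> ...
 *                     |                           ^
 *                     +-------- batch_last -------+
 */
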
static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	struct hlist_node *last, *next_batch, *first_batch;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt || !src->cnt)
		return false;

	first_batch = src->objects.first;
	obj = hlist_entry(first_batch, typeof(*obj), node);
	last = obj->batch_last;
	next_batch = last->next;

	/* Move the next batch to the front of the source pool */
	src->objects.first = next_batch;
	if (next_batch)
		next_batch->pprev = &src->objects.first;

	/* Add the extracted batch to the destination pool */
	last->next = dst->objects.first;
	if (last->next)
		last->next->pprev = &last->next;
	first_batch->pprev = &dst->objects.first;
	dst->objects.first = first_batch;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}

static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
{
	struct hlist_node *last;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt)
		return false;

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;

	hlist_splice_init(head, last, &dst->objects);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}

static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
	struct hlist_node *last, *next;
	struct debug_obj *obj;

	if (!src->cnt)
		return false;

	/* Move the complete list to the head */
	hlist_move_list(&src->objects, head);

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;
	next = last->next;
	/* Disconnect the batch from the list */
	last->next = NULL;

	/* Move the node after last back to the source pool. */
	src->objects.first = next;
	if (next)
		next->pprev = &src->objects.first;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	return true;
}

static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj;

	if (unlikely(!list->first))
		return NULL;

	obj = hlist_entry(list->first, typeof(*obj), node);
	hlist_del(&obj->node);
	return obj;
}

static void pcpu_refill_stats(void)
{
	struct pool_stats *stats = &pool_global.stats;

	WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);

	if (stats->cur_used > stats->max_used)
		stats->max_used = stats->cur_used;

	if (pool_global.cnt < stats->min_fill)
		stats->min_fill = pool_global.cnt;
}

static struct debug_obj *pcpu_alloc(void)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	for (;;) {
		struct debug_obj *obj = __alloc_object(&pcp->objects);

		if (likely(obj)) {
			pcp->cnt--;
			return obj;
		}

		guard(raw_spinlock)(&pool_lock);
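		/*
		 * Refill from the to-free list first so that objects
		 * which are already queued up for freeing get reused
		 * before the global reserve is consumed.
		 */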
		if (!pool_move_batch(pcp, &pool_to_free)) {
			if (!pool_move_batch(pcp, &pool_global))
				return NULL;
		}
		pcpu_refill_stats();
	}
}

static void pcpu_free(struct debug_obj *obj)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
	struct debug_obj *first;

	lockdep_assert_irqs_disabled();

	if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
		obj->batch_last = &obj->node;
	} else {
		first = hlist_entry(pcp->objects.first, typeof(*first), node);
		obj->batch_last = first->batch_last;
	}
	hlist_add_head(&obj->node, &pcp->objects);
	pcp->cnt++;

	/* Pool full? */
	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
		return;

	/* Remove a batch from the per CPU pool */
	guard(raw_spinlock)(&pool_lock);
	/* Try to fit the batch into the pool_global first */
	if (!pool_move_batch(&pool_global, pcp))
		pool_move_batch(&pool_to_free, pcp);
	WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;

	/*
	 * Reuse objects from the global pool_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	/* Avoid taking the lock when there is no work to do */
	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
		guard(raw_spinlock)(&pool_lock);
		/* Move a batch if possible */
		pool_move_batch(&pool_global, &pool_to_free);
	}
	clear_bit(0, &state);
}

static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
{
	struct hlist_node *last = NULL;
	struct debug_obj *obj;

	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
		obj = kmem_cache_zalloc(cache, gfp);
		if (!obj) {
			free_object_list(head);
			return false;
		}
		debug_objects_allocated++;

		if (!last)
			last = &obj->node;
		obj->batch_last = last;

		hlist_add_head(&obj->node, head);
	}
	return true;
}

static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *   - One other CPU is already allocating
	 *   - the global pool has not reached the critical level yet
	 */
	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_should_refill(&pool_global)) {
		HLIST_HEAD(head);

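		/*
		 * Note on the GFP flags below: without __GFP_DIRECT_RECLAIM
		 * the allocation never sleeps, which makes it usable from
		 * atomic contexts. __GFP_HIGH grants access to the memory
		 * reserves and __GFP_NOWARN suppresses failure warnings, as
		 * failure is handled gracefully by ending the refill loop.
		 */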
		if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		if (!pool_push_batch(&pool_global, &head))
			pool_push_batch(&pool_to_free, &head);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
				      const struct debug_obj_descr *descr)
{
	struct debug_obj *obj;

	if (static_branch_likely(&obj_cache_enabled))
		obj = pcpu_alloc();
	else
		obj = __alloc_object(&pool_boot);

	if (likely(obj)) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/* workqueue function to free objects. */
static void free_obj_work(struct work_struct *work)
{
	bool free = true;

	WRITE_ONCE(obj_freeing, false);

	if (!pool_count(&pool_to_free))
		return;

	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
		HLIST_HEAD(tofree);

		/* Acquire and drop the lock for each batch */
		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
			if (!pool_to_free.cnt)
				return;

			/* Refill the global pool if possible */
			if (pool_move_batch(&pool_global, &pool_to_free)) {
				/* Don't free as there seems to be demand */
				free = false;
			} else if (free) {
				pool_pop_batch(&tofree, &pool_to_free);
			} else {
				return;
			}
		}
		free_object_list(&tofree);
	}
}

static void __free_object(struct debug_obj *obj)
{
	guard(irqsave)();
	if (static_branch_likely(&obj_cache_enabled))
		pcpu_free(obj);
	else
		hlist_add_head(&obj->node, &pool_boot);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
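
/*
 * Worked example, assuming 4K pages so ODEBUG_CHUNK_SHIFT is 12:
 * objects at 0x...41000 and 0x...41ff8 share the chunk number 0x...41
 * and thus always hash to the same bucket, while 0x...42000 starts a
 * new chunk and may land in a different bucket. hash_long() folds the
 * chunk number into ODEBUG_HASH_BITS (14) bits, i.e. an index into the
 * 16384 hash buckets.
 */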

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	if (!static_branch_likely(&obj_cache_enabled))
		return;

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from the pool_to_free list */
	fill_pool_from_freelist();

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
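
/*
 * Illustrative lifecycle sketch, not part of this file; the names
 * my_timer, my_descr and the helpers are hypothetical. A typical user
 * wires up a descriptor and brackets the real object's lifetime with
 * the debugobjects calls (the timer code follows this pattern):
 *
 *	struct my_timer {
 *		unsigned long flags;
 *	};
 *
 *	static const struct debug_obj_descr my_descr = {
 *		.name = "my_timer",
 *	};
 *
 *	void my_timer_setup(struct my_timer *t)
 *	{
 *		debug_object_init(t, &my_descr);	// start tracking
 *	}
 *
 *	void my_timer_start(struct my_timer *t)
 *	{
 *		debug_object_activate(t, &my_descr);	// init -> active
 *	}
 *
 *	void my_timer_stop(struct my_timer *t)
 *	{
 *		debug_object_deactivate(t, &my_descr);	// active -> inactive
 *	}
 *
 *	void my_timer_shutdown(struct my_timer *t)
 *	{
 *		debug_object_free(t, &my_descr);	// stop tracking
 *	}
 */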

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
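
/*
 * Sketch of the static object path handled above (hypothetical names):
 * a descriptor can provide ->is_static_object() so that activating a
 * statically initialized object which was never passed through
 * debug_object_init() gets tracked instead of flagged as a failure:
 *
 *	static bool my_is_static(void *addr)
 *	{
 *		struct my_timer *t = addr;
 *
 *		return t->flags & MY_TIMER_STATIC_INIT;
 *	}
 *
 *	static const struct debug_obj_descr my_descr = {
 *		.name			= "my_timer",
 *		.is_static_object	= my_is_static,
 *	};
 */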

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
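
/*
 * Usage note: 'astate' provides a user defined state machine on top of
 * ODEBUG_STATE_ACTIVE. RCU, for example, moves rcu_head objects
 * between a "ready" and a "queued" astate around call_rcu() to catch
 * double invocations (see debug_rcu_head_queue() in kernel/rcu/rcu.h).
 * The transition above only succeeds when the object is active and its
 * current astate matches @expect.
 */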

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	unsigned int cpu, pool_used, pcp_free = 0;

	/*
	 * pool_global.stats.cur_used is the number of objects currently
	 * handed out to the per CPU pools, accounted in batch granularity.
	 * Subtract the number of objects still sitting free in the per
	 * CPU pools. As this is lockless the number is an estimate.
	 */
	for_each_possible_cpu(cpu)
		pcp_free += per_cpu(pool_pcpu.cnt, cpu);

	pool_used = data_race(pool_global.stats.cur_used);
	pcp_free = min(pool_used, pcp_free);
	pool_used -= pcp_free;

	seq_printf(m, "max_chain     : %d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   : %d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      : %d\n", debug_objects_warnings);
	seq_printf(m, "fixups        : %d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     : %u\n", pool_count(&pool_global) + pcp_free);
	seq_printf(m, "pool_pcp_free : %u\n", pcp_free);
	seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
	seq_printf(m, "pool_used     : %u\n", pool_used);
	seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
	seq_printf(m, "on_free_list  : %u\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated: %d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    : %d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
			goto free;
		pool_push_batch(&pool_global, &objects);
	}

	/* Disconnect the boot pool. */
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			struct debug_obj *new = pcpu_alloc();

			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof(struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	pool_global.max_cnt += extras;
	pool_global.min_cnt += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;
	static_branch_enable(&obj_cache_enabled);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}