// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/cpu.h>
#include <linux/debugobjects.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/static_key.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Must be power of two */
#define ODEBUG_BATCH_SIZE	16

/* Initial values. Must all be a multiple of batch size */
#define ODEBUG_POOL_SIZE	(64 * ODEBUG_BATCH_SIZE)
#define ODEBUG_POOL_MIN_LEVEL	(ODEBUG_POOL_SIZE / 4)

#define ODEBUG_POOL_PERCPU_SIZE	(4 * ODEBUG_BATCH_SIZE)

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * Limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation,
 * i.e. at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
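
/*
 * Worked out: ODEBUG_FREE_WORK_MAX is 1024 / 16 = 64 batches per work
 * invocation, each batch holding ODEBUG_BATCH_SIZE objects, i.e. up to
 * 1024 objects per run at no more than 10 runs per second.
 */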

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
	unsigned int		min_cnt;
	unsigned int		max_cnt;
} ____cacheline_aligned;

static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool pool_global = {
	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
	.max_cnt	= ODEBUG_POOL_SIZE,
};

static struct obj_pool pool_to_free = {
	.max_cnt	= UINT_MAX,
};

static HLIST_HEAD(pool_boot);

/*
 * Because of the presence of percpu free pools, the global pool count
 * will under-count the objects sitting in the percpu free pools.
 * Similarly, obj_pool_used will over-count them. Adjustments are made
 * in debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int __data_racy		obj_pool_max_used;
static bool			obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static const struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache		*obj_cache __ro_after_init;

/*
 * Track the numbers of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt;
}

static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt / 2;
}

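/*
 * Objects are managed in batches of ODEBUG_BATCH_SIZE. The first object
 * of each batch caches a pointer to the last hlist node of that batch in
 * obj->batch_last, so a complete batch can be spliced from one pool to
 * another in O(1) without walking the list.
 */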
static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	struct hlist_node *last, *next_batch, *first_batch;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt || !src->cnt)
		return false;

	first_batch = src->objects.first;
	obj = hlist_entry(first_batch, typeof(*obj), node);
	last = obj->batch_last;
	next_batch = last->next;

	/* Move the next batch to the front of the source pool */
	src->objects.first = next_batch;
	if (next_batch)
		next_batch->pprev = &src->objects.first;

	/* Add the extracted batch to the destination pool */
	last->next = dst->objects.first;
	if (last->next)
		last->next->pprev = &last->next;
	first_batch->pprev = &dst->objects.first;
	dst->objects.first = first_batch;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}

static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
{
	struct hlist_node *last;
	struct debug_obj *obj;

	if (dst->cnt >= dst->max_cnt)
		return false;

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;

	hlist_splice_init(head, last, &dst->objects);
	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
	return true;
}

static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
{
	struct hlist_node *last, *next;
	struct debug_obj *obj;

	if (!src->cnt)
		return false;

	/* Move the complete list to the head */
	hlist_move_list(&src->objects, head);

	obj = hlist_entry(head->first, typeof(*obj), node);
	last = obj->batch_last;
	next = last->next;
	/* Disconnect the batch from the list */
	last->next = NULL;

	/* Move the node after last back to the source pool. */
	src->objects.first = next;
	if (next)
		next->pprev = &src->objects.first;

	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
	return true;
}

static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj;

	if (unlikely(!list->first))
		return NULL;

	obj = hlist_entry(list->first, typeof(*obj), node);
	hlist_del(&obj->node);
	return obj;
}

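/*
 * Allocate one object from the per-CPU pool. If the pool is empty, pull
 * a batch from the shared pools under pool_lock, preferring recycled
 * objects on pool_to_free over fresh ones in pool_global, and retry.
 */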
static struct debug_obj *pcpu_alloc(void)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);

	lockdep_assert_irqs_disabled();

	for (;;) {
		struct debug_obj *obj = __alloc_object(&pcp->objects);

		if (likely(obj)) {
			pcp->cnt--;
			return obj;
		}

		guard(raw_spinlock)(&pool_lock);
		if (!pool_move_batch(pcp, &pool_to_free)) {
			if (!pool_move_batch(pcp, &pool_global))
				return NULL;
		}
		obj_pool_used += ODEBUG_BATCH_SIZE;

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (pool_global.cnt < obj_pool_min_free)
			obj_pool_min_free = pool_global.cnt;
	}
}

static void pcpu_free(struct debug_obj *obj)
{
	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
	struct debug_obj *first;

	lockdep_assert_irqs_disabled();

	if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
		obj->batch_last = &obj->node;
	} else {
		first = hlist_entry(pcp->objects.first, typeof(*first), node);
		obj->batch_last = first->batch_last;
	}
	hlist_add_head(&obj->node, &pcp->objects);
	pcp->cnt++;

	/* Pool full? */
	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
		return;

	/* Remove a batch from the per CPU pool */
	guard(raw_spinlock)(&pool_lock);
	/* Try to fit the batch into the pool_global first */
	if (!pool_move_batch(&pool_global, pcp))
		pool_move_batch(&pool_to_free, pcp);
	obj_pool_used -= ODEBUG_BATCH_SIZE;
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;

	/*
	 * Reuse objects from the global pool_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	/* Avoid taking the lock when there is no work to do */
	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
		guard(raw_spinlock)(&pool_lock);
		/* Move a batch if possible */
		pool_move_batch(&pool_global, &pool_to_free);
	}
	clear_bit(0, &state);
}

static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
{
	struct hlist_node *last = NULL;
	struct debug_obj *obj;

	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
		obj = kmem_cache_zalloc(cache, gfp);
		if (!obj) {
			free_object_list(head);
			return false;
		}
		debug_objects_allocated++;

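		/*
		 * Objects are added at the head, so the first allocation
		 * ends up as the batch tail. Recording that tail in every
		 * object's batch_last keeps the head object's pointer
		 * valid no matter which allocation leads the batch.
		 */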
		if (!last)
			last = &obj->node;
		obj->batch_last = last;

		hlist_add_head(&obj->node, head);
	}
	return true;
}

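/*
 * Refill pool_global with freshly allocated batches. __GFP_HIGH permits
 * dipping into emergency reserves and __GFP_NOWARN suppresses allocation
 * failure warnings, since running dry is handled gracefully here.
 */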
static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *   - One other CPU is already allocating
	 *   - the global pool has not reached the critical level yet
	 */
	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_should_refill(&pool_global)) {
		HLIST_HEAD(head);

		if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		if (!pool_push_batch(&pool_global, &head))
			pool_push_batch(&pool_to_free, &head);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
				      const struct debug_obj_descr *descr)
{
	struct debug_obj *obj;

	if (static_branch_likely(&obj_cache_enabled))
		obj = pcpu_alloc();
	else
		obj = __alloc_object(&pool_boot);

	if (likely(obj)) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/* workqueue function to free objects. */
static void free_obj_work(struct work_struct *work)
{
	bool free = true;

	WRITE_ONCE(obj_freeing, false);

	if (!pool_count(&pool_to_free))
		return;

	for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
		HLIST_HEAD(tofree);

		/* Acquire and drop the lock for each batch */
		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
			if (!pool_to_free.cnt)
				return;

			/* Refill the global pool if possible */
			if (pool_move_batch(&pool_global, &pool_to_free)) {
				/* Don't free as there seems to be demand */
				free = false;
			} else if (free) {
				pool_pop_batch(&tofree, &pool_to_free);
			} else {
				return;
			}
		}
		free_object_list(&tofree);
	}
}

static void __free_object(struct debug_obj *obj)
{
	guard(irqsave)();
	if (static_branch_likely(&obj_cache_enabled))
		pcpu_free(obj);
	else
		hlist_add_head(&obj->node, &pool_boot);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
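
/*
 * Example: with 4k pages, the addresses 0x11008 and 0x11ff0 both lie in
 * chunk 0x11000 and therefore hash to the same bucket. That is what lets
 * __debug_check_no_obj_freed() scan a freed memory range chunk by chunk.
 */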

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	if (!static_branch_likely(&obj_cache_enabled))
		return;

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from the pool_to_free list */
	fill_pool_from_freelist();

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
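
/*
 * Typical usage, as a minimal sketch. "struct foo" and foo_debug_descr
 * are illustrative, not part of this file:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 */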

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
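
/*
 * Illustrative call site, continuing the hypothetical foo example above:
 *
 *	if (debug_object_activate(f, &foo_debug_descr))
 *		pr_err("foo: activated an object in a bad state\n");
 */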

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
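/*
 * Scan every chunk overlapping [address, address + size) for tracked
 * objects. The chunk count rounds the range out to ODEBUG_CHUNK_SIZE
 * boundaries: e.g. a 100 byte area straddling a page boundary spans
 * two chunks and therefore two hash buckets.
 */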
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

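	/*
	 * Per-CPU free objects are accounted as used in obj_pool_used and
	 * are not included in pool_global.cnt, so fold them back in below
	 * to make pool_free and pool_used reflect the real totals.
	 */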
	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
			goto free;
		pool_push_batch(&pool_global, &objects);
	}

	/* Disconnect the boot pool. */
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			struct debug_obj *new = pcpu_alloc();

			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool with the SLAB_DEBUG_OBJECTS flag set. This flag prevents
 * the debug code from being called on kmem_cache_free() for the debug
 * tracker objects themselves, which would recurse.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof(struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	pool_global.max_cnt += extras;
	pool_global.min_cnt += extras;
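	/* e.g. 8 possible CPUs add 8 * 16 = 128 objects to both thresholds */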

	/* Everything worked. Expose the cache */
	obj_cache = cache;
	static_branch_enable(&obj_cache_enabled);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}