// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * Limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation,
 * i.e. at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

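/*
 * Quick sanity check of that rate (illustrative, assuming HZ=250):
 * ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies = 100ms,
 * so free_obj_work() runs at most 10 times per second and frees at
 * most 10 * ODEBUG_FREE_WORK_MAX = 10240 objects per second.
 */
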
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool		pool_global;
static struct obj_pool		pool_to_free;

static HLIST_HEAD(pool_boot);

/*
 * Because of the presence of percpu free pools, the global free count
 * (pool_global.cnt) under-counts the objects sitting in the percpu
 * free pools, and obj_pool_used over-counts them. Adjustments are made
 * in debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off as well.
 */
static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int __data_racy		obj_pool_max_used;
static bool			obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int				debug_objects_pool_size __ro_after_init
					= ODEBUG_POOL_SIZE;
static int				debug_objects_pool_min_level __ro_after_init
					= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __ro_after_init;

/*
 * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static inline bool pool_global_should_refill(void)
{
	return READ_ONCE(pool_global.cnt) < debug_objects_pool_min_level;
}

static inline bool pool_global_must_refill(void)
{
	return READ_ONCE(pool_global.cnt) < (debug_objects_pool_min_level / 2);
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;
	struct debug_obj *obj;

	/*
	 * Reuse objects from the global pool_to_free list; they will be
	 * reinitialized when allocated.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	guard(raw_spinlock)(&pool_lock);
	/*
	 * Recheck with the lock held as the worker thread might have
	 * won the race and freed the global free list already.
	 */
	while (pool_to_free.cnt && (pool_global.cnt < debug_objects_pool_min_level)) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
		hlist_add_head(&obj->node, &pool_global.objects);
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
	}
	clear_bit(0, &state);
}

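/*
 * The test_bit() check before test_and_set_bit() above is the common
 * "test and test-and-set" pattern: the cheap shared read filters out
 * contenders without making the cache line exclusive, so only a likely
 * winner attempts the atomic RMW. A minimal sketch of the same idea,
 * with a hypothetical flag word (not part of this file):
 *
 *	static unsigned long busy;
 *
 *	static bool try_enter(void)
 *	{
 *		if (test_bit(0, &busy) || test_and_set_bit(0, &busy))
 *			return false;	// lost the race, stay out
 *		return true;		// caller must clear_bit(0, &busy)
 *	}
 */
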
static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *   - another CPU is already allocating
	 *   - the global pool has not reached the critical level yet
	 */
	if (!pool_global_must_refill() && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_global_should_refill()) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
			if (!new)
				break;
			hlist_add_head(&new->node, &head);
			if (!last)
				last = new;
		}
		if (!cnt)
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		hlist_splice_init(&head, &last->node, &pool_global.objects);
		debug_objects_allocated += cnt;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	} else {
		obj = __alloc_object(&pool_boot);
		goto init_obj;
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&pool_global.objects);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&pool_global.objects);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node, &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (pool_global.cnt < obj_pool_min_free)
			obj_pool_min_free = pool_global.cnt;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (pool_global.cnt >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * Objects on the global pool list might have been allocated while
	 * the work was pending, so recheck whether the pool list is full.
	 * If it is not, refill it from the global free list. As it is
	 * likely that a workload is gearing up to use more and more
	 * objects, don't free any of them until the next round.
	 */
	while (pool_to_free.cnt && pool_global.cnt < debug_objects_pool_size) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &pool_global.objects);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * The pool list is already full and there are still objects on the
	 * free list. Move the remaining free objects to a temporary list
	 * to free the memory outside the pool_lock held region.
	 */
	if (pool_to_free.cnt) {
		hlist_move_list(&pool_to_free.objects, &tofree);
		WRITE_ONCE(pool_to_free.cnt, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	free_object_list(&tofree);
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	bool work;

	guard(irqsave)();

	if (unlikely(!obj_cache)) {
		hlist_add_head(&obj->node, &pool_boot);
		return;
	}

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

	raw_spin_lock(&pool_lock);
	work = (pool_global.cnt > debug_objects_pool_size) && obj_cache &&
	       (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
		hlist_add_head(&obj->node, &pool_to_free.objects);
		if (lookahead_count) {
			WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &pool_to_free.objects);
			}
		}

		if ((pool_global.cnt > debug_objects_pool_size) &&
		    (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from the global pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&pool_global.objects);
				hlist_add_head(&obj->node, &pool_to_free.objects);
				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
				WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
			}
		}
	} else {
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
		hlist_add_head(&obj->node, &pool_global.objects);
		if (lookahead_count) {
			WRITE_ONCE(pool_global.cnt, pool_global.cnt + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &pool_global.objects);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing, and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct debug_percpu_free *pcp = per_cpu_ptr(&percpu_obj_pool, cpu);

	put_objects(&pcp->free_objs);
	pcp->obj_free = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

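/*
 * Illustration of the pfn hashing (assuming 4K pages): addresses that
 * share a page also share a bucket, so a lookup for any address in a
 * freed range only has to scan the buckets covering that range:
 *
 *	get_bucket(0x12340008) == get_bucket(0x12340ff8)
 *
 * because both resolve to hash_long(0x12340, ODEBUG_HASH_BITS).
 */
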
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

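/*
 * Sketch of the descriptor side of the static-object handling above
 * (hypothetical "foo" object and magic value, not part of this file):
 * a statically initialized object is recognized via a marker set by
 * its compile-time initializer, so debug_object_activate() on it
 * succeeds without a prior debug_object_init().
 *
 *	static bool foo_is_static(void *addr)
 *	{
 *		struct foo *f = addr;
 *
 *		return f->magic == FOO_STATIC_MAGIC;
 *	}
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name			= "foo",
 *		.is_static_object	= foo_is_static,
 *	};
 */
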
static void debug_objects_fill_pool(void)
{
	if (unlikely(!obj_cache))
		return;

	if (likely(!pool_global_should_refill()))
		return;

	/* Try reusing objects from the pool_to_free list */
	fill_pool_from_freelist();

	if (likely(!pool_global_should_refill()))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

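/*
 * Typical usage sketch for a dynamically allocated object (hypothetical
 * "foo" type and descriptor, not part of this file): the owning
 * subsystem mirrors the object's life time into the tracker.
 *
 *	struct foo { struct list_head list; };
 *	static const struct debug_obj_descr foo_debug_descr = { .name = "foo" };
 *
 *	static struct foo *foo_create(void)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (f)
 *			debug_object_init(f, &foo_debug_descr);
 *		return f;
 *	}
 *
 *	static void foo_destroy(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *		kfree(f);
 *	}
 */
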
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

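/*
 * On-stack sketch (hypothetical "foo" type as in the previous example):
 * stack objects must use the _on_stack variant so that
 * debug_object_is_on_stack() does not warn, and they must be removed
 * from tracking before the stack frame goes away.
 *
 *	void foo_do_something(void)
 *	{
 *		struct foo f;
 *
 *		debug_object_init_on_stack(&f, &foo_debug_descr);
 *		// ... use f ...
 *		debug_object_free(&f, &foo_debug_descr);
 *	}
 */
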
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

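/*
 * Sketch of how a user drives the astate machine (modelled loosely on
 * the rcu_head tracking in kernel/rcu/, simplified and with hypothetical
 * names): the object stays ODEBUG_STATE_ACTIVE throughout, while astate
 * toggles between subsystem-defined values; a mismatch between @expect
 * and the recorded astate triggers the report above.
 *
 *	#define STATE_FOO_READY		0
 *	#define STATE_FOO_QUEUED	1
 *
 *	static void foo_queue(struct foo *f)
 *	{
 *		debug_object_active_state(f, &foo_debug_descr,
 *					  STATE_FOO_READY, STATE_FOO_QUEUED);
 *	}
 */
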
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

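	/*
	 * Worked example of the chunk math above (illustrative, 4K chunks
	 * assumed): freeing size 0x2000 at saddr 0x1000800 gives
	 * eaddr 0x1002800 and paddr 0x1000000, so
	 * chunks = (0x2800 + 0xfff) >> 12 = 3. Three buckets are scanned
	 * even though the size is only two chunks, because the range
	 * straddles three chunk-sized pages.
	 */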
	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

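/*
 * With debugfs mounted at its usual /sys/kernel/debug location, the
 * counters above (directory and file names as created below) are
 * readable via:
 *
 *	cat /sys/kernel/debug/debug_objects/stats
 */
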
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the boot list (pool_boot). After
 * this call the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct debug_obj *obj, *new;
	struct hlist_node *tmp;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated = ODEBUG_POOL_SIZE;
	pool_global.cnt = ODEBUG_POOL_SIZE;

	/*
	 * Move the allocated objects to the global pool and disconnect the
	 * boot pool.
	 */
	hlist_move_list(&objects, &pool_global.objects);
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
			hlist_del(&new->node);
			pool_global.cnt--;
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof(struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
}