// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation, so at
 * most roughly 10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

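/*
 * Worked example (illustrative): with HZ=250, ODEBUG_FREE_WORK_DELAY
 * evaluates to DIV_ROUND_UP(250, 10) = 25 jiffies, i.e. roughly 100ms.
 * The free worker therefore runs at most ~10 times per second and frees
 * about ODEBUG_FREE_WORK_MAX (1024) objects per run, which gives the
 * ~10k objects per second bound described above.
 */
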
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct obj_pool, pool_pcpu);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool		pool_global;
static struct obj_pool		pool_to_free;

static HLIST_HEAD(pool_boot);

/*
 * Because of the presence of percpu free pools, the global pool count
 * (pool_global.cnt) under-counts the objects sitting in the percpu free
 * pools. Similarly, obj_pool_used over-counts those objects. Adjustments
 * are made in debug_stats_show(). Both obj_pool_min_free and
 * obj_pool_max_used can be off.
 */
static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int __data_racy		obj_pool_max_used;
static bool			obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int				debug_objects_pool_size __ro_after_init
					= ODEBUG_POOL_SIZE;
static int				debug_objects_pool_min_level __ro_after_init
					= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __ro_after_init;

/*
 * Track the number of kmem_cache_alloc() and kmem_cache_free() calls.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);
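
/*
 * Example: independent of CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT, the tracker
 * can be switched at boot time via the kernel command line:
 *
 *	debug_objects		enable object debugging
 *	no_debug_objects	disable object debugging
 */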

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static inline bool pool_global_should_refill(void)
{
	return READ_ONCE(pool_global.cnt) < debug_objects_pool_min_level;
}

static inline bool pool_global_must_refill(void)
{
	return READ_ONCE(pool_global.cnt) < (debug_objects_pool_min_level / 2);
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;
	struct debug_obj *obj;

	/*
	 * Reuse objs from the global pool_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	guard(raw_spinlock)(&pool_lock);
	/*
	 * Recheck with the lock held as the worker thread might have
	 * won the race and freed the global free list already.
	 */
	while (pool_to_free.cnt && (pool_global.cnt < debug_objects_pool_min_level)) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
		hlist_add_head(&obj->node, &pool_global.objects);
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
	}
	clear_bit(0, &state);
}

static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *   - one other CPU is already allocating
	 *   - the global pool has not reached the critical level yet
	 */
	if (!pool_global_must_refill() && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_global_should_refill()) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
			if (!new)
				break;
			hlist_add_head(&new->node, &head);
			if (!last)
				last = new;
		}
		if (!cnt)
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		hlist_splice_init(&head, &last->node, &pool_global.objects);
		debug_objects_allocated += cnt;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
	}
	atomic_dec(&cpus_allocating);
}
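
/*
 * Note on the batching in fill_pool() above: the batch is allocated and
 * chained locally outside of pool_lock; hlist_add_head() leaves "head"
 * pointing at the newest object and "last" at the oldest one, so
 * hlist_splice_init() can link the whole chain in front of
 * pool_global.objects in one operation rather than holding pool_lock for
 * cnt separate list insertions.
 */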

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct obj_pool *percpu_pool = this_cpu_ptr(&pool_pcpu);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->objects);
		if (obj) {
			percpu_pool->cnt--;
			goto init_obj;
		}
	} else {
		obj = __alloc_object(&pool_boot);
		goto init_obj;
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&pool_global.objects);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&pool_global.objects);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node, &percpu_pool->objects);
				percpu_pool->cnt++;
				obj_pool_used++;
				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (pool_global.cnt < obj_pool_min_free)
			obj_pool_min_free = pool_global.cnt;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (pool_global.cnt >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full or not. If not,
	 * refill the pool list from the global free list. As it is likely
	 * that a workload may be gearing up to use more and more objects,
	 * don't free any of them until the next round.
	 */
	while (pool_to_free.cnt && pool_global.cnt < debug_objects_pool_size) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &pool_global.objects);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (pool_to_free.cnt) {
		hlist_move_list(&pool_to_free.objects, &tofree);
		WRITE_ONCE(pool_to_free.cnt, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	free_object_list(&tofree);
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct obj_pool *percpu_pool;
	int lookahead_count = 0;
	bool work;

	guard(irqsave)();

	if (unlikely(!obj_cache)) {
		hlist_add_head(&obj->node, &pool_boot);
		return;
	}

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&pool_pcpu);
	if (percpu_pool->cnt < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->objects);
		percpu_pool->cnt++;
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->objects);
		if (!objs[lookahead_count])
			break;
		percpu_pool->cnt--;
	}

	raw_spin_lock(&pool_lock);
	work = (pool_global.cnt > debug_objects_pool_size) && obj_cache &&
	       (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
		hlist_add_head(&obj->node, &pool_to_free.objects);
		if (lookahead_count) {
			WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &pool_to_free.objects);
			}
		}

		if ((pool_global.cnt > debug_objects_pool_size) &&
		    (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from the global pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&pool_global.objects);
				hlist_add_head(&obj->node, &pool_to_free.objects);
				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
				WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
			}
		}
	} else {
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
		hlist_add_head(&obj->node, &pool_global.objects);
		if (lookahead_count) {
			WRITE_ONCE(pool_global.cnt, pool_global.cnt + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &pool_global.objects);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the page-sized chunk of the address (addr >> ODEBUG_CHUNK_SHIFT)
 * for the hash. That way we can check for freed objects simply by
 * checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
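
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12, i.e. 4K pages):
 * objects at 0xffff888012345678 and 0xffff888012345abc share the chunk
 * number 0xffff888012345, so both hash into the same of the
 * ODEBUG_HASH_SIZE (16384) buckets and a lookup for either address only
 * needs to scan that one bucket chain.
 */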

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}
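
/*
 * Example (see the selftest descriptor descr_type_test at the bottom of
 * this file): a descriptor for objects that may be statically allocated
 * supplies ->is_static_object(), so that debug_object_activate() and
 * debug_object_assert_init() can start tracking such an object on first
 * use and treat it as already initialized.
 */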

static void debug_objects_fill_pool(void)
{
	if (unlikely(!obj_cache))
		return;

	if (likely(!pool_global_should_refill()))
		return;

	/* Try reusing objects from the pool_to_free list */
	fill_pool_from_freelist();

	if (likely(!pool_global_should_refill()))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
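
/*
 * Example usage (illustrative sketch; "struct foo", foo_debug_descr and
 * the foo_* helpers are hypothetical, not part of this file): a subsystem
 * wires the tracker into its object lifetime like this:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_stop(struct foo *f)
 *	{
 *		debug_object_deactivate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_destroy(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *	}
 *
 * Objects living on the stack must be initialized with
 * debug_object_init_on_stack() instead, so that the stack annotation
 * check in debug_object_is_on_stack() does not trigger.
 */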

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
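
/*
 * Example (illustrative, continuing the hypothetical foo sketch from
 * above; the enum values are made up): a subsystem can layer its own
 * state machine on top of an active object via ->astate:
 *
 *	enum { FOO_IDLE = 0, FOO_QUEUED = 1 };
 *
 *	debug_object_active_state(f, &foo_debug_descr, FOO_IDLE, FOO_QUEUED);
 *	debug_object_active_state(f, &foo_debug_descr, FOO_QUEUED, FOO_IDLE);
 *
 * Each call succeeds only if the object is ODEBUG_STATE_ACTIVE and its
 * ->astate matches @expect; otherwise an "active_state" violation is
 * reported. Note that debug_object_deactivate() refuses to deactivate an
 * object whose ->astate is non-zero.
 */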

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);
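
/*
 * Example: assuming debugfs is mounted at /sys/kernel/debug, the counters
 * emitted by debug_stats_show() above (max_chain, pool_free,
 * objs_allocated, ...) can be inspected at runtime via
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 */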

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct debug_obj *obj, *new;
	struct hlist_node *tmp;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated = ODEBUG_POOL_SIZE;
	pool_global.cnt = ODEBUG_POOL_SIZE;

	/*
	 * Move the allocated objects to the global pool and disconnect the
	 * boot pool.
	 */
	hlist_move_list(&objects, &pool_global.objects);
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
			hlist_del(&new->node);
			pool_global.cnt--;
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, thus avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}