xref: /linux-6.15/lib/debugobjects.c (revision d8c6cd3a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic infrastructure for lifetime debugging of objects.
4  *
5  * Copyright (C) 2008, Thomas Gleixner <[email protected]>
6  */
7 
8 #define pr_fmt(fmt) "ODEBUG: " fmt
9 
10 #include <linux/debugobjects.h>
11 #include <linux/interrupt.h>
12 #include <linux/sched.h>
13 #include <linux/sched/task_stack.h>
14 #include <linux/seq_file.h>
15 #include <linux/debugfs.h>
16 #include <linux/slab.h>
17 #include <linux/hash.h>
18 #include <linux/kmemleak.h>
19 #include <linux/cpu.h>
20 
21 #define ODEBUG_HASH_BITS	14
22 #define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
23 
24 #define ODEBUG_POOL_SIZE	1024
25 #define ODEBUG_POOL_MIN_LEVEL	256
26 #define ODEBUG_POOL_PERCPU_SIZE	64
27 #define ODEBUG_BATCH_SIZE	16
28 
29 #define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
30 #define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
31 #define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
32 
33 /*
34  * The freeing of debug objects via the workqueue is limited to a maximum
35  * frequency of 10Hz and about 1024 objects per freeing operation, so at
36  * most ~10k debug objects are freed per second.
37  */
38 #define ODEBUG_FREE_WORK_MAX	1024
39 #define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
40 
41 struct debug_bucket {
42 	struct hlist_head	list;
43 	raw_spinlock_t		lock;
44 };
45 
46 /*
47  * Debug object percpu free list
48  * Access is protected by disabling irq
49  */
50 struct debug_percpu_free {
51 	struct hlist_head	free_objs;
52 	int			obj_free;
53 };
54 
55 static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
56 
57 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
58 
59 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
60 
61 static DEFINE_RAW_SPINLOCK(pool_lock);
62 
63 static HLIST_HEAD(obj_pool);
64 static HLIST_HEAD(obj_to_free);
65 
66 /*
67  * Because of the presence of percpu free pools, obj_pool_free will
68  * under-count those in the percpu free pools. Similarly, obj_pool_used
69  * will over-count those in the percpu free pools. Adjustments will be
70  * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
71  * can be off.
72  */
73 static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
74 static int __data_racy		obj_pool_free = ODEBUG_POOL_SIZE;
75 static int			obj_pool_used;
76 static int __data_racy		obj_pool_max_used;
77 static bool			obj_freeing;
78 /* The number of objs on the global free list */
79 static int			obj_nr_tofree;
80 
81 static int __data_racy			debug_objects_maxchain __read_mostly;
82 static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
83 static int __data_racy			debug_objects_fixups __read_mostly;
84 static int __data_racy			debug_objects_warnings __read_mostly;
85 static bool __data_racy			debug_objects_enabled __read_mostly
86 					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
87 static int				debug_objects_pool_size __ro_after_init
88 					= ODEBUG_POOL_SIZE;
89 static int				debug_objects_pool_min_level __ro_after_init
90 					= ODEBUG_POOL_MIN_LEVEL;
91 
92 static const struct debug_obj_descr *descr_test  __read_mostly;
93 static struct kmem_cache	*obj_cache __ro_after_init;
94 
95 /*
96  * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
97  */
98 static int __data_racy		debug_objects_allocated;
99 static int __data_racy		debug_objects_freed;
100 
101 static void free_obj_work(struct work_struct *work);
102 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
103 
104 static int __init enable_object_debug(char *str)
105 {
106 	debug_objects_enabled = true;
107 	return 0;
108 }
109 early_param("debug_objects", enable_object_debug);
110 
111 static int __init disable_object_debug(char *str)
112 {
113 	debug_objects_enabled = false;
114 	return 0;
115 }
116 early_param("no_debug_objects", disable_object_debug);
117 
118 static const char *obj_states[ODEBUG_STATE_MAX] = {
119 	[ODEBUG_STATE_NONE]		= "none",
120 	[ODEBUG_STATE_INIT]		= "initialized",
121 	[ODEBUG_STATE_INACTIVE]		= "inactive",
122 	[ODEBUG_STATE_ACTIVE]		= "active",
123 	[ODEBUG_STATE_DESTROYED]	= "destroyed",
124 	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
125 };
126 
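/*
 * Free all objects on @head back to the kmem cache and account for them
 * in debug_objects_freed.
 */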
127 static void free_object_list(struct hlist_head *head)
128 {
129 	struct hlist_node *tmp;
130 	struct debug_obj *obj;
131 	int cnt = 0;
132 
133 	hlist_for_each_entry_safe(obj, tmp, head, node) {
134 		hlist_del(&obj->node);
135 		kmem_cache_free(obj_cache, obj);
136 		cnt++;
137 	}
138 	debug_objects_freed += cnt;
139 }
140 
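/*
 * Refill the global object pool by recycling objects from the obj_to_free
 * list, without allocating new memory. A state bit keeps concurrent
 * callers from piling up on pool_lock.
 */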
141 static void fill_pool_from_freelist(void)
142 {
143 	static unsigned long state;
144 	struct debug_obj *obj;
145 
146 	/*
147 	 * Reuse objs from the global obj_to_free list; they will be
148 	 * reinitialized when allocating.
149 	 *
150 	 * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
151 	 * the WRITE_ONCE() in pool_lock critical sections.
152 	 */
153 	if (!READ_ONCE(obj_nr_tofree))
154 		return;
155 
156 	/*
157 	 * Prevent the context from being scheduled or interrupted after
158 	 * setting the state flag.
159 	 */
160 	guard(irqsave)();
161 
162 	/*
163 	 * Avoid lock contention on &pool_lock and avoid making the cache
164 	 * line exclusive by testing the bit before attempting to set it.
165 	 */
166 	if (test_bit(0, &state) || test_and_set_bit(0, &state))
167 		return;
168 
169 	guard(raw_spinlock)(&pool_lock);
170 	/*
171 	 * Recheck with the lock held as the worker thread might have
172 	 * won the race and freed the global free list already.
173 	 */
174 	while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
175 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
176 		hlist_del(&obj->node);
177 		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
178 		hlist_add_head(&obj->node, &obj_pool);
179 		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
180 	}
181 	clear_bit(0, &state);
182 }
183 
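/*
 * Refill the global object pool from the slab cache in batches of
 * ODEBUG_BATCH_SIZE until it reaches debug_objects_pool_min_level.
 * Allocation is skipped when another CPU is already allocating and the
 * pool has not dropped to the critical level yet.
 */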
184 static void fill_pool(void)
185 {
186 	static atomic_t cpus_allocating;
187 
188 	/*
189 	 * Avoid allocation and lock contention when:
190 	 *   - another CPU is already allocating, and
191 	 *   - the global pool has not reached the critical level yet
192 	 */
193 	if (READ_ONCE(obj_pool_free) > (debug_objects_pool_min_level / 2) &&
194 	    atomic_read(&cpus_allocating))
195 		return;
196 
197 	atomic_inc(&cpus_allocating);
198 	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
199 		struct debug_obj *new, *last = NULL;
200 		HLIST_HEAD(head);
201 		int cnt;
202 
203 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
204 			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
205 			if (!new)
206 				break;
207 			hlist_add_head(&new->node, &head);
208 			if (!last)
209 				last = new;
210 		}
211 		if (!cnt)
212 			break;
213 
214 		guard(raw_spinlock_irqsave)(&pool_lock);
215 		hlist_splice_init(&head, &last->node, &obj_pool);
216 		debug_objects_allocated += cnt;
217 		WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
218 	}
219 	atomic_dec(&cpus_allocating);
220 }
221 
222 /*
223  * Lookup an object in the hash bucket.
224  */
225 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
226 {
227 	struct debug_obj *obj;
228 	int cnt = 0;
229 
230 	hlist_for_each_entry(obj, &b->list, node) {
231 		cnt++;
232 		if (obj->object == addr)
233 			return obj;
234 	}
235 	if (cnt > debug_objects_maxchain)
236 		debug_objects_maxchain = cnt;
237 
238 	return NULL;
239 }
240 
241 /*
242  * Allocate a new object from the hlist
243  */
244 static struct debug_obj *__alloc_object(struct hlist_head *list)
245 {
246 	struct debug_obj *obj = NULL;
247 
248 	if (list->first) {
249 		obj = hlist_entry(list->first, typeof(*obj), node);
250 		hlist_del(&obj->node);
251 	}
252 
253 	return obj;
254 }
255 
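/*
 * Allocate a tracking object for @addr, preferring the percpu free pool
 * and falling back to the global pool. When allocating from the global
 * pool, a batch of objects is moved into the percpu pool as lookahead.
 * On success the object is initialized and added to the hash bucket.
 */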
256 static struct debug_obj *
257 alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
258 {
259 	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
260 	struct debug_obj *obj;
261 
262 	if (likely(obj_cache)) {
263 		obj = __alloc_object(&percpu_pool->free_objs);
264 		if (obj) {
265 			percpu_pool->obj_free--;
266 			goto init_obj;
267 		}
268 	}
269 
270 	raw_spin_lock(&pool_lock);
271 	obj = __alloc_object(&obj_pool);
272 	if (obj) {
273 		obj_pool_used++;
274 		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
275 
276 		/*
277 		 * Looking ahead, allocate one batch of debug objects and
278 		 * put them into the percpu free pool.
279 		 */
280 		if (likely(obj_cache)) {
281 			int i;
282 
283 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
284 				struct debug_obj *obj2;
285 
286 				obj2 = __alloc_object(&obj_pool);
287 				if (!obj2)
288 					break;
289 				hlist_add_head(&obj2->node,
290 					       &percpu_pool->free_objs);
291 				percpu_pool->obj_free++;
292 				obj_pool_used++;
293 				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
294 			}
295 		}
296 
297 		if (obj_pool_used > obj_pool_max_used)
298 			obj_pool_max_used = obj_pool_used;
299 
300 		if (obj_pool_free < obj_pool_min_free)
301 			obj_pool_min_free = obj_pool_free;
302 	}
303 	raw_spin_unlock(&pool_lock);
304 
305 init_obj:
306 	if (obj) {
307 		obj->object = addr;
308 		obj->descr  = descr;
309 		obj->state  = ODEBUG_STATE_NONE;
310 		obj->astate = 0;
311 		hlist_add_head(&obj->node, &b->list);
312 	}
313 	return obj;
314 }
315 
316 /*
317  * workqueue function to free objects.
318  *
319  * To reduce contention on the global pool_lock, the actual freeing of
320  * debug objects will be delayed if the pool_lock is busy.
321  */
322 static void free_obj_work(struct work_struct *work)
323 {
324 	struct debug_obj *obj;
325 	unsigned long flags;
326 	HLIST_HEAD(tofree);
327 
328 	WRITE_ONCE(obj_freeing, false);
329 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
330 		return;
331 
332 	if (obj_pool_free >= debug_objects_pool_size)
333 		goto free_objs;
334 
335 	/*
336 	 * The objs on the pool list might be allocated before the work is
337 	 * run, so recheck whether the pool list is full; if not, refill the
338 	 * pool list from the global free list. As it is likely that a
339 	 * workload may be gearing up to use more and more objects, don't
340 	 * free any of them until the next round.
341 	 */
342 	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
343 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
344 		hlist_del(&obj->node);
345 		hlist_add_head(&obj->node, &obj_pool);
346 		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
347 		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
348 	}
349 	raw_spin_unlock_irqrestore(&pool_lock, flags);
350 	return;
351 
352 free_objs:
353 	/*
354 	 * Pool list is already full and there are still objs on the free
355 	 * list. Move remaining free objs to a temporary list to free the
356 	 * memory outside the pool_lock held region.
357 	 */
358 	if (obj_nr_tofree) {
359 		hlist_move_list(&obj_to_free, &tofree);
360 		WRITE_ONCE(obj_nr_tofree, 0);
361 	}
362 	raw_spin_unlock_irqrestore(&pool_lock, flags);
363 
364 	free_object_list(&tofree);
365 }
366 
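/*
 * Put @obj back into the percpu free pool if there is room. Otherwise
 * return it, together with a lookahead batch pulled from the percpu
 * pool, to the global pool or queue it on obj_to_free for the workqueue
 * to release.
 */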
367 static void __free_object(struct debug_obj *obj)
368 {
369 	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
370 	struct debug_percpu_free *percpu_pool;
371 	int lookahead_count = 0;
372 	unsigned long flags;
373 	bool work;
374 
375 	local_irq_save(flags);
376 	if (!obj_cache)
377 		goto free_to_obj_pool;
378 
379 	/*
380 	 * Try to free it into the percpu pool first.
381 	 */
382 	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
383 	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
384 		hlist_add_head(&obj->node, &percpu_pool->free_objs);
385 		percpu_pool->obj_free++;
386 		local_irq_restore(flags);
387 		return;
388 	}
389 
390 	/*
391 	 * As the percpu pool is full, look ahead and pull out a batch
392 	 * of objects from the percpu pool and free them as well.
393 	 */
394 	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
395 		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
396 		if (!objs[lookahead_count])
397 			break;
398 		percpu_pool->obj_free--;
399 	}
400 
401 free_to_obj_pool:
402 	raw_spin_lock(&pool_lock);
403 	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
404 	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
405 	obj_pool_used--;
406 
407 	if (work) {
408 		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
409 		hlist_add_head(&obj->node, &obj_to_free);
410 		if (lookahead_count) {
411 			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
412 			obj_pool_used -= lookahead_count;
413 			while (lookahead_count) {
414 				hlist_add_head(&objs[--lookahead_count]->node,
415 					       &obj_to_free);
416 			}
417 		}
418 
419 		if ((obj_pool_free > debug_objects_pool_size) &&
420 		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
421 			int i;
422 
423 			/*
424 			 * Free one more batch of objects from obj_pool.
425 			 */
426 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
427 				obj = __alloc_object(&obj_pool);
428 				hlist_add_head(&obj->node, &obj_to_free);
429 				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
430 				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
431 			}
432 		}
433 	} else {
434 		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
435 		hlist_add_head(&obj->node, &obj_pool);
436 		if (lookahead_count) {
437 			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
438 			obj_pool_used -= lookahead_count;
439 			while (lookahead_count) {
440 				hlist_add_head(&objs[--lookahead_count]->node,
441 					       &obj_pool);
442 			}
443 		}
444 	}
445 	raw_spin_unlock(&pool_lock);
446 	local_irq_restore(flags);
447 }
448 
449 /*
450  * Put the object back into the pool and schedule work to free objects
451  * if necessary.
452  */
453 static void free_object(struct debug_obj *obj)
454 {
455 	__free_object(obj);
456 	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
457 		WRITE_ONCE(obj_freeing, true);
458 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
459 	}
460 }
461 
462 static void put_objects(struct hlist_head *list)
463 {
464 	struct hlist_node *tmp;
465 	struct debug_obj *obj;
466 
467 	/*
468 	 * Using free_object() puts the objects into reuse or schedules
469 	 * them for freeing, and it gets all the accounting correct.
470 	 */
471 	hlist_for_each_entry_safe(obj, tmp, list, node) {
472 		hlist_del(&obj->node);
473 		free_object(obj);
474 	}
475 }
476 
477 #ifdef CONFIG_HOTPLUG_CPU
478 static int object_cpu_offline(unsigned int cpu)
479 {
480 	/* Remote access is safe as the CPU is dead already */
481 	struct debug_percpu_free *pcp = per_cpu_ptr(&percpu_obj_pool, cpu);
482 
483 	put_objects(&pcp->free_objs);
484 	pcp->obj_free = 0;
485 	return 0;
486 }
487 #endif
488 
489 /* Out of memory. Free all objects from hash */
490 static void debug_objects_oom(void)
491 {
492 	struct debug_bucket *db = obj_hash;
493 	HLIST_HEAD(freelist);
494 
495 	pr_warn("Out of memory. ODEBUG disabled\n");
496 
497 	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
498 		scoped_guard(raw_spinlock_irqsave, &db->lock)
499 			hlist_move_list(&db->list, &freelist);
500 
501 		put_objects(&freelist);
502 	}
503 }
504 
505 /*
506  * We use the pfn of the address for the hash. That way we can check
507  * for freed objects simply by checking the affected bucket.
508  */
509 static struct debug_bucket *get_bucket(unsigned long addr)
510 {
511 	unsigned long hash;
512 
513 	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
514 	return &obj_hash[hash];
515 }
516 
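/*
 * Report a lifetime rule violation for @obj. At most five reports are
 * emitted and the selftest descriptor is excluded;
 * debug_objects_warnings is incremented for every violation.
 */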
517 static void debug_print_object(struct debug_obj *obj, char *msg)
518 {
519 	const struct debug_obj_descr *descr = obj->descr;
520 	static int limit;
521 
522 	/*
523 	 * Don't report if lookup_object_or_alloc() by the current thread
524 	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
525 	 * concurrent thread turned off debug_objects_enabled and cleared
526 	 * the hash buckets.
527 	 */
528 	if (!debug_objects_enabled)
529 		return;
530 
531 	if (limit < 5 && descr != descr_test) {
532 		void *hint = descr->debug_hint ?
533 			descr->debug_hint(obj->object) : NULL;
534 		limit++;
535 		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
536 				 "object: %p object type: %s hint: %pS\n",
537 			msg, obj_states[obj->state], obj->astate,
538 			obj->object, descr->name, hint);
539 	}
540 	debug_objects_warnings++;
541 }
542 
543 /*
544  * Try to repair the damage, so we have a better chance to get useful
545  * debug output.
546  */
547 static bool
548 debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
549 		   void * addr, enum debug_obj_state state)
550 {
551 	if (fixup && fixup(addr, state)) {
552 		debug_objects_fixups++;
553 		return true;
554 	}
555 	return false;
556 }
557 
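/*
 * Warn (at most five times) when the on-stack annotation of an object
 * does not match its actual location.
 */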
558 static void debug_object_is_on_stack(void *addr, int onstack)
559 {
560 	int is_on_stack;
561 	static int limit;
562 
563 	if (limit > 4)
564 		return;
565 
566 	is_on_stack = object_is_on_stack(addr);
567 	if (is_on_stack == onstack)
568 		return;
569 
570 	limit++;
571 	if (is_on_stack)
572 		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
573 			 task_stack_page(current));
574 	else
575 		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
576 			 task_stack_page(current));
577 
578 	WARN_ON(1);
579 }
580 
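/*
 * Look up the tracking object for @addr and allocate one if it does not
 * exist. With @alloc_ifstatic set, allocation is only done for objects
 * confirmed static by descr->is_static_object(); otherwise
 * ERR_PTR(-ENOENT) is returned. On allocation failure NULL is returned
 * and object debugging is disabled.
 */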
581 static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
582 						const struct debug_obj_descr *descr,
583 						bool onstack, bool alloc_ifstatic)
584 {
585 	struct debug_obj *obj = lookup_object(addr, b);
586 	enum debug_obj_state state = ODEBUG_STATE_NONE;
587 
588 	if (likely(obj))
589 		return obj;
590 
591 	/*
592 	 * debug_object_init() unconditionally allocates untracked
593 	 * objects. It does not matter whether it is a static object or
594 	 * not.
595 	 *
596 	 * debug_object_assert_init() and debug_object_activate() allow
597 	 * allocation only if the descriptor callback confirms that the
598 	 * object is static and considered initialized. For non-static
599 	 * objects the allocation needs to be done from the fixup callback.
600 	 */
601 	if (unlikely(alloc_ifstatic)) {
602 		if (!descr->is_static_object || !descr->is_static_object(addr))
603 			return ERR_PTR(-ENOENT);
604 		/* Statically allocated objects are considered initialized */
605 		state = ODEBUG_STATE_INIT;
606 	}
607 
608 	obj = alloc_object(addr, b, descr);
609 	if (likely(obj)) {
610 		obj->state = state;
611 		debug_object_is_on_stack(addr, onstack);
612 		return obj;
613 	}
614 
615 	/* Out of memory. Do the cleanup outside of the locked region */
616 	debug_objects_enabled = false;
617 	return NULL;
618 }
619 
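/*
 * Keep the global pool topped up: first recycle objects from the global
 * free list, then, if the pool is still below the minimum level and the
 * context allows it, allocate new objects via fill_pool().
 */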
620 static void debug_objects_fill_pool(void)
621 {
622 	if (unlikely(!obj_cache))
623 		return;
624 
625 	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
626 		return;
627 
628 	/* Try reusing objects from obj_to_free_list */
629 	fill_pool_from_freelist();
630 
631 	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
632 		return;
633 
634 	/*
635 	 * On RT enabled kernels the pool refill must happen in preemptible
636 	 * context -- for !RT kernels we rely on the fact that spinlock_t and
637 	 * raw_spinlock_t are basically the same type and this lock-type
638 	 * inversion works just fine.
639 	 */
640 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
641 		/*
642 		 * Annotate away the spinlock_t inside raw_spinlock_t warning
643 		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
644 		 * the preemptible() condition above.
645 		 */
646 		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
647 		lock_map_acquire_try(&fill_pool_map);
648 		fill_pool();
649 		lock_map_release(&fill_pool_map);
650 	}
651 }
652 
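/*
 * Core of debug_object_init() and debug_object_init_on_stack(): look up
 * or allocate the tracking object for @addr and set it to
 * ODEBUG_STATE_INIT. An already active object is reported and
 * descr->fixup_init() is invoked.
 */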
653 static void
654 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
655 {
656 	struct debug_obj *obj, o;
657 	struct debug_bucket *db;
658 	unsigned long flags;
659 
660 	debug_objects_fill_pool();
661 
662 	db = get_bucket((unsigned long) addr);
663 
664 	raw_spin_lock_irqsave(&db->lock, flags);
665 
666 	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
667 	if (unlikely(!obj)) {
668 		raw_spin_unlock_irqrestore(&db->lock, flags);
669 		debug_objects_oom();
670 		return;
671 	}
672 
673 	switch (obj->state) {
674 	case ODEBUG_STATE_NONE:
675 	case ODEBUG_STATE_INIT:
676 	case ODEBUG_STATE_INACTIVE:
677 		obj->state = ODEBUG_STATE_INIT;
678 		raw_spin_unlock_irqrestore(&db->lock, flags);
679 		return;
680 	default:
681 		break;
682 	}
683 
684 	o = *obj;
685 	raw_spin_unlock_irqrestore(&db->lock, flags);
686 	debug_print_object(&o, "init");
687 
688 	if (o.state == ODEBUG_STATE_ACTIVE)
689 		debug_object_fixup(descr->fixup_init, addr, o.state);
690 }
691 
692 /**
693  * debug_object_init - debug checks when an object is initialized
694  * @addr:	address of the object
695  * @descr:	pointer to an object specific debug description structure
696  */
697 void debug_object_init(void *addr, const struct debug_obj_descr *descr)
698 {
699 	if (!debug_objects_enabled)
700 		return;
701 
702 	__debug_object_init(addr, descr, 0);
703 }
704 EXPORT_SYMBOL_GPL(debug_object_init);
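
/*
 * Illustrative usage sketch (not part of this file, names hypothetical):
 * a subsystem pairs these calls with its own struct debug_obj_descr,
 * whose callbacks are shown in the selftest descriptor below:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name	= "foo",
 *	};
 *
 *	debug_object_init(&foo, &foo_debug_descr);
 *	debug_object_activate(&foo, &foo_debug_descr);
 *	...
 *	debug_object_deactivate(&foo, &foo_debug_descr);
 *	debug_object_free(&foo, &foo_debug_descr);
 */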
705 
706 /**
707  * debug_object_init_on_stack - debug checks when an object on stack is
708  *				initialized
709  * @addr:	address of the object
710  * @descr:	pointer to an object specific debug description structure
711  */
712 void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
713 {
714 	if (!debug_objects_enabled)
715 		return;
716 
717 	__debug_object_init(addr, descr, 1);
718 }
719 EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
720 
721 /**
722  * debug_object_activate - debug checks when an object is activated
723  * @addr:	address of the object
724  * @descr:	pointer to an object specific debug description structure
725  * Returns 0 on success, -EINVAL if the check failed.
726  */
727 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
728 {
729 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
730 	struct debug_bucket *db;
731 	struct debug_obj *obj;
732 	unsigned long flags;
733 
734 	if (!debug_objects_enabled)
735 		return 0;
736 
737 	debug_objects_fill_pool();
738 
739 	db = get_bucket((unsigned long) addr);
740 
741 	raw_spin_lock_irqsave(&db->lock, flags);
742 
743 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
744 	if (unlikely(!obj)) {
745 		raw_spin_unlock_irqrestore(&db->lock, flags);
746 		debug_objects_oom();
747 		return 0;
748 	} else if (likely(!IS_ERR(obj))) {
749 		switch (obj->state) {
750 		case ODEBUG_STATE_ACTIVE:
751 		case ODEBUG_STATE_DESTROYED:
752 			o = *obj;
753 			break;
754 		case ODEBUG_STATE_INIT:
755 		case ODEBUG_STATE_INACTIVE:
756 			obj->state = ODEBUG_STATE_ACTIVE;
757 			fallthrough;
758 		default:
759 			raw_spin_unlock_irqrestore(&db->lock, flags);
760 			return 0;
761 		}
762 	}
763 
764 	raw_spin_unlock_irqrestore(&db->lock, flags);
765 	debug_print_object(&o, "activate");
766 
767 	switch (o.state) {
768 	case ODEBUG_STATE_ACTIVE:
769 	case ODEBUG_STATE_NOTAVAILABLE:
770 		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
771 			return 0;
772 		fallthrough;
773 	default:
774 		return -EINVAL;
775 	}
776 }
777 EXPORT_SYMBOL_GPL(debug_object_activate);
778 
779 /**
780  * debug_object_deactivate - debug checks when an object is deactivated
781  * @addr:	address of the object
782  * @descr:	pointer to an object specific debug description structure
783  */
784 void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
785 {
786 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
787 	struct debug_bucket *db;
788 	struct debug_obj *obj;
789 	unsigned long flags;
790 
791 	if (!debug_objects_enabled)
792 		return;
793 
794 	db = get_bucket((unsigned long) addr);
795 
796 	raw_spin_lock_irqsave(&db->lock, flags);
797 
798 	obj = lookup_object(addr, db);
799 	if (obj) {
800 		switch (obj->state) {
801 		case ODEBUG_STATE_DESTROYED:
802 			break;
803 		case ODEBUG_STATE_INIT:
804 		case ODEBUG_STATE_INACTIVE:
805 		case ODEBUG_STATE_ACTIVE:
806 			if (obj->astate)
807 				break;
808 			obj->state = ODEBUG_STATE_INACTIVE;
809 			fallthrough;
810 		default:
811 			raw_spin_unlock_irqrestore(&db->lock, flags);
812 			return;
813 		}
814 		o = *obj;
815 	}
816 
817 	raw_spin_unlock_irqrestore(&db->lock, flags);
818 	debug_print_object(&o, "deactivate");
819 }
820 EXPORT_SYMBOL_GPL(debug_object_deactivate);
821 
822 /**
823  * debug_object_destroy - debug checks when an object is destroyed
824  * @addr:	address of the object
825  * @descr:	pointer to an object specific debug description structure
826  */
827 void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
828 {
829 	struct debug_obj *obj, o;
830 	struct debug_bucket *db;
831 	unsigned long flags;
832 
833 	if (!debug_objects_enabled)
834 		return;
835 
836 	db = get_bucket((unsigned long) addr);
837 
838 	raw_spin_lock_irqsave(&db->lock, flags);
839 
840 	obj = lookup_object(addr, db);
841 	if (!obj) {
842 		raw_spin_unlock_irqrestore(&db->lock, flags);
843 		return;
844 	}
845 
846 	switch (obj->state) {
847 	case ODEBUG_STATE_ACTIVE:
848 	case ODEBUG_STATE_DESTROYED:
849 		break;
850 	case ODEBUG_STATE_NONE:
851 	case ODEBUG_STATE_INIT:
852 	case ODEBUG_STATE_INACTIVE:
853 		obj->state = ODEBUG_STATE_DESTROYED;
854 		fallthrough;
855 	default:
856 		raw_spin_unlock_irqrestore(&db->lock, flags);
857 		return;
858 	}
859 
860 	o = *obj;
861 	raw_spin_unlock_irqrestore(&db->lock, flags);
862 	debug_print_object(&o, "destroy");
863 
864 	if (o.state == ODEBUG_STATE_ACTIVE)
865 		debug_object_fixup(descr->fixup_destroy, addr, o.state);
866 }
867 EXPORT_SYMBOL_GPL(debug_object_destroy);
868 
869 /**
870  * debug_object_free - debug checks when an object is freed
871  * @addr:	address of the object
872  * @descr:	pointer to an object specific debug description structure
873  */
874 void debug_object_free(void *addr, const struct debug_obj_descr *descr)
875 {
876 	struct debug_obj *obj, o;
877 	struct debug_bucket *db;
878 	unsigned long flags;
879 
880 	if (!debug_objects_enabled)
881 		return;
882 
883 	db = get_bucket((unsigned long) addr);
884 
885 	raw_spin_lock_irqsave(&db->lock, flags);
886 
887 	obj = lookup_object(addr, db);
888 	if (!obj) {
889 		raw_spin_unlock_irqrestore(&db->lock, flags);
890 		return;
891 	}
892 
893 	switch (obj->state) {
894 	case ODEBUG_STATE_ACTIVE:
895 		break;
896 	default:
897 		hlist_del(&obj->node);
898 		raw_spin_unlock_irqrestore(&db->lock, flags);
899 		free_object(obj);
900 		return;
901 	}
902 
903 	o = *obj;
904 	raw_spin_unlock_irqrestore(&db->lock, flags);
905 	debug_print_object(&o, "free");
906 
907 	debug_object_fixup(descr->fixup_free, addr, o.state);
908 }
909 EXPORT_SYMBOL_GPL(debug_object_free);
910 
911 /**
912  * debug_object_assert_init - debug checks when an object should be initialized
913  * @addr:	address of the object
914  * @descr:	pointer to an object specific debug description structure
915  */
916 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
917 {
918 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
919 	struct debug_bucket *db;
920 	struct debug_obj *obj;
921 	unsigned long flags;
922 
923 	if (!debug_objects_enabled)
924 		return;
925 
926 	debug_objects_fill_pool();
927 
928 	db = get_bucket((unsigned long) addr);
929 
930 	raw_spin_lock_irqsave(&db->lock, flags);
931 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
932 	raw_spin_unlock_irqrestore(&db->lock, flags);
933 	if (likely(!IS_ERR_OR_NULL(obj)))
934 		return;
935 
936 	/* If NULL the allocation has hit OOM */
937 	if (!obj) {
938 		debug_objects_oom();
939 		return;
940 	}
941 
942 	/* Object is neither tracked nor static. It's not initialized. */
943 	debug_print_object(&o, "assert_init");
944 	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
945 }
946 EXPORT_SYMBOL_GPL(debug_object_assert_init);
947 
948 /**
949  * debug_object_active_state - debug checks object usage state machine
950  * @addr:	address of the object
951  * @descr:	pointer to an object specific debug description structure
952  * @expect:	expected state
953  * @next:	state to move to if expected state is found
954  */
955 void
956 debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
957 			  unsigned int expect, unsigned int next)
958 {
959 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
960 	struct debug_bucket *db;
961 	struct debug_obj *obj;
962 	unsigned long flags;
963 
964 	if (!debug_objects_enabled)
965 		return;
966 
967 	db = get_bucket((unsigned long) addr);
968 
969 	raw_spin_lock_irqsave(&db->lock, flags);
970 
971 	obj = lookup_object(addr, db);
972 	if (obj) {
973 		switch (obj->state) {
974 		case ODEBUG_STATE_ACTIVE:
975 			if (obj->astate != expect)
976 				break;
977 			obj->astate = next;
978 			raw_spin_unlock_irqrestore(&db->lock, flags);
979 			return;
980 		default:
981 			break;
982 		}
983 		o = *obj;
984 	}
985 
986 	raw_spin_unlock_irqrestore(&db->lock, flags);
987 	debug_print_object(&o, "active_state");
988 }
989 EXPORT_SYMBOL_GPL(debug_object_active_state);
990 
991 #ifdef CONFIG_DEBUG_OBJECTS_FREE
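/*
 * Scan the hash buckets covering [address, address + size) and remove
 * the tracking objects for memory that is being freed. Active objects
 * are reported and their fixup_free() callback is invoked.
 */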
992 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
993 {
994 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
995 	int cnt, objs_checked = 0;
996 	struct debug_obj *obj, o;
997 	struct debug_bucket *db;
998 	struct hlist_node *tmp;
999 
1000 	saddr = (unsigned long) address;
1001 	eaddr = saddr + size;
1002 	paddr = saddr & ODEBUG_CHUNK_MASK;
1003 	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
1004 	chunks >>= ODEBUG_CHUNK_SHIFT;
1005 
1006 	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
1007 		db = get_bucket(paddr);
1008 
1009 repeat:
1010 		cnt = 0;
1011 		raw_spin_lock_irqsave(&db->lock, flags);
1012 		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
1013 			cnt++;
1014 			oaddr = (unsigned long) obj->object;
1015 			if (oaddr < saddr || oaddr >= eaddr)
1016 				continue;
1017 
1018 			switch (obj->state) {
1019 			case ODEBUG_STATE_ACTIVE:
1020 				o = *obj;
1021 				raw_spin_unlock_irqrestore(&db->lock, flags);
1022 				debug_print_object(&o, "free");
1023 				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
1024 				goto repeat;
1025 			default:
1026 				hlist_del(&obj->node);
1027 				__free_object(obj);
1028 				break;
1029 			}
1030 		}
1031 		raw_spin_unlock_irqrestore(&db->lock, flags);
1032 
1033 		if (cnt > debug_objects_maxchain)
1034 			debug_objects_maxchain = cnt;
1035 
1036 		objs_checked += cnt;
1037 	}
1038 
1039 	if (objs_checked > debug_objects_maxchecked)
1040 		debug_objects_maxchecked = objs_checked;
1041 
1042 	/* Schedule work to actually kmem_cache_free() objects */
1043 	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1044 		WRITE_ONCE(obj_freeing, true);
1045 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1046 	}
1047 }
1048 
1049 void debug_check_no_obj_freed(const void *address, unsigned long size)
1050 {
1051 	if (debug_objects_enabled)
1052 		__debug_check_no_obj_freed(address, size);
1053 }
1054 #endif
1055 
1056 #ifdef CONFIG_DEBUG_FS
1057 
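/*
 * debugfs "stats" file: dump the pool and warning counters, folding the
 * percpu free pools into pool_free and pool_used as described in the
 * comment above obj_pool_min_free.
 */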
1058 static int debug_stats_show(struct seq_file *m, void *v)
1059 {
1060 	int cpu, obj_percpu_free = 0;
1061 
1062 	for_each_possible_cpu(cpu)
1063 		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1064 
1065 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1066 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1067 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1068 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1069 	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1070 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1071 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1072 	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1073 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1074 	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
1075 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1076 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1077 	return 0;
1078 }
1079 DEFINE_SHOW_ATTRIBUTE(debug_stats);
1080 
1081 static int __init debug_objects_init_debugfs(void)
1082 {
1083 	struct dentry *dbgdir;
1084 
1085 	if (!debug_objects_enabled)
1086 		return 0;
1087 
1088 	dbgdir = debugfs_create_dir("debug_objects", NULL);
1089 
1090 	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1091 
1092 	return 0;
1093 }
1094 __initcall(debug_objects_init_debugfs);
1095 
1096 #else
1097 static inline void debug_objects_init_debugfs(void) { }
1098 #endif
1099 
1100 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1101 
1102 /* Random data structure for the self test */
1103 struct self_test {
1104 	unsigned long	dummy1[6];
1105 	int		static_init;
1106 	unsigned long	dummy2[3];
1107 };
1108 
1109 static __initconst const struct debug_obj_descr descr_type_test;
1110 
1111 static bool __init is_static_object(void *addr)
1112 {
1113 	struct self_test *obj = addr;
1114 
1115 	return obj->static_init;
1116 }
1117 
1118 /*
1119  * fixup_init is called when:
1120  * - an active object is initialized
1121  */
1122 static bool __init fixup_init(void *addr, enum debug_obj_state state)
1123 {
1124 	struct self_test *obj = addr;
1125 
1126 	switch (state) {
1127 	case ODEBUG_STATE_ACTIVE:
1128 		debug_object_deactivate(obj, &descr_type_test);
1129 		debug_object_init(obj, &descr_type_test);
1130 		return true;
1131 	default:
1132 		return false;
1133 	}
1134 }
1135 
1136 /*
1137  * fixup_activate is called when:
1138  * - an active object is activated
1139  * - an unknown non-static object is activated
1140  */
1141 static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1142 {
1143 	struct self_test *obj = addr;
1144 
1145 	switch (state) {
1146 	case ODEBUG_STATE_NOTAVAILABLE:
1147 		return true;
1148 	case ODEBUG_STATE_ACTIVE:
1149 		debug_object_deactivate(obj, &descr_type_test);
1150 		debug_object_activate(obj, &descr_type_test);
1151 		return true;
1152 
1153 	default:
1154 		return false;
1155 	}
1156 }
1157 
1158 /*
1159  * fixup_destroy is called when:
1160  * - an active object is destroyed
1161  */
1162 static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1163 {
1164 	struct self_test *obj = addr;
1165 
1166 	switch (state) {
1167 	case ODEBUG_STATE_ACTIVE:
1168 		debug_object_deactivate(obj, &descr_type_test);
1169 		debug_object_destroy(obj, &descr_type_test);
1170 		return true;
1171 	default:
1172 		return false;
1173 	}
1174 }
1175 
1176 /*
1177  * fixup_free is called when:
1178  * - an active object is freed
1179  */
1180 static bool __init fixup_free(void *addr, enum debug_obj_state state)
1181 {
1182 	struct self_test *obj = addr;
1183 
1184 	switch (state) {
1185 	case ODEBUG_STATE_ACTIVE:
1186 		debug_object_deactivate(obj, &descr_type_test);
1187 		debug_object_free(obj, &descr_type_test);
1188 		return true;
1189 	default:
1190 		return false;
1191 	}
1192 }
1193 
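/*
 * Selftest helper: verify that the tracked object for @addr is in the
 * expected state and that the fixup and warning counters match the
 * expected values.
 */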
1194 static int __init
1195 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1196 {
1197 	struct debug_bucket *db;
1198 	struct debug_obj *obj;
1199 	unsigned long flags;
1200 	int res = -EINVAL;
1201 
1202 	db = get_bucket((unsigned long) addr);
1203 
1204 	raw_spin_lock_irqsave(&db->lock, flags);
1205 
1206 	obj = lookup_object(addr, db);
1207 	if (!obj && state != ODEBUG_STATE_NONE) {
1208 		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1209 		goto out;
1210 	}
1211 	if (obj && obj->state != state) {
1212 		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1213 		       obj->state, state);
1214 		goto out;
1215 	}
1216 	if (fixups != debug_objects_fixups) {
1217 		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1218 		       fixups, debug_objects_fixups);
1219 		goto out;
1220 	}
1221 	if (warnings != debug_objects_warnings) {
1222 		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1223 		       warnings, debug_objects_warnings);
1224 		goto out;
1225 	}
1226 	res = 0;
1227 out:
1228 	raw_spin_unlock_irqrestore(&db->lock, flags);
1229 	if (res)
1230 		debug_objects_enabled = false;
1231 	return res;
1232 }
1233 
1234 static __initconst const struct debug_obj_descr descr_type_test = {
1235 	.name			= "selftest",
1236 	.is_static_object	= is_static_object,
1237 	.fixup_init		= fixup_init,
1238 	.fixup_activate		= fixup_activate,
1239 	.fixup_destroy		= fixup_destroy,
1240 	.fixup_free		= fixup_free,
1241 };
1242 
1243 static __initdata struct self_test obj = { .static_init = 0 };
1244 
1245 static bool __init debug_objects_selftest(void)
1246 {
1247 	int fixups, oldfixups, warnings, oldwarnings;
1248 	unsigned long flags;
1249 
1250 	local_irq_save(flags);
1251 
1252 	fixups = oldfixups = debug_objects_fixups;
1253 	warnings = oldwarnings = debug_objects_warnings;
1254 	descr_test = &descr_type_test;
1255 
1256 	debug_object_init(&obj, &descr_type_test);
1257 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1258 		goto out;
1259 	debug_object_activate(&obj, &descr_type_test);
1260 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1261 		goto out;
1262 	debug_object_activate(&obj, &descr_type_test);
1263 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1264 		goto out;
1265 	debug_object_deactivate(&obj, &descr_type_test);
1266 	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1267 		goto out;
1268 	debug_object_destroy(&obj, &descr_type_test);
1269 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1270 		goto out;
1271 	debug_object_init(&obj, &descr_type_test);
1272 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1273 		goto out;
1274 	debug_object_activate(&obj, &descr_type_test);
1275 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1276 		goto out;
1277 	debug_object_deactivate(&obj, &descr_type_test);
1278 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1279 		goto out;
1280 	debug_object_free(&obj, &descr_type_test);
1281 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1282 		goto out;
1283 
1284 	obj.static_init = 1;
1285 	debug_object_activate(&obj, &descr_type_test);
1286 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1287 		goto out;
1288 	debug_object_init(&obj, &descr_type_test);
1289 	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1290 		goto out;
1291 	debug_object_free(&obj, &descr_type_test);
1292 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1293 		goto out;
1294 
1295 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1296 	debug_object_init(&obj, &descr_type_test);
1297 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1298 		goto out;
1299 	debug_object_activate(&obj, &descr_type_test);
1300 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1301 		goto out;
1302 	__debug_check_no_obj_freed(&obj, sizeof(obj));
1303 	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1304 		goto out;
1305 #endif
1306 	pr_info("selftest passed\n");
1307 
1308 out:
1309 	debug_objects_fixups = oldfixups;
1310 	debug_objects_warnings = oldwarnings;
1311 	descr_test = NULL;
1312 
1313 	local_irq_restore(flags);
1314 	return debug_objects_enabled;
1315 }
1316 #else
1317 static inline bool debug_objects_selftest(void) { return true; }
1318 #endif
1319 
1320 /*
1321  * Called during early boot to initialize the hash buckets and link
1322  * the static object pool objects into the pool list. After this call
1323  * the object tracker is fully operational.
1324  */
1325 void __init debug_objects_early_init(void)
1326 {
1327 	int i;
1328 
1329 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1330 		raw_spin_lock_init(&obj_hash[i].lock);
1331 
1332 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1333 		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1334 }
1335 
1336 /*
1337  * Convert the statically allocated objects to dynamic ones.
1338  * debug_objects_mem_init() is called early so only one CPU is up and
1339  * interrupts are disabled, which means it is safe to replace the active
1340  * object references.
1341  */
1342 static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
1343 {
1344 	struct debug_bucket *db = obj_hash;
1345 	struct debug_obj *obj, *new;
1346 	struct hlist_node *tmp;
1347 	HLIST_HEAD(objects);
1348 	int i;
1349 
1350 	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1351 		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
1352 		if (!obj)
1353 			goto free;
1354 		hlist_add_head(&obj->node, &objects);
1355 	}
1356 
1357 	debug_objects_allocated += i;
1358 
1359 	/*
1360 	 * Replace the statically allocated objects list with the allocated
1361 	 * objects list.
1362 	 */
1363 	hlist_move_list(&objects, &obj_pool);
1364 
1365 	/* Replace the active object references */
1366 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1367 		hlist_move_list(&db->list, &objects);
1368 
1369 		hlist_for_each_entry(obj, &objects, node) {
1370 			new = hlist_entry(obj_pool.first, typeof(*obj), node);
1371 			hlist_del(&new->node);
1372 			/* copy object data */
1373 			*new = *obj;
1374 			hlist_add_head(&new->node, &db->list);
1375 		}
1376 	}
1377 	return true;
1378 free:
1379 	/* Can't use free_object_list() as obj_cache is not set up yet */
1380 	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1381 		hlist_del(&obj->node);
1382 		kmem_cache_free(cache, obj);
1383 	}
1384 	return false;
1385 }
1386 
1387 /*
1388  * Called after the kmem_caches are functional to set up a dedicated
1389  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1390  * prevents the debug code from being called on kmem_cache_free() for
1391  * the debug tracker objects, avoiding recursive calls.
1392  */
1393 void __init debug_objects_mem_init(void)
1394 {
1395 	struct kmem_cache *cache;
1396 	int extras;
1397 
1398 	if (!debug_objects_enabled)
1399 		return;
1400 
1401 	if (!debug_objects_selftest())
1402 		return;
1403 
1404 	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
1405 				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
1406 
1407 	if (!cache || !debug_objects_replace_static_objects(cache)) {
1408 		debug_objects_enabled = false;
1409 		pr_warn("Out of memory.\n");
1410 		return;
1411 	}
1412 
1413 	/*
1414 	 * Adjust the thresholds for allocating and freeing objects
1415 	 * according to the number of possible CPUs available in the
1416 	 * system.
1417 	 */
1418 	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1419 	debug_objects_pool_size += extras;
1420 	debug_objects_pool_min_level += extras;
1421 
1422 	/* Everything worked. Expose the cache */
1423 	obj_cache = cache;
1424 
1425 #ifdef CONFIG_HOTPLUG_CPU
1426 	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1427 				  object_cpu_offline);
1428 #endif
1429 	return;
1430 }
1431