// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
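/* 1 << 14 == 16384 hash buckets */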

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation.
 * It thus frees at most about 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
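/*
 * For example, with HZ == 250 the delay is DIV_ROUND_UP(250, 10) == 25
 * jiffies, i.e. 100ms, which gives the 10Hz cap described above.
 */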

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
static int __data_racy		obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int __data_racy		obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int				debug_objects_pool_size __ro_after_init
					= ODEBUG_POOL_SIZE;
static int				debug_objects_pool_min_level __ro_after_init
					= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __ro_after_init;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global obj_to_free list; they will be
	 * reinitialized when allocating.
	 *
	 * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
	 * the WRITE_ONCE() in pool_lock critical sections.
	 */
	if (READ_ONCE(obj_nr_tofree)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new = kmem_cache_zalloc(obj_cache, gfp);
			if (!new)
				break;
			hlist_add_head(&new->node, &head);
			if (!last)
				last = new;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_splice_init(&head, &last->node, &obj_pool);
		debug_objects_allocated += cnt;
		WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill
	 * the pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	free_object_list(&tofree);
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct debug_percpu_free *pcp = per_cpu_ptr(&percpu_obj_pool, cpu);

	put_objects(&pcp->free_objs);
	pcp->obj_free = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
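
/*
 * Illustration (hypothetical addresses): with PAGE_SHIFT == 12 the chunk
 * shift is 12, so all addresses within the same 4 KiB chunk hash to the
 * same bucket, e.g.:
 *
 *	get_bucket(0xffff888012345010) == get_bucket(0xffff888012345ff8)
 *
 * This is what lets __debug_check_no_obj_freed() scan a freed memory
 * range one chunk-sized bucket at a time.
 */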

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
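
/*
 * Usage sketch (illustrative only, not part of this file): a subsystem
 * embeds the tracking in its own init path. The names my_timer,
 * my_timer_descr and my_timer_init() below are hypothetical.
 */
#if 0	/* usage sketch, not built */
struct my_timer { unsigned long expires; };

static const struct debug_obj_descr my_timer_descr = {
	.name	= "my_timer",
};

static void my_timer_init(struct my_timer *t)
{
	/* Start tracking; the tracker state becomes ODEBUG_STATE_INIT */
	debug_object_init(t, &my_timer_descr);
}
#endif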

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
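
/*
 * Usage sketch (illustrative, reusing the hypothetical my_timer names from
 * the sketch above): an on-stack object must be initialized with the
 * on-stack variant and released with debug_object_free() before the
 * function returns, otherwise debug_object_is_on_stack() warns about the
 * missing annotation.
 */
#if 0	/* usage sketch, not built */
static void my_func_with_stack_timer(void)
{
	struct my_timer t;

	debug_object_init_on_stack(&t, &my_timer_descr);
	/* ... use the object ... */
	debug_object_free(&t, &my_timer_descr);	/* stop tracking before return */
}
#endif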

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
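
/*
 * Usage sketch (illustrative): callers that care can check the return
 * value, e.g. to refuse to start an object that failed the sanity check.
 * my_timer_start() and the my_timer names are hypothetical.
 */
#if 0	/* usage sketch, not built */
static int my_timer_start(struct my_timer *t)
{
	/* -EINVAL here means the object failed the state check */
	if (debug_object_activate(t, &my_timer_descr))
		return -EINVAL;
	/* ... actually start the object ... */
	return 0;
}
#endif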

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should already
 *			      be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
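
/*
 * Usage sketch (illustrative): the @expect/@next pair implements a small
 * sub-state machine on top of ODEBUG_STATE_ACTIVE, e.g. to catch an
 * object being queued twice. Since alloc_object() starts astate at 0, a
 * zero-valued "ready" state matches freshly tracked objects. The MY_OBJ_*
 * states, my_obj and my_obj_descr below are hypothetical.
 */
#if 0	/* usage sketch, not built */
enum { MY_OBJ_READY, MY_OBJ_QUEUED };

static void my_obj_queue(struct my_obj *obj)
{
	/* Warns unless obj->astate is MY_OBJ_READY, e.g. on a double queue */
	debug_object_active_state(obj, &my_obj_descr, MY_OBJ_READY, MY_OBJ_QUEUED);
	/* ... queue the object ... */
}
#endif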

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
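
/*
 * Example output of /sys/kernel/debug/debug_objects/stats (the counter
 * values are illustrative, the field names and layout come from the
 * seq_printf() calls above):
 *
 *	max_chain     :2
 *	max_checked   :16
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1088
 *	pool_pcp_free :61
 *	pool_min_free :213
 *	pool_used     :89
 *	pool_max_used :322
 *	on_free_list  :0
 *	objs_allocated:2684
 *	objs_freed    :1507
 */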

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct debug_obj *obj, *new;
	struct hlist_node *tmp;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * Replace the statically allocated objects list with the allocated
	 * objects list.
	 */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which avoids recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof(struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
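	/*
	 * For example (arithmetic only), with 8 possible CPUs:
	 * extras = 8 * 16 = 128, so the pool size grows from 1024 to 1152
	 * objects and the refill threshold from 256 to 384.
	 */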
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}