// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz, with about 1024 objects per freeing operation, so at
 * most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

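/*
 * A pool of free debug objects: @objects is the list of free objects and
 * @cnt their current count. @min_cnt is the refill threshold and @max_cnt
 * the capacity. Pools which only cap growth (the percpu pools and
 * pool_to_free below) leave @min_cnt at zero.
 */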
struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
	unsigned int		min_cnt;
	unsigned int		max_cnt;
} ____cacheline_aligned;

static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu) = {
	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool pool_global = {
	.min_cnt	= ODEBUG_POOL_MIN_LEVEL,
	.max_cnt	= ODEBUG_POOL_SIZE,
};

static struct obj_pool pool_to_free = {
	.max_cnt	= UINT_MAX,
};

static HLIST_HEAD(pool_boot);

/*
 * Because of the presence of percpu free pools, the global pool count
 * under-counts the objects sitting in the percpu free pools, while
 * obj_pool_used over-counts them. Adjustments are made in
 * debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used can
 * be off as well.
 */
static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int __data_racy		obj_pool_max_used;
static bool			obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static const struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache		*obj_cache __ro_after_init;

/*
 * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);
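
/*
 * Both boot parameters take no argument: "debug_objects" on the kernel
 * command line force-enables the tracker, "no_debug_objects" force-disables
 * it, overriding CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT.
 */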

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static __always_inline bool pool_should_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt;
}

static __always_inline bool pool_must_refill(struct obj_pool *pool)
{
	return pool_count(pool) < pool->min_cnt / 2;
}

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;
	struct debug_obj *obj;

	/*
	 * Reuse objects from the global pool_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	guard(raw_spinlock)(&pool_lock);
	/*
	 * Recheck with the lock held as the worker thread might have
	 * won the race and freed the global free list already.
	 */
	while (pool_to_free.cnt && (pool_global.cnt < pool_global.min_cnt)) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
		hlist_add_head(&obj->node, &pool_global.objects);
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
	}
	clear_bit(0, &state);
}

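/*
 * Refill the global pool from the slab cache in batches of
 * ODEBUG_BATCH_SIZE. The allocations use __GFP_HIGH | __GFP_NOWARN as this
 * can run in atomic context: dip into the reserves if needed, but fail
 * silently rather than warn when memory is tight. The atomic counter keeps
 * all but one CPU out of here unless the global pool has dropped below the
 * critical level.
 */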
static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *   - one other CPU is already allocating
	 *   - the global pool has not yet reached the critical level
	 */
	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_should_refill(&pool_global)) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
			if (!new)
				break;
			hlist_add_head(&new->node, &head);
			if (!last)
				last = new;
		}
		if (!cnt)
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		hlist_splice_init(&head, &last->node, &pool_global.objects);
		debug_objects_allocated += cnt;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct obj_pool *percpu_pool = this_cpu_ptr(&pool_pcpu);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->objects);
		if (obj) {
			percpu_pool->cnt--;
			goto init_obj;
		}
	} else {
		obj = __alloc_object(&pool_boot);
		goto init_obj;
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&pool_global.objects);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&pool_global.objects);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node, &percpu_pool->objects);
				percpu_pool->cnt++;
				obj_pool_used++;
				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (pool_global.cnt < obj_pool_min_free)
			obj_pool_min_free = pool_global.cnt;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (pool_global.cnt >= pool_global.max_cnt)
		goto free_objs;

	/*
	 * The objects on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (pool_to_free.cnt && pool_global.cnt < pool_global.max_cnt) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &pool_global.objects);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * The pool list is already full and there are still objects on the
	 * free list. Move the remaining free objects to a temporary list to
	 * free the memory outside the pool_lock held region.
	 */
	if (pool_to_free.cnt) {
		hlist_move_list(&pool_to_free.objects, &tofree);
		WRITE_ONCE(pool_to_free.cnt, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	free_object_list(&tofree);
}

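/*
 * Give a tracker object back: prefer the percpu pool. When that is full,
 * pull a batch out of it and put the batch either into the global pool or,
 * if the global pool is above its limit, onto pool_to_free for the worker
 * to release.
 */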
static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct obj_pool *percpu_pool;
	int lookahead_count = 0;
	bool work;

	guard(irqsave)();

	if (unlikely(!obj_cache)) {
		hlist_add_head(&obj->node, &pool_boot);
		return;
	}

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&pool_pcpu);
	if (percpu_pool->cnt < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->objects);
		percpu_pool->cnt++;
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->objects);
		if (!objs[lookahead_count])
			break;
		percpu_pool->cnt--;
	}

	raw_spin_lock(&pool_lock);
	work = (pool_global.cnt > pool_global.max_cnt) && obj_cache &&
	       (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
		hlist_add_head(&obj->node, &pool_to_free.objects);
		if (lookahead_count) {
			WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &pool_to_free.objects);
			}
		}

		if ((pool_global.cnt > pool_global.max_cnt) &&
		    (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from the global pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&pool_global.objects);
				hlist_add_head(&obj->node, &pool_to_free.objects);
				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
				WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
			}
		}
	} else {
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
		hlist_add_head(&obj->node, &pool_global.objects);
		if (lookahead_count) {
			WRITE_ONCE(pool_global.cnt, pool_global.cnt + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &pool_global.objects);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);

	put_objects(&pcp->objects);
	pcp->cnt = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
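
/*
 * Illustrative example (assuming 4K pages, so ODEBUG_CHUNK_SHIFT == 12):
 * the addresses 0x10000010 and 0x10000ff0 share chunk number 0x10000 and
 * therefore hash to the same bucket, while 0x10001000 belongs to the next
 * chunk and may land in a different bucket.
 */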

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}
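
/*
 * A fixup callback typically deactivates or reinitializes the object so
 * that tracking can continue; see the selftest fixup_init() below for a
 * concrete instance. Hypothetical sketch (my_obj, my_obj_stop() and
 * my_obj_descr are illustrative, not part of this file):
 *
 *	static bool my_fixup_init(void *addr, enum debug_obj_state state)
 *	{
 *		struct my_obj *obj = addr;
 *
 *		switch (state) {
 *		case ODEBUG_STATE_ACTIVE:
 *			my_obj_stop(obj);
 *			debug_object_init(obj, &my_obj_descr);
 *			return true;
 *		default:
 *			return false;
 *		}
 *	}
 */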

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	if (unlikely(!obj_cache))
		return;

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/* Try reusing objects from the pool_to_free list */
	fill_pool_from_freelist();

	if (likely(!pool_should_refill(&pool_global)))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
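
/*
 * Typical usage from a subsystem (hypothetical sketch; struct my_work and
 * my_work_descr are illustrative, not part of this file):
 *
 *	static const struct debug_obj_descr my_work_descr = {
 *		.name = "my_work",
 *	};
 *
 *	void my_work_init(struct my_work *w)
 *	{
 *		debug_object_init(w, &my_work_descr);
 *	}
 *
 * Objects which live on the stack must be announced with
 * debug_object_init_on_stack() below instead, so that the on-stack check
 * does not trigger.
 */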

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
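
/*
 * Illustrative sketch of the @expect/@next handshake (the states and the
 * descriptor are hypothetical): a producer marks the object busy before
 * handing it off and the consumer marks it idle again:
 *
 *	debug_object_active_state(obj, &my_descr, STATE_IDLE, STATE_BUSY);
 *	...
 *	debug_object_active_state(obj, &my_descr, STATE_BUSY, STATE_IDLE);
 *
 * A mismatch between @expect and obj->astate is reported as an
 * "active_state" violation.
 */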

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

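	/* Walk each chunk (page) overlapping [saddr, eaddr) and scan its bucket. */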
	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(pool_pcpu.cnt, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);
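
/*
 * With debugfs mounted at its conventional /sys/kernel/debug location, the
 * statistics are readable via:
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 */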

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Keep early boot simple and add everything to the boot list */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct debug_obj *obj, *new;
	struct hlist_node *tmp;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated = ODEBUG_POOL_SIZE;
	pool_global.cnt = ODEBUG_POOL_SIZE;

	/*
	 * Move the allocated objects to the global pool and disconnect the
	 * boot pool.
	 */
	hlist_move_list(&objects, &pool_global.objects);
	pool_boot.first = NULL;

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
			hlist_del(&new->node);
			pool_global.cnt--;
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool with the SLAB_DEBUG_OBJECTS flag set. This flag prevents
 * the debug code from being called on kmem_cache_free() for the debug
 * tracker objects themselves, which would lead to recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof(struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	pool_global.max_cnt += extras;
	pool_global.min_cnt += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}
1449