xref: /linux-6.15/lib/debugobjects.c (revision 49968cf1)
19e4a51adSThomas Gleixner // SPDX-License-Identifier: GPL-2.0
23ac7fe5aSThomas Gleixner /*
33ac7fe5aSThomas Gleixner  * Generic infrastructure for lifetime debugging of objects.
43ac7fe5aSThomas Gleixner  *
53ac7fe5aSThomas Gleixner  * Copyright (C) 2008, Thomas Gleixner <[email protected]>
63ac7fe5aSThomas Gleixner  */
7719e4843SFabian Frederick 
8719e4843SFabian Frederick #define pr_fmt(fmt) "ODEBUG: " fmt
9719e4843SFabian Frederick 
103ac7fe5aSThomas Gleixner #include <linux/debugobjects.h>
113ac7fe5aSThomas Gleixner #include <linux/interrupt.h>
12d43c36dcSAlexey Dobriyan #include <linux/sched.h>
1368db0cf1SIngo Molnar #include <linux/sched/task_stack.h>
143ac7fe5aSThomas Gleixner #include <linux/seq_file.h>
153ac7fe5aSThomas Gleixner #include <linux/debugfs.h>
165a0e3ad6STejun Heo #include <linux/slab.h>
173ac7fe5aSThomas Gleixner #include <linux/hash.h>
18caba4cbbSWaiman Long #include <linux/kmemleak.h>
1988451f2cSZqiang #include <linux/cpu.h>
203ac7fe5aSThomas Gleixner 
213ac7fe5aSThomas Gleixner #define ODEBUG_HASH_BITS	14
223ac7fe5aSThomas Gleixner #define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
233ac7fe5aSThomas Gleixner 
240b6ec8c0SChristian Borntraeger #define ODEBUG_POOL_SIZE	1024
253ac7fe5aSThomas Gleixner #define ODEBUG_POOL_MIN_LEVEL	256
26d86998b1SWaiman Long #define ODEBUG_POOL_PERCPU_SIZE	64
27634d61f4SWaiman Long #define ODEBUG_BATCH_SIZE	16
283ac7fe5aSThomas Gleixner 
293ac7fe5aSThomas Gleixner #define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
303ac7fe5aSThomas Gleixner #define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
313ac7fe5aSThomas Gleixner #define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
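
/*
 * Illustrative arithmetic (not part of the original source), assuming a
 * 4K page size (PAGE_SHIFT == 12):
 *
 *   ODEBUG_HASH_SIZE  = 1 << 14 = 16384 hash buckets
 *   ODEBUG_CHUNK_SIZE = 1 << 12 = 4096 bytes, i.e. one chunk per page
 *   ODEBUG_CHUNK_MASK = ~0xfffUL, used to round a freed memory range down
 *                       to its page-aligned start
 */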
323ac7fe5aSThomas Gleixner 
33a7344a68SWaiman Long /*
34a7344a68SWaiman Long  * The freeing of debug objects via the workqueue is limited to a maximum
35a7344a68SWaiman Long  * frequency of 10Hz and about 1024 objects per freeing operation, so at
36a7344a68SWaiman Long  * most roughly 10k debug objects are freed per second.
37a7344a68SWaiman Long  */
38a7344a68SWaiman Long #define ODEBUG_FREE_WORK_MAX	1024
39a7344a68SWaiman Long #define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
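
/*
 * Illustrative arithmetic for the limits above (sketch, assuming HZ == 1000):
 * ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(1000, 10) = 100 jiffies = 100ms, so
 * the delayed work runs at most about 10 times per second, and with roughly
 * ODEBUG_FREE_WORK_MAX (1024) objects freed per run that gives the ~10k
 * objects per second bound described above.
 */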
40a7344a68SWaiman Long 
413ac7fe5aSThomas Gleixner struct debug_bucket {
423ac7fe5aSThomas Gleixner 	struct hlist_head	list;
43aef9cb05SThomas Gleixner 	raw_spinlock_t		lock;
443ac7fe5aSThomas Gleixner };
453ac7fe5aSThomas Gleixner 
46d86998b1SWaiman Long /*
47d86998b1SWaiman Long  * Debug object percpu free list
48d86998b1SWaiman Long  * Access is protected by disabling irq
49d86998b1SWaiman Long  */
50d86998b1SWaiman Long struct debug_percpu_free {
51d86998b1SWaiman Long 	struct hlist_head	free_objs;
52d86998b1SWaiman Long 	int			obj_free;
53d86998b1SWaiman Long };
54d86998b1SWaiman Long 
55d86998b1SWaiman Long static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
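
/*
 * Illustrative sketch (not part of the original source) of the access
 * pattern the comment above describes: the per-CPU free list is only
 * touched with interrupts disabled, as done in alloc_object() and
 * __free_object() below.
 *
 *	unsigned long flags;
 *	struct debug_percpu_free *pcp;
 *
 *	local_irq_save(flags);
 *	pcp = this_cpu_ptr(&percpu_obj_pool);
 *	// push/pop entries on pcp->free_objs, adjust pcp->obj_free
 *	local_irq_restore(flags);
 */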
56d86998b1SWaiman Long 
573ac7fe5aSThomas Gleixner static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
583ac7fe5aSThomas Gleixner 
591be1cb7bSThomas Gleixner static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
603ac7fe5aSThomas Gleixner 
61aef9cb05SThomas Gleixner static DEFINE_RAW_SPINLOCK(pool_lock);
623ac7fe5aSThomas Gleixner 
633ac7fe5aSThomas Gleixner static HLIST_HEAD(obj_pool);
6436c4ead6SYang Shi static HLIST_HEAD(obj_to_free);
653ac7fe5aSThomas Gleixner 
66d86998b1SWaiman Long /*
67d86998b1SWaiman Long  * Because of the presence of percpu free pools, obj_pool_free will
68d86998b1SWaiman Long  * under-count those in the percpu free pools. Similarly, obj_pool_used
69d86998b1SWaiman Long  * will over-count those in the percpu free pools. Adjustments will be
70d86998b1SWaiman Long  * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
71d86998b1SWaiman Long  * can be off.
72d86998b1SWaiman Long  */
73e4757c71SZhen Lei static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
74e4757c71SZhen Lei static int __data_racy		obj_pool_free = ODEBUG_POOL_SIZE;
753ac7fe5aSThomas Gleixner static int			obj_pool_used;
76e4757c71SZhen Lei static int __data_racy		obj_pool_max_used;
77a7344a68SWaiman Long static bool			obj_freeing;
7836c4ead6SYang Shi /* The number of objs on the global free list */
7936c4ead6SYang Shi static int			obj_nr_tofree;
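
/*
 * Sketch of the adjustment mentioned above, as done later in
 * debug_stats_show() (illustrative only):
 *
 *	int cpu, obj_percpu_free = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
 *
 *	// reported pool_free = obj_pool_free + obj_percpu_free
 *	// reported pool_used = obj_pool_used - obj_percpu_free
 */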
803ac7fe5aSThomas Gleixner 
815b5baba6SBreno Leitao static int __data_racy			debug_objects_maxchain __read_mostly;
825b5baba6SBreno Leitao static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
835b5baba6SBreno Leitao static int __data_racy			debug_objects_fixups __read_mostly;
845b5baba6SBreno Leitao static int __data_racy			debug_objects_warnings __read_mostly;
855b5baba6SBreno Leitao static int __data_racy			debug_objects_enabled __read_mostly
863ae70205SIngo Molnar 					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
87e4757c71SZhen Lei static int				debug_objects_pool_size __ro_after_init
8897dd552eSWaiman Long 					= ODEBUG_POOL_SIZE;
89e4757c71SZhen Lei static int				debug_objects_pool_min_level __ro_after_init
9097dd552eSWaiman Long 					= ODEBUG_POOL_MIN_LEVEL;
915b5baba6SBreno Leitao 
92aedcade6SStephen Boyd static const struct debug_obj_descr *descr_test  __read_mostly;
9368279f9cSAlexey Dobriyan static struct kmem_cache	*obj_cache __ro_after_init;
943ac7fe5aSThomas Gleixner 
95c4b73aabSWaiman Long /*
960cad93c3SWaiman Long  * Track numbers of kmem_cache_alloc()/free() calls done.
97c4b73aabSWaiman Long  */
98e4757c71SZhen Lei static int __data_racy		debug_objects_allocated;
99e4757c71SZhen Lei static int __data_racy		debug_objects_freed;
100c4b73aabSWaiman Long 
101337fff8bSThomas Gleixner static void free_obj_work(struct work_struct *work);
102a7344a68SWaiman Long static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
103337fff8bSThomas Gleixner 
1043ac7fe5aSThomas Gleixner static int __init enable_object_debug(char *str)
1053ac7fe5aSThomas Gleixner {
1063ac7fe5aSThomas Gleixner 	debug_objects_enabled = 1;
1073ac7fe5aSThomas Gleixner 	return 0;
1083ac7fe5aSThomas Gleixner }
1093e8ebb5cSKyle McMartin 
1103e8ebb5cSKyle McMartin static int __init disable_object_debug(char *str)
1113e8ebb5cSKyle McMartin {
1123e8ebb5cSKyle McMartin 	debug_objects_enabled = 0;
1133e8ebb5cSKyle McMartin 	return 0;
1143e8ebb5cSKyle McMartin }
1153e8ebb5cSKyle McMartin 
1163ac7fe5aSThomas Gleixner early_param("debug_objects", enable_object_debug);
1173e8ebb5cSKyle McMartin early_param("no_debug_objects", disable_object_debug);
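
/*
 * Usage note (illustrative): these early params are toggled from the kernel
 * command line, overriding the CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT default:
 *
 *	debug_objects		# force-enable object debugging
 *	no_debug_objects	# force-disable object debugging
 */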
1183ac7fe5aSThomas Gleixner 
1193ac7fe5aSThomas Gleixner static const char *obj_states[ODEBUG_STATE_MAX] = {
1203ac7fe5aSThomas Gleixner 	[ODEBUG_STATE_NONE]		= "none",
1213ac7fe5aSThomas Gleixner 	[ODEBUG_STATE_INIT]		= "initialized",
1223ac7fe5aSThomas Gleixner 	[ODEBUG_STATE_INACTIVE]		= "inactive",
1233ac7fe5aSThomas Gleixner 	[ODEBUG_STATE_ACTIVE]		= "active",
1243ac7fe5aSThomas Gleixner 	[ODEBUG_STATE_DESTROYED]	= "destroyed",
1253ac7fe5aSThomas Gleixner 	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
1263ac7fe5aSThomas Gleixner };
1273ac7fe5aSThomas Gleixner 
1281fda107dSThomas Gleixner static void fill_pool(void)
1293ac7fe5aSThomas Gleixner {
130eb799279STetsuo Handa 	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
131d26bf505SWaiman Long 	struct debug_obj *obj;
13250db04ddSVegard Nossum 	unsigned long flags;
1333ac7fe5aSThomas Gleixner 
13435fd7a63SMarco Elver 	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
1351fda107dSThomas Gleixner 		return;
1363ac7fe5aSThomas Gleixner 
13736c4ead6SYang Shi 	/*
13863a4a9b5SZhen Lei 	 * Reuse objs from the global obj_to_free list; they will be
13963a4a9b5SZhen Lei 	 * reinitialized when allocating.
14035fd7a63SMarco Elver 	 *
14163a4a9b5SZhen Lei 	 * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
14263a4a9b5SZhen Lei 	 * the WRITE_ONCE() in pool_lock critical sections.
14336c4ead6SYang Shi 	 */
14463a4a9b5SZhen Lei 	if (READ_ONCE(obj_nr_tofree)) {
14536c4ead6SYang Shi 		raw_spin_lock_irqsave(&pool_lock, flags);
14636c4ead6SYang Shi 		/*
14736c4ead6SYang Shi 		 * Recheck with the lock held as the worker thread might have
14836c4ead6SYang Shi 		 * won the race and freed the global free list already.
14936c4ead6SYang Shi 		 */
150684d28feSZhen Lei 		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
15136c4ead6SYang Shi 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
15236c4ead6SYang Shi 			hlist_del(&obj->node);
15335fd7a63SMarco Elver 			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
15436c4ead6SYang Shi 			hlist_add_head(&obj->node, &obj_pool);
15535fd7a63SMarco Elver 			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
15636c4ead6SYang Shi 		}
15736c4ead6SYang Shi 		raw_spin_unlock_irqrestore(&pool_lock, flags);
15836c4ead6SYang Shi 	}
15936c4ead6SYang Shi 
1603ac7fe5aSThomas Gleixner 	if (unlikely(!obj_cache))
1611fda107dSThomas Gleixner 		return;
1623ac7fe5aSThomas Gleixner 
16335fd7a63SMarco Elver 	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
164813fd078SZhen Lei 		struct debug_obj *new, *last = NULL;
165813fd078SZhen Lei 		HLIST_HEAD(head);
166d26bf505SWaiman Long 		int cnt;
1673ac7fe5aSThomas Gleixner 
168d26bf505SWaiman Long 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
169813fd078SZhen Lei 			new = kmem_cache_zalloc(obj_cache, gfp);
170813fd078SZhen Lei 			if (!new)
171d26bf505SWaiman Long 				break;
172813fd078SZhen Lei 			hlist_add_head(&new->node, &head);
173813fd078SZhen Lei 			if (!last)
174813fd078SZhen Lei 				last = new;
175d26bf505SWaiman Long 		}
176d26bf505SWaiman Long 		if (!cnt)
1773340808cSDan Carpenter 			return;
1783ac7fe5aSThomas Gleixner 
179aef9cb05SThomas Gleixner 		raw_spin_lock_irqsave(&pool_lock, flags);
180813fd078SZhen Lei 		hlist_splice_init(&head, &last->node, &obj_pool);
181813fd078SZhen Lei 		debug_objects_allocated += cnt;
182813fd078SZhen Lei 		WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
183aef9cb05SThomas Gleixner 		raw_spin_unlock_irqrestore(&pool_lock, flags);
1843ac7fe5aSThomas Gleixner 	}
1853ac7fe5aSThomas Gleixner }
1863ac7fe5aSThomas Gleixner 
1873ac7fe5aSThomas Gleixner /*
1883ac7fe5aSThomas Gleixner  * Lookup an object in the hash bucket.
1893ac7fe5aSThomas Gleixner  */
1903ac7fe5aSThomas Gleixner static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
1913ac7fe5aSThomas Gleixner {
1923ac7fe5aSThomas Gleixner 	struct debug_obj *obj;
1933ac7fe5aSThomas Gleixner 	int cnt = 0;
1943ac7fe5aSThomas Gleixner 
195b67bfe0dSSasha Levin 	hlist_for_each_entry(obj, &b->list, node) {
1963ac7fe5aSThomas Gleixner 		cnt++;
1973ac7fe5aSThomas Gleixner 		if (obj->object == addr)
1983ac7fe5aSThomas Gleixner 			return obj;
1993ac7fe5aSThomas Gleixner 	}
2003ac7fe5aSThomas Gleixner 	if (cnt > debug_objects_maxchain)
2013ac7fe5aSThomas Gleixner 		debug_objects_maxchain = cnt;
2023ac7fe5aSThomas Gleixner 
2033ac7fe5aSThomas Gleixner 	return NULL;
2043ac7fe5aSThomas Gleixner }
2053ac7fe5aSThomas Gleixner 
2063ac7fe5aSThomas Gleixner /*
207d86998b1SWaiman Long  * Allocate a new object from the hlist
208d86998b1SWaiman Long  */
209d86998b1SWaiman Long static struct debug_obj *__alloc_object(struct hlist_head *list)
210d86998b1SWaiman Long {
211d86998b1SWaiman Long 	struct debug_obj *obj = NULL;
212d86998b1SWaiman Long 
213d86998b1SWaiman Long 	if (list->first) {
214d86998b1SWaiman Long 		obj = hlist_entry(list->first, typeof(*obj), node);
215d86998b1SWaiman Long 		hlist_del(&obj->node);
216d86998b1SWaiman Long 	}
217d86998b1SWaiman Long 
218d86998b1SWaiman Long 	return obj;
219d86998b1SWaiman Long }
220d86998b1SWaiman Long 
2213ac7fe5aSThomas Gleixner static struct debug_obj *
222aedcade6SStephen Boyd alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
2233ac7fe5aSThomas Gleixner {
224634d61f4SWaiman Long 	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
225d86998b1SWaiman Long 	struct debug_obj *obj;
226d86998b1SWaiman Long 
227d86998b1SWaiman Long 	if (likely(obj_cache)) {
228d86998b1SWaiman Long 		obj = __alloc_object(&percpu_pool->free_objs);
229d86998b1SWaiman Long 		if (obj) {
230d86998b1SWaiman Long 			percpu_pool->obj_free--;
231d86998b1SWaiman Long 			goto init_obj;
232d86998b1SWaiman Long 		}
233d86998b1SWaiman Long 	}
2343ac7fe5aSThomas Gleixner 
235aef9cb05SThomas Gleixner 	raw_spin_lock(&pool_lock);
236d86998b1SWaiman Long 	obj = __alloc_object(&obj_pool);
237d86998b1SWaiman Long 	if (obj) {
2383ac7fe5aSThomas Gleixner 		obj_pool_used++;
23935fd7a63SMarco Elver 		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
240634d61f4SWaiman Long 
241634d61f4SWaiman Long 		/*
242634d61f4SWaiman Long 		 * Looking ahead, allocate one batch of debug objects and
243634d61f4SWaiman Long 		 * put them into the percpu free pool.
244634d61f4SWaiman Long 		 */
245634d61f4SWaiman Long 		if (likely(obj_cache)) {
246634d61f4SWaiman Long 			int i;
247634d61f4SWaiman Long 
248634d61f4SWaiman Long 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
249634d61f4SWaiman Long 				struct debug_obj *obj2;
250634d61f4SWaiman Long 
251634d61f4SWaiman Long 				obj2 = __alloc_object(&obj_pool);
252634d61f4SWaiman Long 				if (!obj2)
253634d61f4SWaiman Long 					break;
254634d61f4SWaiman Long 				hlist_add_head(&obj2->node,
255634d61f4SWaiman Long 					       &percpu_pool->free_objs);
256634d61f4SWaiman Long 				percpu_pool->obj_free++;
257634d61f4SWaiman Long 				obj_pool_used++;
25835fd7a63SMarco Elver 				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
259634d61f4SWaiman Long 			}
260634d61f4SWaiman Long 		}
261634d61f4SWaiman Long 
2623ac7fe5aSThomas Gleixner 		if (obj_pool_used > obj_pool_max_used)
2633ac7fe5aSThomas Gleixner 			obj_pool_max_used = obj_pool_used;
2643ac7fe5aSThomas Gleixner 
2653ac7fe5aSThomas Gleixner 		if (obj_pool_free < obj_pool_min_free)
2663ac7fe5aSThomas Gleixner 			obj_pool_min_free = obj_pool_free;
2673ac7fe5aSThomas Gleixner 	}
268aef9cb05SThomas Gleixner 	raw_spin_unlock(&pool_lock);
2693ac7fe5aSThomas Gleixner 
270d86998b1SWaiman Long init_obj:
271d86998b1SWaiman Long 	if (obj) {
272d86998b1SWaiman Long 		obj->object = addr;
273d86998b1SWaiman Long 		obj->descr  = descr;
274d86998b1SWaiman Long 		obj->state  = ODEBUG_STATE_NONE;
275d86998b1SWaiman Long 		obj->astate = 0;
276d86998b1SWaiman Long 		hlist_add_head(&obj->node, &b->list);
277d86998b1SWaiman Long 	}
2783ac7fe5aSThomas Gleixner 	return obj;
2793ac7fe5aSThomas Gleixner }
2803ac7fe5aSThomas Gleixner 
2813ac7fe5aSThomas Gleixner /*
282337fff8bSThomas Gleixner  * workqueue function to free objects.
283858274b6SWaiman Long  *
284858274b6SWaiman Long  * To reduce contention on the global pool_lock, the actual freeing of
285636e1970SYang Shi  * debug objects will be delayed if the pool_lock is busy.
286337fff8bSThomas Gleixner  */
287337fff8bSThomas Gleixner static void free_obj_work(struct work_struct *work)
288337fff8bSThomas Gleixner {
28936c4ead6SYang Shi 	struct hlist_node *tmp;
29036c4ead6SYang Shi 	struct debug_obj *obj;
291337fff8bSThomas Gleixner 	unsigned long flags;
29236c4ead6SYang Shi 	HLIST_HEAD(tofree);
293337fff8bSThomas Gleixner 
294a7344a68SWaiman Long 	WRITE_ONCE(obj_freeing, false);
295858274b6SWaiman Long 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
296858274b6SWaiman Long 		return;
29736c4ead6SYang Shi 
298a7344a68SWaiman Long 	if (obj_pool_free >= debug_objects_pool_size)
299a7344a68SWaiman Long 		goto free_objs;
300a7344a68SWaiman Long 
30136c4ead6SYang Shi 	/*
30236c4ead6SYang Shi 	 * The objs on the pool list might be allocated before the work is
30336c4ead6SYang Shi 	 * run, so recheck whether the pool list is full or not. If not, refill
304a7344a68SWaiman Long 	 * the pool list from the global free list. As it is likely that a
305a7344a68SWaiman Long 	 * workload may be gearing up to use more and more objects, don't free
306a7344a68SWaiman Long 	 * any of them until the next round.
30736c4ead6SYang Shi 	 */
30836c4ead6SYang Shi 	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
30936c4ead6SYang Shi 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
31036c4ead6SYang Shi 		hlist_del(&obj->node);
31136c4ead6SYang Shi 		hlist_add_head(&obj->node, &obj_pool);
31235fd7a63SMarco Elver 		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
31335fd7a63SMarco Elver 		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
31436c4ead6SYang Shi 	}
315a7344a68SWaiman Long 	raw_spin_unlock_irqrestore(&pool_lock, flags);
316a7344a68SWaiman Long 	return;
31736c4ead6SYang Shi 
318a7344a68SWaiman Long free_objs:
31936c4ead6SYang Shi 	/*
32036c4ead6SYang Shi 	 * Pool list is already full and there are still objs on the free
32136c4ead6SYang Shi 	 * list. Move remaining free objs to a temporary list to free the
32236c4ead6SYang Shi 	 * memory outside the pool_lock held region.
32336c4ead6SYang Shi 	 */
32436c4ead6SYang Shi 	if (obj_nr_tofree) {
32536c4ead6SYang Shi 		hlist_move_list(&obj_to_free, &tofree);
32604148187SArnd Bergmann 		debug_objects_freed += obj_nr_tofree;
32735fd7a63SMarco Elver 		WRITE_ONCE(obj_nr_tofree, 0);
32836c4ead6SYang Shi 	}
329aef9cb05SThomas Gleixner 	raw_spin_unlock_irqrestore(&pool_lock, flags);
33036c4ead6SYang Shi 
33136c4ead6SYang Shi 	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
33236c4ead6SYang Shi 		hlist_del(&obj->node);
33336c4ead6SYang Shi 		kmem_cache_free(obj_cache, obj);
33436c4ead6SYang Shi 	}
335337fff8bSThomas Gleixner }
336337fff8bSThomas Gleixner 
337a7344a68SWaiman Long static void __free_object(struct debug_obj *obj)
338636e1970SYang Shi {
339634d61f4SWaiman Long 	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
340634d61f4SWaiman Long 	struct debug_percpu_free *percpu_pool;
341634d61f4SWaiman Long 	int lookahead_count = 0;
342636e1970SYang Shi 	unsigned long flags;
343636e1970SYang Shi 	bool work;
344636e1970SYang Shi 
345d86998b1SWaiman Long 	local_irq_save(flags);
346634d61f4SWaiman Long 	if (!obj_cache)
347634d61f4SWaiman Long 		goto free_to_obj_pool;
348634d61f4SWaiman Long 
349d86998b1SWaiman Long 	/*
350d86998b1SWaiman Long 	 * Try to free it into the percpu pool first.
351d86998b1SWaiman Long 	 */
352d86998b1SWaiman Long 	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
353634d61f4SWaiman Long 	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
354d86998b1SWaiman Long 		hlist_add_head(&obj->node, &percpu_pool->free_objs);
355d86998b1SWaiman Long 		percpu_pool->obj_free++;
356d86998b1SWaiman Long 		local_irq_restore(flags);
357a7344a68SWaiman Long 		return;
358d86998b1SWaiman Long 	}
359d86998b1SWaiman Long 
360634d61f4SWaiman Long 	/*
361634d61f4SWaiman Long 	 * As the percpu pool is full, look ahead and pull out a batch
362634d61f4SWaiman Long 	 * of objects from the percpu pool and free them as well.
363634d61f4SWaiman Long 	 */
364634d61f4SWaiman Long 	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
365634d61f4SWaiman Long 		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
366634d61f4SWaiman Long 		if (!objs[lookahead_count])
367634d61f4SWaiman Long 			break;
368634d61f4SWaiman Long 		percpu_pool->obj_free--;
369634d61f4SWaiman Long 	}
370634d61f4SWaiman Long 
371634d61f4SWaiman Long free_to_obj_pool:
372d86998b1SWaiman Long 	raw_spin_lock(&pool_lock);
373a7344a68SWaiman Long 	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
374a7344a68SWaiman Long 	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
375636e1970SYang Shi 	obj_pool_used--;
376636e1970SYang Shi 
377636e1970SYang Shi 	if (work) {
37835fd7a63SMarco Elver 		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
379636e1970SYang Shi 		hlist_add_head(&obj->node, &obj_to_free);
380634d61f4SWaiman Long 		if (lookahead_count) {
38135fd7a63SMarco Elver 			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
382634d61f4SWaiman Long 			obj_pool_used -= lookahead_count;
383634d61f4SWaiman Long 			while (lookahead_count) {
384634d61f4SWaiman Long 				hlist_add_head(&objs[--lookahead_count]->node,
385634d61f4SWaiman Long 					       &obj_to_free);
386634d61f4SWaiman Long 			}
387634d61f4SWaiman Long 		}
388a7344a68SWaiman Long 
389a7344a68SWaiman Long 		if ((obj_pool_free > debug_objects_pool_size) &&
390a7344a68SWaiman Long 		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
391a7344a68SWaiman Long 			int i;
392a7344a68SWaiman Long 
393a7344a68SWaiman Long 			/*
394a7344a68SWaiman Long 			 * Free one more batch of objects from obj_pool.
395a7344a68SWaiman Long 			 */
396a7344a68SWaiman Long 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
397a7344a68SWaiman Long 				obj = __alloc_object(&obj_pool);
398a7344a68SWaiman Long 				hlist_add_head(&obj->node, &obj_to_free);
39935fd7a63SMarco Elver 				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
40035fd7a63SMarco Elver 				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
401a7344a68SWaiman Long 			}
402a7344a68SWaiman Long 		}
403636e1970SYang Shi 	} else {
40435fd7a63SMarco Elver 		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
405636e1970SYang Shi 		hlist_add_head(&obj->node, &obj_pool);
406634d61f4SWaiman Long 		if (lookahead_count) {
40735fd7a63SMarco Elver 			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
408634d61f4SWaiman Long 			obj_pool_used -= lookahead_count;
409634d61f4SWaiman Long 			while (lookahead_count) {
410634d61f4SWaiman Long 				hlist_add_head(&objs[--lookahead_count]->node,
411634d61f4SWaiman Long 					       &obj_pool);
412634d61f4SWaiman Long 			}
413634d61f4SWaiman Long 		}
414636e1970SYang Shi 	}
415d86998b1SWaiman Long 	raw_spin_unlock(&pool_lock);
416d86998b1SWaiman Long 	local_irq_restore(flags);
417636e1970SYang Shi }
418636e1970SYang Shi 
419337fff8bSThomas Gleixner /*
420337fff8bSThomas Gleixner  * Put the object back into the pool and schedule work to free objects
421337fff8bSThomas Gleixner  * if necessary.
4223ac7fe5aSThomas Gleixner  */
4233ac7fe5aSThomas Gleixner static void free_object(struct debug_obj *obj)
4243ac7fe5aSThomas Gleixner {
425a7344a68SWaiman Long 	__free_object(obj);
42635fd7a63SMarco Elver 	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
427a7344a68SWaiman Long 		WRITE_ONCE(obj_freeing, true);
428a7344a68SWaiman Long 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
429a7344a68SWaiman Long 	}
4303ac7fe5aSThomas Gleixner }
4313ac7fe5aSThomas Gleixner 
432a2a70238SThomas Gleixner static void put_objects(struct hlist_head *list)
43388451f2cSZqiang {
43488451f2cSZqiang 	struct hlist_node *tmp;
43588451f2cSZqiang 	struct debug_obj *obj;
43688451f2cSZqiang 
437a2a70238SThomas Gleixner 	/*
438a2a70238SThomas Gleixner 	 * Using free_object() puts the objects into reuse or schedules
439a2a70238SThomas Gleixner 	 * them for freeing and it gets all the accounting correct.
440a2a70238SThomas Gleixner 	 */
441a2a70238SThomas Gleixner 	hlist_for_each_entry_safe(obj, tmp, list, node) {
44288451f2cSZqiang 		hlist_del(&obj->node);
443a2a70238SThomas Gleixner 		free_object(obj);
444a2a70238SThomas Gleixner 	}
44588451f2cSZqiang }
446eabb7f1aSwuchi 
447*49968cf1SThomas Gleixner #ifdef CONFIG_HOTPLUG_CPU
448a2a70238SThomas Gleixner static int object_cpu_offline(unsigned int cpu)
449a2a70238SThomas Gleixner {
450a2a70238SThomas Gleixner 	/* Remote access is safe as the CPU is dead already */
451a2a70238SThomas Gleixner 	struct debug_percpu_free *pcp = per_cpu_ptr(&percpu_obj_pool, cpu);
452eabb7f1aSwuchi 
453a2a70238SThomas Gleixner 	put_objects(&pcp->free_objs);
454a2a70238SThomas Gleixner 	pcp->obj_free = 0;
45588451f2cSZqiang 	return 0;
45688451f2cSZqiang }
45788451f2cSZqiang #endif
45888451f2cSZqiang 
459*49968cf1SThomas Gleixner /* Out of memory. Free all objects from hash */
4603ac7fe5aSThomas Gleixner static void debug_objects_oom(void)
4613ac7fe5aSThomas Gleixner {
4623ac7fe5aSThomas Gleixner 	struct debug_bucket *db = obj_hash;
463673d62ccSVegard Nossum 	HLIST_HEAD(freelist);
4643ac7fe5aSThomas Gleixner 
465719e4843SFabian Frederick 	pr_warn("Out of memory. ODEBUG disabled\n");
4663ac7fe5aSThomas Gleixner 
467*49968cf1SThomas Gleixner 	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
468*49968cf1SThomas Gleixner 		scoped_guard(raw_spinlock_irqsave, &db->lock)
469673d62ccSVegard Nossum 			hlist_move_list(&db->list, &freelist);
470673d62ccSVegard Nossum 
471*49968cf1SThomas Gleixner 		put_objects(&freelist);
4723ac7fe5aSThomas Gleixner 	}
4733ac7fe5aSThomas Gleixner }
4743ac7fe5aSThomas Gleixner 
4753ac7fe5aSThomas Gleixner /*
4763ac7fe5aSThomas Gleixner  * We use the pfn of the address for the hash. That way we can check
4773ac7fe5aSThomas Gleixner  * for freed objects simply by checking the affected bucket.
4783ac7fe5aSThomas Gleixner  */
4793ac7fe5aSThomas Gleixner static struct debug_bucket *get_bucket(unsigned long addr)
4803ac7fe5aSThomas Gleixner {
4813ac7fe5aSThomas Gleixner 	unsigned long hash;
4823ac7fe5aSThomas Gleixner 
4833ac7fe5aSThomas Gleixner 	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
4843ac7fe5aSThomas Gleixner 	return &obj_hash[hash];
4853ac7fe5aSThomas Gleixner }
4863ac7fe5aSThomas Gleixner 
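
/*
 * Illustrative example (assuming 4K pages): for an object at address
 * 0xffff888012345678, get_bucket() hashes 0xffff888012345, i.e. the address
 * shifted right by ODEBUG_CHUNK_SHIFT. All objects located in the same page
 * therefore land in the same bucket, which is what lets
 * __debug_check_no_obj_freed() scan only the buckets of the affected pages.
 */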
4873ac7fe5aSThomas Gleixner static void debug_print_object(struct debug_obj *obj, char *msg)
4883ac7fe5aSThomas Gleixner {
489aedcade6SStephen Boyd 	const struct debug_obj_descr *descr = obj->descr;
4903ac7fe5aSThomas Gleixner 	static int limit;
4913ac7fe5aSThomas Gleixner 
4928b64d420STetsuo Handa 	/*
4938b64d420STetsuo Handa 	 * Don't report if lookup_object_or_alloc() by the current thread
4948b64d420STetsuo Handa 	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
4958b64d420STetsuo Handa 	 * concurrent thread turned off debug_objects_enabled and cleared
4968b64d420STetsuo Handa 	 * the hash buckets.
4978b64d420STetsuo Handa 	 */
4988b64d420STetsuo Handa 	if (!debug_objects_enabled)
4998b64d420STetsuo Handa 		return;
5008b64d420STetsuo Handa 
50199777288SStanislaw Gruszka 	if (limit < 5 && descr != descr_test) {
50299777288SStanislaw Gruszka 		void *hint = descr->debug_hint ?
50399777288SStanislaw Gruszka 			descr->debug_hint(obj->object) : NULL;
5043ac7fe5aSThomas Gleixner 		limit++;
505a5d8e467SMathieu Desnoyers 		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
506c4db2d3bSStephen Boyd 				 "object: %p object type: %s hint: %pS\n",
507a5d8e467SMathieu Desnoyers 			msg, obj_states[obj->state], obj->astate,
508c4db2d3bSStephen Boyd 			obj->object, descr->name, hint);
5093ac7fe5aSThomas Gleixner 	}
5103ac7fe5aSThomas Gleixner 	debug_objects_warnings++;
5113ac7fe5aSThomas Gleixner }
5123ac7fe5aSThomas Gleixner 
5133ac7fe5aSThomas Gleixner /*
5143ac7fe5aSThomas Gleixner  * Try to repair the damage, so we have a better chance to get useful
5153ac7fe5aSThomas Gleixner  * debug output.
5163ac7fe5aSThomas Gleixner  */
517b1e4d9d8SDu, Changbin static bool
518b1e4d9d8SDu, Changbin debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
5193ac7fe5aSThomas Gleixner 		   void * addr, enum debug_obj_state state)
5203ac7fe5aSThomas Gleixner {
521b1e4d9d8SDu, Changbin 	if (fixup && fixup(addr, state)) {
522b1e4d9d8SDu, Changbin 		debug_objects_fixups++;
523b1e4d9d8SDu, Changbin 		return true;
524b1e4d9d8SDu, Changbin 	}
525b1e4d9d8SDu, Changbin 	return false;
5263ac7fe5aSThomas Gleixner }
5273ac7fe5aSThomas Gleixner 
5283ac7fe5aSThomas Gleixner static void debug_object_is_on_stack(void *addr, int onstack)
5293ac7fe5aSThomas Gleixner {
5303ac7fe5aSThomas Gleixner 	int is_on_stack;
5313ac7fe5aSThomas Gleixner 	static int limit;
5323ac7fe5aSThomas Gleixner 
5333ac7fe5aSThomas Gleixner 	if (limit > 4)
5343ac7fe5aSThomas Gleixner 		return;
5353ac7fe5aSThomas Gleixner 
5368b05c7e6SFUJITA Tomonori 	is_on_stack = object_is_on_stack(addr);
5373ac7fe5aSThomas Gleixner 	if (is_on_stack == onstack)
5383ac7fe5aSThomas Gleixner 		return;
5393ac7fe5aSThomas Gleixner 
5403ac7fe5aSThomas Gleixner 	limit++;
5413ac7fe5aSThomas Gleixner 	if (is_on_stack)
542fc91a3c4SJoel Fernandes (Google) 		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
543fc91a3c4SJoel Fernandes (Google) 			 task_stack_page(current));
5443ac7fe5aSThomas Gleixner 	else
545fc91a3c4SJoel Fernandes (Google) 		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
546fc91a3c4SJoel Fernandes (Google) 			 task_stack_page(current));
547fc91a3c4SJoel Fernandes (Google) 
5483ac7fe5aSThomas Gleixner 	WARN_ON(1);
5493ac7fe5aSThomas Gleixner }
5503ac7fe5aSThomas Gleixner 
55163a75969SThomas Gleixner static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
55263a75969SThomas Gleixner 						const struct debug_obj_descr *descr,
55363a75969SThomas Gleixner 						bool onstack, bool alloc_ifstatic)
55463a75969SThomas Gleixner {
55563a75969SThomas Gleixner 	struct debug_obj *obj = lookup_object(addr, b);
55663a75969SThomas Gleixner 	enum debug_obj_state state = ODEBUG_STATE_NONE;
55763a75969SThomas Gleixner 
55863a75969SThomas Gleixner 	if (likely(obj))
55963a75969SThomas Gleixner 		return obj;
56063a75969SThomas Gleixner 
56163a75969SThomas Gleixner 	/*
56263a75969SThomas Gleixner 	 * debug_object_init() unconditionally allocates untracked
56363a75969SThomas Gleixner 	 * objects. It does not matter whether it is a static object or
56463a75969SThomas Gleixner 	 * not.
56563a75969SThomas Gleixner 	 *
56663a75969SThomas Gleixner 	 * debug_object_assert_init() and debug_object_activate() allow
56763a75969SThomas Gleixner 	 * allocation only if the descriptor callback confirms that the
56863a75969SThomas Gleixner 	 * object is static and considered initialized. For non-static
56963a75969SThomas Gleixner 	 * objects the allocation needs to be done from the fixup callback.
57063a75969SThomas Gleixner 	 */
57163a75969SThomas Gleixner 	if (unlikely(alloc_ifstatic)) {
57263a75969SThomas Gleixner 		if (!descr->is_static_object || !descr->is_static_object(addr))
57363a75969SThomas Gleixner 			return ERR_PTR(-ENOENT);
57463a75969SThomas Gleixner 		/* Statically allocated objects are considered initialized */
57563a75969SThomas Gleixner 		state = ODEBUG_STATE_INIT;
57663a75969SThomas Gleixner 	}
57763a75969SThomas Gleixner 
57863a75969SThomas Gleixner 	obj = alloc_object(addr, b, descr);
57963a75969SThomas Gleixner 	if (likely(obj)) {
58063a75969SThomas Gleixner 		obj->state = state;
58163a75969SThomas Gleixner 		debug_object_is_on_stack(addr, onstack);
58263a75969SThomas Gleixner 		return obj;
58363a75969SThomas Gleixner 	}
58463a75969SThomas Gleixner 
58563a75969SThomas Gleixner 	/* Out of memory. Do the cleanup outside of the locked region */
58663a75969SThomas Gleixner 	debug_objects_enabled = 0;
58763a75969SThomas Gleixner 	return NULL;
58863a75969SThomas Gleixner }
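
/*
 * Illustrative sketch (hypothetical descriptor, not part of the original
 * source) of the is_static_object() callback that lookup_object_or_alloc()
 * relies on for the alloc_ifstatic case: a statically initialized object
 * can be recognized by a field its initializer sets, as the self test
 * below does with 'static_init'.
 *
 *	static bool foo_is_static_object(void *addr)
 *	{
 *		struct foo *f = addr;
 *
 *		return f->initialized_statically;
 *	}
 */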
58963a75969SThomas Gleixner 
5900af462f1SThomas Gleixner static void debug_objects_fill_pool(void)
5910af462f1SThomas Gleixner {
5920af462f1SThomas Gleixner 	/*
5930af462f1SThomas Gleixner 	 * On RT enabled kernels the pool refill must happen in preemptible
5940cce06baSPeter Zijlstra 	 * context -- for !RT kernels we rely on the fact that spinlock_t and
5950cce06baSPeter Zijlstra 	 * raw_spinlock_t are basically the same type and this lock-type
5960cce06baSPeter Zijlstra 	 * inversion works just fine.
5970af462f1SThomas Gleixner 	 */
5980cce06baSPeter Zijlstra 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
5990cce06baSPeter Zijlstra 		/*
6000cce06baSPeter Zijlstra 		 * Annotate away the spinlock_t inside raw_spinlock_t warning
6010cce06baSPeter Zijlstra 		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
6020cce06baSPeter Zijlstra 		 * the preemptible() condition above.
6030cce06baSPeter Zijlstra 		 */
6040cce06baSPeter Zijlstra 		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
6050cce06baSPeter Zijlstra 		lock_map_acquire_try(&fill_pool_map);
6060af462f1SThomas Gleixner 		fill_pool();
6070cce06baSPeter Zijlstra 		lock_map_release(&fill_pool_map);
6080cce06baSPeter Zijlstra 	}
6090af462f1SThomas Gleixner }
6100af462f1SThomas Gleixner 
6113ac7fe5aSThomas Gleixner static void
612aedcade6SStephen Boyd __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
6133ac7fe5aSThomas Gleixner {
6149bb63626SAndrzej Hajda 	struct debug_obj *obj, o;
6153ac7fe5aSThomas Gleixner 	struct debug_bucket *db;
6163ac7fe5aSThomas Gleixner 	unsigned long flags;
6173ac7fe5aSThomas Gleixner 
6180af462f1SThomas Gleixner 	debug_objects_fill_pool();
61950db04ddSVegard Nossum 
6203ac7fe5aSThomas Gleixner 	db = get_bucket((unsigned long) addr);
6213ac7fe5aSThomas Gleixner 
622aef9cb05SThomas Gleixner 	raw_spin_lock_irqsave(&db->lock, flags);
6233ac7fe5aSThomas Gleixner 
62463a75969SThomas Gleixner 	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
62563a75969SThomas Gleixner 	if (unlikely(!obj)) {
626aef9cb05SThomas Gleixner 		raw_spin_unlock_irqrestore(&db->lock, flags);
6273ac7fe5aSThomas Gleixner 		debug_objects_oom();
6283ac7fe5aSThomas Gleixner 		return;
6293ac7fe5aSThomas Gleixner 	}
6303ac7fe5aSThomas Gleixner 
6313ac7fe5aSThomas Gleixner 	switch (obj->state) {
6323ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_NONE:
6333ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_INIT:
6343ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_INACTIVE:
6353ac7fe5aSThomas Gleixner 		obj->state = ODEBUG_STATE_INIT;
636aef9cb05SThomas Gleixner 		raw_spin_unlock_irqrestore(&db->lock, flags);
637d5f34153SWaiman Long 		return;
6383ac7fe5aSThomas Gleixner 	default:
6393ac7fe5aSThomas Gleixner 		break;
6403ac7fe5aSThomas Gleixner 	}
6413ac7fe5aSThomas Gleixner 
6429bb63626SAndrzej Hajda 	o = *obj;
643aef9cb05SThomas Gleixner 	raw_spin_unlock_irqrestore(&db->lock, flags);
6449bb63626SAndrzej Hajda 	debug_print_object(&o, "init");
6459bb63626SAndrzej Hajda 
6469bb63626SAndrzej Hajda 	if (o.state == ODEBUG_STATE_ACTIVE)
6479bb63626SAndrzej Hajda 		debug_object_fixup(descr->fixup_init, addr, o.state);
6483ac7fe5aSThomas Gleixner }
6493ac7fe5aSThomas Gleixner 
6503ac7fe5aSThomas Gleixner /**
6513ac7fe5aSThomas Gleixner  * debug_object_init - debug checks when an object is initialized
6523ac7fe5aSThomas Gleixner  * @addr:	address of the object
6533ac7fe5aSThomas Gleixner  * @descr:	pointer to an object specific debug description structure
6543ac7fe5aSThomas Gleixner  */
655aedcade6SStephen Boyd void debug_object_init(void *addr, const struct debug_obj_descr *descr)
6563ac7fe5aSThomas Gleixner {
6573ac7fe5aSThomas Gleixner 	if (!debug_objects_enabled)
6583ac7fe5aSThomas Gleixner 		return;
6593ac7fe5aSThomas Gleixner 
6603ac7fe5aSThomas Gleixner 	__debug_object_init(addr, descr, 0);
6613ac7fe5aSThomas Gleixner }
662f8ff04e2SChris Wilson EXPORT_SYMBOL_GPL(debug_object_init);
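
/*
 * Usage sketch (hypothetical 'struct foo' user, illustrative only and not
 * part of the original source): a subsystem defines a struct
 * debug_obj_descr and brackets the lifetime transitions of its objects
 * with the calls provided here and later in this file. On-stack objects
 * would use debug_object_init_on_stack() instead of debug_object_init().
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		if (debug_object_activate(f, &foo_debug_descr))
 *			return;		// check failed (-EINVAL)
 *		// ... start using the object ...
 *	}
 *
 *	void foo_stop(struct foo *f)
 *	{
 *		debug_object_deactivate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_release(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *	}
 */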
6633ac7fe5aSThomas Gleixner 
6643ac7fe5aSThomas Gleixner /**
6653ac7fe5aSThomas Gleixner  * debug_object_init_on_stack - debug checks when an object on stack is
6663ac7fe5aSThomas Gleixner  *				initialized
6673ac7fe5aSThomas Gleixner  * @addr:	address of the object
6683ac7fe5aSThomas Gleixner  * @descr:	pointer to an object specific debug description structure
6693ac7fe5aSThomas Gleixner  */
670aedcade6SStephen Boyd void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
6713ac7fe5aSThomas Gleixner {
6723ac7fe5aSThomas Gleixner 	if (!debug_objects_enabled)
6733ac7fe5aSThomas Gleixner 		return;
6743ac7fe5aSThomas Gleixner 
6753ac7fe5aSThomas Gleixner 	__debug_object_init(addr, descr, 1);
6763ac7fe5aSThomas Gleixner }
677f8ff04e2SChris Wilson EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
6783ac7fe5aSThomas Gleixner 
6793ac7fe5aSThomas Gleixner /**
6803ac7fe5aSThomas Gleixner  * debug_object_activate - debug checks when an object is activated
6813ac7fe5aSThomas Gleixner  * @addr:	address of the object
6823ac7fe5aSThomas Gleixner  * @descr:	pointer to an object specific debug description structure
683b778ae25SPaul E. McKenney  * Returns 0 on success, -EINVAL if the check failed.
6843ac7fe5aSThomas Gleixner  */
685aedcade6SStephen Boyd int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
6863ac7fe5aSThomas Gleixner {
68763a75969SThomas Gleixner 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
6883ac7fe5aSThomas Gleixner 	struct debug_bucket *db;
6893ac7fe5aSThomas Gleixner 	struct debug_obj *obj;
6903ac7fe5aSThomas Gleixner 	unsigned long flags;
6913ac7fe5aSThomas Gleixner 
6923ac7fe5aSThomas Gleixner 	if (!debug_objects_enabled)
693b778ae25SPaul E. McKenney 		return 0;
6943ac7fe5aSThomas Gleixner 
6950af462f1SThomas Gleixner 	debug_objects_fill_pool();
6960af462f1SThomas Gleixner 
6973ac7fe5aSThomas Gleixner 	db = get_bucket((unsigned long) addr);
6983ac7fe5aSThomas Gleixner 
699aef9cb05SThomas Gleixner 	raw_spin_lock_irqsave(&db->lock, flags);
7003ac7fe5aSThomas Gleixner 
70163a75969SThomas Gleixner 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
7029bb63626SAndrzej Hajda 	if (unlikely(!obj)) {
7039bb63626SAndrzej Hajda 		raw_spin_unlock_irqrestore(&db->lock, flags);
7049bb63626SAndrzej Hajda 		debug_objects_oom();
7059bb63626SAndrzej Hajda 		return 0;
7069bb63626SAndrzej Hajda 	} else if (likely(!IS_ERR(obj))) {
7073ac7fe5aSThomas Gleixner 		switch (obj->state) {
7089bb63626SAndrzej Hajda 		case ODEBUG_STATE_ACTIVE:
7099bb63626SAndrzej Hajda 		case ODEBUG_STATE_DESTROYED:
7109bb63626SAndrzej Hajda 			o = *obj;
7119bb63626SAndrzej Hajda 			break;
7123ac7fe5aSThomas Gleixner 		case ODEBUG_STATE_INIT:
7133ac7fe5aSThomas Gleixner 		case ODEBUG_STATE_INACTIVE:
7143ac7fe5aSThomas Gleixner 			obj->state = ODEBUG_STATE_ACTIVE;
7159bb63626SAndrzej Hajda 			fallthrough;
7163ac7fe5aSThomas Gleixner 		default:
717aef9cb05SThomas Gleixner 			raw_spin_unlock_irqrestore(&db->lock, flags);
718b778ae25SPaul E. McKenney 			return 0;
7193ac7fe5aSThomas Gleixner 		}
7209bb63626SAndrzej Hajda 	}
72163a75969SThomas Gleixner 
7229bb63626SAndrzej Hajda 	raw_spin_unlock_irqrestore(&db->lock, flags);
72363a75969SThomas Gleixner 	debug_print_object(&o, "activate");
7249bb63626SAndrzej Hajda 
7259bb63626SAndrzej Hajda 	switch (o.state) {
7269bb63626SAndrzej Hajda 	case ODEBUG_STATE_ACTIVE:
7279bb63626SAndrzej Hajda 	case ODEBUG_STATE_NOTAVAILABLE:
7289bb63626SAndrzej Hajda 		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
7299bb63626SAndrzej Hajda 			return 0;
7309bb63626SAndrzej Hajda 		fallthrough;
7319bb63626SAndrzej Hajda 	default:
7329bb63626SAndrzej Hajda 		return -EINVAL;
7339bb63626SAndrzej Hajda 	}
73463a75969SThomas Gleixner }
735f8ff04e2SChris Wilson EXPORT_SYMBOL_GPL(debug_object_activate);
7363ac7fe5aSThomas Gleixner 
7373ac7fe5aSThomas Gleixner /**
7383ac7fe5aSThomas Gleixner  * debug_object_deactivate - debug checks when an object is deactivated
7393ac7fe5aSThomas Gleixner  * @addr:	address of the object
7403ac7fe5aSThomas Gleixner  * @descr:	pointer to an object specific debug description structure
7413ac7fe5aSThomas Gleixner  */
742aedcade6SStephen Boyd void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
7433ac7fe5aSThomas Gleixner {
7449bb63626SAndrzej Hajda 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
7453ac7fe5aSThomas Gleixner 	struct debug_bucket *db;
7463ac7fe5aSThomas Gleixner 	struct debug_obj *obj;
7473ac7fe5aSThomas Gleixner 	unsigned long flags;
7483ac7fe5aSThomas Gleixner 
7493ac7fe5aSThomas Gleixner 	if (!debug_objects_enabled)
7503ac7fe5aSThomas Gleixner 		return;
7513ac7fe5aSThomas Gleixner 
7523ac7fe5aSThomas Gleixner 	db = get_bucket((unsigned long) addr);
7533ac7fe5aSThomas Gleixner 
754aef9cb05SThomas Gleixner 	raw_spin_lock_irqsave(&db->lock, flags);
7553ac7fe5aSThomas Gleixner 
7563ac7fe5aSThomas Gleixner 	obj = lookup_object(addr, db);
7573ac7fe5aSThomas Gleixner 	if (obj) {
7583ac7fe5aSThomas Gleixner 		switch (obj->state) {
7599bb63626SAndrzej Hajda 		case ODEBUG_STATE_DESTROYED:
7609bb63626SAndrzej Hajda 			break;
7613ac7fe5aSThomas Gleixner 		case ODEBUG_STATE_INIT:
7623ac7fe5aSThomas Gleixner 		case ODEBUG_STATE_INACTIVE:
7633ac7fe5aSThomas Gleixner 		case ODEBUG_STATE_ACTIVE:
7649bb63626SAndrzej Hajda 			if (obj->astate)
7659bb63626SAndrzej Hajda 				break;
7663ac7fe5aSThomas Gleixner 			obj->state = ODEBUG_STATE_INACTIVE;
7679bb63626SAndrzej Hajda 			fallthrough;
7683ac7fe5aSThomas Gleixner 		default:
7699bb63626SAndrzej Hajda 			raw_spin_unlock_irqrestore(&db->lock, flags);
7709bb63626SAndrzej Hajda 			return;
7713ac7fe5aSThomas Gleixner 		}
7729bb63626SAndrzej Hajda 		o = *obj;
773d5f34153SWaiman Long 	}
774d5f34153SWaiman Long 
775d5f34153SWaiman Long 	raw_spin_unlock_irqrestore(&db->lock, flags);
7763ac7fe5aSThomas Gleixner 	debug_print_object(&o, "deactivate");
7773ac7fe5aSThomas Gleixner }
778f8ff04e2SChris Wilson EXPORT_SYMBOL_GPL(debug_object_deactivate);
7793ac7fe5aSThomas Gleixner 
7803ac7fe5aSThomas Gleixner /**
7813ac7fe5aSThomas Gleixner  * debug_object_destroy - debug checks when an object is destroyed
7823ac7fe5aSThomas Gleixner  * @addr:	address of the object
7833ac7fe5aSThomas Gleixner  * @descr:	pointer to an object specific debug description structure
7843ac7fe5aSThomas Gleixner  */
785aedcade6SStephen Boyd void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
7863ac7fe5aSThomas Gleixner {
7879bb63626SAndrzej Hajda 	struct debug_obj *obj, o;
7883ac7fe5aSThomas Gleixner 	struct debug_bucket *db;
7893ac7fe5aSThomas Gleixner 	unsigned long flags;
7903ac7fe5aSThomas Gleixner 
7913ac7fe5aSThomas Gleixner 	if (!debug_objects_enabled)
7923ac7fe5aSThomas Gleixner 		return;
7933ac7fe5aSThomas Gleixner 
7943ac7fe5aSThomas Gleixner 	db = get_bucket((unsigned long) addr);
7953ac7fe5aSThomas Gleixner 
796aef9cb05SThomas Gleixner 	raw_spin_lock_irqsave(&db->lock, flags);
7973ac7fe5aSThomas Gleixner 
7983ac7fe5aSThomas Gleixner 	obj = lookup_object(addr, db);
7999bb63626SAndrzej Hajda 	if (!obj) {
8009bb63626SAndrzej Hajda 		raw_spin_unlock_irqrestore(&db->lock, flags);
8019bb63626SAndrzej Hajda 		return;
8029bb63626SAndrzej Hajda 	}
8033ac7fe5aSThomas Gleixner 
8043ac7fe5aSThomas Gleixner 	switch (obj->state) {
8059bb63626SAndrzej Hajda 	case ODEBUG_STATE_ACTIVE:
8069bb63626SAndrzej Hajda 	case ODEBUG_STATE_DESTROYED:
8079bb63626SAndrzej Hajda 		break;
8083ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_NONE:
8093ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_INIT:
8103ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_INACTIVE:
8113ac7fe5aSThomas Gleixner 		obj->state = ODEBUG_STATE_DESTROYED;
8129bb63626SAndrzej Hajda 		fallthrough;
8133ac7fe5aSThomas Gleixner 	default:
814aef9cb05SThomas Gleixner 		raw_spin_unlock_irqrestore(&db->lock, flags);
8159bb63626SAndrzej Hajda 		return;
8169bb63626SAndrzej Hajda 	}
8179bb63626SAndrzej Hajda 
8189bb63626SAndrzej Hajda 	o = *obj;
8199bb63626SAndrzej Hajda 	raw_spin_unlock_irqrestore(&db->lock, flags);
8209bb63626SAndrzej Hajda 	debug_print_object(&o, "destroy");
8219bb63626SAndrzej Hajda 
8229bb63626SAndrzej Hajda 	if (o.state == ODEBUG_STATE_ACTIVE)
8239bb63626SAndrzej Hajda 		debug_object_fixup(descr->fixup_destroy, addr, o.state);
8243ac7fe5aSThomas Gleixner }
825f8ff04e2SChris Wilson EXPORT_SYMBOL_GPL(debug_object_destroy);
8263ac7fe5aSThomas Gleixner 
8273ac7fe5aSThomas Gleixner /**
8283ac7fe5aSThomas Gleixner  * debug_object_free - debug checks when an object is freed
8293ac7fe5aSThomas Gleixner  * @addr:	address of the object
8303ac7fe5aSThomas Gleixner  * @descr:	pointer to an object specific debug description structure
8313ac7fe5aSThomas Gleixner  */
832aedcade6SStephen Boyd void debug_object_free(void *addr, const struct debug_obj_descr *descr)
8333ac7fe5aSThomas Gleixner {
8349bb63626SAndrzej Hajda 	struct debug_obj *obj, o;
8353ac7fe5aSThomas Gleixner 	struct debug_bucket *db;
8363ac7fe5aSThomas Gleixner 	unsigned long flags;
8373ac7fe5aSThomas Gleixner 
8383ac7fe5aSThomas Gleixner 	if (!debug_objects_enabled)
8393ac7fe5aSThomas Gleixner 		return;
8403ac7fe5aSThomas Gleixner 
8413ac7fe5aSThomas Gleixner 	db = get_bucket((unsigned long) addr);
8423ac7fe5aSThomas Gleixner 
843aef9cb05SThomas Gleixner 	raw_spin_lock_irqsave(&db->lock, flags);
8443ac7fe5aSThomas Gleixner 
8453ac7fe5aSThomas Gleixner 	obj = lookup_object(addr, db);
8469bb63626SAndrzej Hajda 	if (!obj) {
8479bb63626SAndrzej Hajda 		raw_spin_unlock_irqrestore(&db->lock, flags);
8489bb63626SAndrzej Hajda 		return;
8499bb63626SAndrzej Hajda 	}
8503ac7fe5aSThomas Gleixner 
8513ac7fe5aSThomas Gleixner 	switch (obj->state) {
8523ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
8539bb63626SAndrzej Hajda 		break;
8543ac7fe5aSThomas Gleixner 	default:
8553ac7fe5aSThomas Gleixner 		hlist_del(&obj->node);
856aef9cb05SThomas Gleixner 		raw_spin_unlock_irqrestore(&db->lock, flags);
8573ac7fe5aSThomas Gleixner 		free_object(obj);
858673d62ccSVegard Nossum 		return;
8593ac7fe5aSThomas Gleixner 	}
8609bb63626SAndrzej Hajda 
8619bb63626SAndrzej Hajda 	o = *obj;
862aef9cb05SThomas Gleixner 	raw_spin_unlock_irqrestore(&db->lock, flags);
8639bb63626SAndrzej Hajda 	debug_print_object(&o, "free");
8649bb63626SAndrzej Hajda 
8659bb63626SAndrzej Hajda 	debug_object_fixup(descr->fixup_free, addr, o.state);
8663ac7fe5aSThomas Gleixner }
867f8ff04e2SChris Wilson EXPORT_SYMBOL_GPL(debug_object_free);
8683ac7fe5aSThomas Gleixner 
869a5d8e467SMathieu Desnoyers /**
870b84d435cSChristine Chan  * debug_object_assert_init - debug checks when object should be init-ed
871b84d435cSChristine Chan  * @addr:	address of the object
872b84d435cSChristine Chan  * @descr:	pointer to an object specific debug description structure
873b84d435cSChristine Chan  */
874aedcade6SStephen Boyd void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
875b84d435cSChristine Chan {
87663a75969SThomas Gleixner 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
877b84d435cSChristine Chan 	struct debug_bucket *db;
878b84d435cSChristine Chan 	struct debug_obj *obj;
879b84d435cSChristine Chan 	unsigned long flags;
880b84d435cSChristine Chan 
881b84d435cSChristine Chan 	if (!debug_objects_enabled)
882b84d435cSChristine Chan 		return;
883b84d435cSChristine Chan 
8840af462f1SThomas Gleixner 	debug_objects_fill_pool();
8850af462f1SThomas Gleixner 
886b84d435cSChristine Chan 	db = get_bucket((unsigned long) addr);
887b84d435cSChristine Chan 
888b84d435cSChristine Chan 	raw_spin_lock_irqsave(&db->lock, flags);
88963a75969SThomas Gleixner 	obj = lookup_object_or_alloc(addr, db, descr, false, true);
890b84d435cSChristine Chan 	raw_spin_unlock_irqrestore(&db->lock, flags);
89163a75969SThomas Gleixner 	if (likely(!IS_ERR_OR_NULL(obj)))
89263a75969SThomas Gleixner 		return;
89363a75969SThomas Gleixner 
89463a75969SThomas Gleixner 	/* If NULL the allocation has hit OOM */
89563a75969SThomas Gleixner 	if (!obj) {
89663a75969SThomas Gleixner 		debug_objects_oom();
897b84d435cSChristine Chan 		return;
898b84d435cSChristine Chan 	}
899b84d435cSChristine Chan 
90063a75969SThomas Gleixner 	/* Object is neither tracked nor static. It's not initialized. */
90163a75969SThomas Gleixner 	debug_print_object(&o, "assert_init");
90263a75969SThomas Gleixner 	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
903b84d435cSChristine Chan }
904f8ff04e2SChris Wilson EXPORT_SYMBOL_GPL(debug_object_assert_init);
905b84d435cSChristine Chan 
906b84d435cSChristine Chan /**
907a5d8e467SMathieu Desnoyers  * debug_object_active_state - debug checks object usage state machine
908a5d8e467SMathieu Desnoyers  * @addr:	address of the object
909a5d8e467SMathieu Desnoyers  * @descr:	pointer to an object specific debug description structure
910a5d8e467SMathieu Desnoyers  * @expect:	expected state
911a5d8e467SMathieu Desnoyers  * @next:	state to move to if expected state is found
912a5d8e467SMathieu Desnoyers  */
913a5d8e467SMathieu Desnoyers void
914aedcade6SStephen Boyd debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
915a5d8e467SMathieu Desnoyers 			  unsigned int expect, unsigned int next)
916a5d8e467SMathieu Desnoyers {
9179bb63626SAndrzej Hajda 	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
918a5d8e467SMathieu Desnoyers 	struct debug_bucket *db;
919a5d8e467SMathieu Desnoyers 	struct debug_obj *obj;
920a5d8e467SMathieu Desnoyers 	unsigned long flags;
921a5d8e467SMathieu Desnoyers 
922a5d8e467SMathieu Desnoyers 	if (!debug_objects_enabled)
923a5d8e467SMathieu Desnoyers 		return;
924a5d8e467SMathieu Desnoyers 
925a5d8e467SMathieu Desnoyers 	db = get_bucket((unsigned long) addr);
926a5d8e467SMathieu Desnoyers 
927a5d8e467SMathieu Desnoyers 	raw_spin_lock_irqsave(&db->lock, flags);
928a5d8e467SMathieu Desnoyers 
929a5d8e467SMathieu Desnoyers 	obj = lookup_object(addr, db);
930a5d8e467SMathieu Desnoyers 	if (obj) {
931a5d8e467SMathieu Desnoyers 		switch (obj->state) {
932a5d8e467SMathieu Desnoyers 		case ODEBUG_STATE_ACTIVE:
9339bb63626SAndrzej Hajda 			if (obj->astate != expect)
934a5d8e467SMathieu Desnoyers 				break;
9359bb63626SAndrzej Hajda 			obj->astate = next;
9369bb63626SAndrzej Hajda 			raw_spin_unlock_irqrestore(&db->lock, flags);
9379bb63626SAndrzej Hajda 			return;
938a5d8e467SMathieu Desnoyers 		default:
939a5d8e467SMathieu Desnoyers 			break;
940a5d8e467SMathieu Desnoyers 		}
9419bb63626SAndrzej Hajda 		o = *obj;
942d5f34153SWaiman Long 	}
943d5f34153SWaiman Long 
944d5f34153SWaiman Long 	raw_spin_unlock_irqrestore(&db->lock, flags);
945a5d8e467SMathieu Desnoyers 	debug_print_object(&o, "active_state");
946a5d8e467SMathieu Desnoyers }
947f8ff04e2SChris Wilson EXPORT_SYMBOL_GPL(debug_object_active_state);
948a5d8e467SMathieu Desnoyers 
9493ac7fe5aSThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_FREE
9503ac7fe5aSThomas Gleixner static void __debug_check_no_obj_freed(const void *address, unsigned long size)
9513ac7fe5aSThomas Gleixner {
9523ac7fe5aSThomas Gleixner 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
9539bb63626SAndrzej Hajda 	int cnt, objs_checked = 0;
9549bb63626SAndrzej Hajda 	struct debug_obj *obj, o;
9553ac7fe5aSThomas Gleixner 	struct debug_bucket *db;
9561ea9b98bSYang Shi 	struct hlist_node *tmp;
9573ac7fe5aSThomas Gleixner 
9583ac7fe5aSThomas Gleixner 	saddr = (unsigned long) address;
9593ac7fe5aSThomas Gleixner 	eaddr = saddr + size;
9603ac7fe5aSThomas Gleixner 	paddr = saddr & ODEBUG_CHUNK_MASK;
9613ac7fe5aSThomas Gleixner 	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
9623ac7fe5aSThomas Gleixner 	chunks >>= ODEBUG_CHUNK_SHIFT;
9633ac7fe5aSThomas Gleixner 
9643ac7fe5aSThomas Gleixner 	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
9653ac7fe5aSThomas Gleixner 		db = get_bucket(paddr);
9663ac7fe5aSThomas Gleixner 
9673ac7fe5aSThomas Gleixner repeat:
9683ac7fe5aSThomas Gleixner 		cnt = 0;
969aef9cb05SThomas Gleixner 		raw_spin_lock_irqsave(&db->lock, flags);
970b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
9713ac7fe5aSThomas Gleixner 			cnt++;
9723ac7fe5aSThomas Gleixner 			oaddr = (unsigned long) obj->object;
9733ac7fe5aSThomas Gleixner 			if (oaddr < saddr || oaddr >= eaddr)
9743ac7fe5aSThomas Gleixner 				continue;
9753ac7fe5aSThomas Gleixner 
9763ac7fe5aSThomas Gleixner 			switch (obj->state) {
9773ac7fe5aSThomas Gleixner 			case ODEBUG_STATE_ACTIVE:
9789bb63626SAndrzej Hajda 				o = *obj;
979aef9cb05SThomas Gleixner 				raw_spin_unlock_irqrestore(&db->lock, flags);
9809bb63626SAndrzej Hajda 				debug_print_object(&o, "free");
9819bb63626SAndrzej Hajda 				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
9823ac7fe5aSThomas Gleixner 				goto repeat;
9833ac7fe5aSThomas Gleixner 			default:
9843ac7fe5aSThomas Gleixner 				hlist_del(&obj->node);
985a7344a68SWaiman Long 				__free_object(obj);
9863ac7fe5aSThomas Gleixner 				break;
9873ac7fe5aSThomas Gleixner 			}
9883ac7fe5aSThomas Gleixner 		}
989aef9cb05SThomas Gleixner 		raw_spin_unlock_irqrestore(&db->lock, flags);
990673d62ccSVegard Nossum 
9913ac7fe5aSThomas Gleixner 		if (cnt > debug_objects_maxchain)
9923ac7fe5aSThomas Gleixner 			debug_objects_maxchain = cnt;
993bd9dcd04SYang Shi 
994bd9dcd04SYang Shi 		objs_checked += cnt;
9953ac7fe5aSThomas Gleixner 	}
996bd9dcd04SYang Shi 
997bd9dcd04SYang Shi 	if (objs_checked > debug_objects_maxchecked)
998bd9dcd04SYang Shi 		debug_objects_maxchecked = objs_checked;
9991ea9b98bSYang Shi 
10001ea9b98bSYang Shi 	/* Schedule work to actually kmem_cache_free() objects */
100135fd7a63SMarco Elver 	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1002a7344a68SWaiman Long 		WRITE_ONCE(obj_freeing, true);
1003a7344a68SWaiman Long 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1004a7344a68SWaiman Long 	}
10053ac7fe5aSThomas Gleixner }
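
/*
 * Worked example for the chunk arithmetic above (illustrative, assuming 4K
 * pages): freeing a 100 byte region that starts 0xfe0 bytes into a page
 * straddles a page boundary, so eaddr - paddr = 0xfe0 + 0x64 = 0x1044 and
 * chunks = (0x1044 + 0xfff) >> 12 = 2, i.e. exactly the two buckets
 * covering the two affected pages are scanned.
 */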
10063ac7fe5aSThomas Gleixner 
10073ac7fe5aSThomas Gleixner void debug_check_no_obj_freed(const void *address, unsigned long size)
10083ac7fe5aSThomas Gleixner {
10093ac7fe5aSThomas Gleixner 	if (debug_objects_enabled)
10103ac7fe5aSThomas Gleixner 		__debug_check_no_obj_freed(address, size);
10113ac7fe5aSThomas Gleixner }
10123ac7fe5aSThomas Gleixner #endif
10133ac7fe5aSThomas Gleixner 
10143ac7fe5aSThomas Gleixner #ifdef CONFIG_DEBUG_FS
10153ac7fe5aSThomas Gleixner 
10163ac7fe5aSThomas Gleixner static int debug_stats_show(struct seq_file *m, void *v)
10173ac7fe5aSThomas Gleixner {
1018d86998b1SWaiman Long 	int cpu, obj_percpu_free = 0;
1019d86998b1SWaiman Long 
1020d86998b1SWaiman Long 	for_each_possible_cpu(cpu)
1021d86998b1SWaiman Long 		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1022d86998b1SWaiman Long 
10233ac7fe5aSThomas Gleixner 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1024bd9dcd04SYang Shi 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
10253ac7fe5aSThomas Gleixner 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
10263ac7fe5aSThomas Gleixner 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
102735fd7a63SMarco Elver 	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1028d86998b1SWaiman Long 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
10293ac7fe5aSThomas Gleixner 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1030d86998b1SWaiman Long 	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
10313ac7fe5aSThomas Gleixner 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
103235fd7a63SMarco Elver 	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
10330cad93c3SWaiman Long 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
10340cad93c3SWaiman Long 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
10353ac7fe5aSThomas Gleixner 	return 0;
10363ac7fe5aSThomas Gleixner }
10370f85c480SQinglang Miao DEFINE_SHOW_ATTRIBUTE(debug_stats);
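
/*
 * Example of the resulting debugfs file (numbers are made up, illustrative
 * only), typically read via /sys/kernel/debug/debug_objects/stats:
 *
 *	max_chain     :2
 *	max_checked   :41
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1523
 *	pool_pcp_free :259
 *	pool_min_free :643
 *	pool_used     :221
 *	pool_max_used :630
 *	on_free_list  :0
 *	objs_allocated:2068
 *	objs_freed    :0
 */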
10383ac7fe5aSThomas Gleixner 
10393ac7fe5aSThomas Gleixner static int __init debug_objects_init_debugfs(void)
10403ac7fe5aSThomas Gleixner {
1041fecb0d95SGreg Kroah-Hartman 	struct dentry *dbgdir;
10423ac7fe5aSThomas Gleixner 
10433ac7fe5aSThomas Gleixner 	if (!debug_objects_enabled)
10443ac7fe5aSThomas Gleixner 		return 0;
10453ac7fe5aSThomas Gleixner 
10463ac7fe5aSThomas Gleixner 	dbgdir = debugfs_create_dir("debug_objects", NULL);
10473ac7fe5aSThomas Gleixner 
1048fecb0d95SGreg Kroah-Hartman 	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
10493ac7fe5aSThomas Gleixner 
10503ac7fe5aSThomas Gleixner 	return 0;
10513ac7fe5aSThomas Gleixner }
10523ac7fe5aSThomas Gleixner __initcall(debug_objects_init_debugfs);
10533ac7fe5aSThomas Gleixner 
10543ac7fe5aSThomas Gleixner #else
10553ac7fe5aSThomas Gleixner static inline void debug_objects_init_debugfs(void) { }
10563ac7fe5aSThomas Gleixner #endif
10573ac7fe5aSThomas Gleixner 
10583ac7fe5aSThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
10593ac7fe5aSThomas Gleixner 
10603ac7fe5aSThomas Gleixner /* Random data structure for the self test */
10613ac7fe5aSThomas Gleixner struct self_test {
10623ac7fe5aSThomas Gleixner 	unsigned long	dummy1[6];
10633ac7fe5aSThomas Gleixner 	int		static_init;
10643ac7fe5aSThomas Gleixner 	unsigned long	dummy2[3];
10653ac7fe5aSThomas Gleixner };
10663ac7fe5aSThomas Gleixner 
1067aedcade6SStephen Boyd static __initconst const struct debug_obj_descr descr_type_test;
10683ac7fe5aSThomas Gleixner 
1069b9fdac7fSDu, Changbin static bool __init is_static_object(void *addr)
1070b9fdac7fSDu, Changbin {
1071b9fdac7fSDu, Changbin 	struct self_test *obj = addr;
1072b9fdac7fSDu, Changbin 
1073b9fdac7fSDu, Changbin 	return obj->static_init;
1074b9fdac7fSDu, Changbin }
1075b9fdac7fSDu, Changbin 
10763ac7fe5aSThomas Gleixner /*
10773ac7fe5aSThomas Gleixner  * fixup_init is called when:
10783ac7fe5aSThomas Gleixner  * - an active object is initialized
10793ac7fe5aSThomas Gleixner  */
1080b1e4d9d8SDu, Changbin static bool __init fixup_init(void *addr, enum debug_obj_state state)
10813ac7fe5aSThomas Gleixner {
10823ac7fe5aSThomas Gleixner 	struct self_test *obj = addr;
10833ac7fe5aSThomas Gleixner 
10843ac7fe5aSThomas Gleixner 	switch (state) {
10853ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
10863ac7fe5aSThomas Gleixner 		debug_object_deactivate(obj, &descr_type_test);
10873ac7fe5aSThomas Gleixner 		debug_object_init(obj, &descr_type_test);
1088b1e4d9d8SDu, Changbin 		return true;
10893ac7fe5aSThomas Gleixner 	default:
1090b1e4d9d8SDu, Changbin 		return false;
10913ac7fe5aSThomas Gleixner 	}
10923ac7fe5aSThomas Gleixner }
10933ac7fe5aSThomas Gleixner 
10943ac7fe5aSThomas Gleixner /*
10953ac7fe5aSThomas Gleixner  * fixup_activate is called when:
10963ac7fe5aSThomas Gleixner  * - an active object is activated
1097b9fdac7fSDu, Changbin  * - an unknown non-static object is activated
10983ac7fe5aSThomas Gleixner  */
1099b1e4d9d8SDu, Changbin static bool __init fixup_activate(void *addr, enum debug_obj_state state)
11003ac7fe5aSThomas Gleixner {
11013ac7fe5aSThomas Gleixner 	struct self_test *obj = addr;
11023ac7fe5aSThomas Gleixner 
11033ac7fe5aSThomas Gleixner 	switch (state) {
11043ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_NOTAVAILABLE:
1105b1e4d9d8SDu, Changbin 		return true;
11063ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
11073ac7fe5aSThomas Gleixner 		debug_object_deactivate(obj, &descr_type_test);
11083ac7fe5aSThomas Gleixner 		debug_object_activate(obj, &descr_type_test);
1109b1e4d9d8SDu, Changbin 		return true;
11103ac7fe5aSThomas Gleixner 
11113ac7fe5aSThomas Gleixner 	default:
1112b1e4d9d8SDu, Changbin 		return false;
11133ac7fe5aSThomas Gleixner 	}
11143ac7fe5aSThomas Gleixner }
11153ac7fe5aSThomas Gleixner 
11163ac7fe5aSThomas Gleixner /*
11173ac7fe5aSThomas Gleixner  * fixup_destroy is called when:
11183ac7fe5aSThomas Gleixner  * - an active object is destroyed
11193ac7fe5aSThomas Gleixner  */
1120b1e4d9d8SDu, Changbin static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
11213ac7fe5aSThomas Gleixner {
11223ac7fe5aSThomas Gleixner 	struct self_test *obj = addr;
11233ac7fe5aSThomas Gleixner 
11243ac7fe5aSThomas Gleixner 	switch (state) {
11253ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
11263ac7fe5aSThomas Gleixner 		debug_object_deactivate(obj, &descr_type_test);
11273ac7fe5aSThomas Gleixner 		debug_object_destroy(obj, &descr_type_test);
1128b1e4d9d8SDu, Changbin 		return true;
11293ac7fe5aSThomas Gleixner 	default:
1130b1e4d9d8SDu, Changbin 		return false;
11313ac7fe5aSThomas Gleixner 	}
11323ac7fe5aSThomas Gleixner }
11333ac7fe5aSThomas Gleixner 
11343ac7fe5aSThomas Gleixner /*
11353ac7fe5aSThomas Gleixner  * fixup_free is called when:
11363ac7fe5aSThomas Gleixner  * - an active object is freed
11373ac7fe5aSThomas Gleixner  */
1138b1e4d9d8SDu, Changbin static bool __init fixup_free(void *addr, enum debug_obj_state state)
11393ac7fe5aSThomas Gleixner {
11403ac7fe5aSThomas Gleixner 	struct self_test *obj = addr;
11413ac7fe5aSThomas Gleixner 
11423ac7fe5aSThomas Gleixner 	switch (state) {
11433ac7fe5aSThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
11443ac7fe5aSThomas Gleixner 		debug_object_deactivate(obj, &descr_type_test);
11453ac7fe5aSThomas Gleixner 		debug_object_free(obj, &descr_type_test);
1146b1e4d9d8SDu, Changbin 		return true;
11473ac7fe5aSThomas Gleixner 	default:
1148b1e4d9d8SDu, Changbin 		return false;
11493ac7fe5aSThomas Gleixner 	}
11503ac7fe5aSThomas Gleixner }
11513ac7fe5aSThomas Gleixner 
11521fb2f77cSHenrik Kretzschmar static int __init
11533ac7fe5aSThomas Gleixner check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
11543ac7fe5aSThomas Gleixner {
11553ac7fe5aSThomas Gleixner 	struct debug_bucket *db;
11563ac7fe5aSThomas Gleixner 	struct debug_obj *obj;
11573ac7fe5aSThomas Gleixner 	unsigned long flags;
11583ac7fe5aSThomas Gleixner 	int res = -EINVAL;
11593ac7fe5aSThomas Gleixner 
11603ac7fe5aSThomas Gleixner 	db = get_bucket((unsigned long) addr);
11613ac7fe5aSThomas Gleixner 
1162aef9cb05SThomas Gleixner 	raw_spin_lock_irqsave(&db->lock, flags);
11633ac7fe5aSThomas Gleixner 
11643ac7fe5aSThomas Gleixner 	obj = lookup_object(addr, db);
11653ac7fe5aSThomas Gleixner 	if (!obj && state != ODEBUG_STATE_NONE) {
11665cd2b459SArjan van de Ven 		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
11673ac7fe5aSThomas Gleixner 		goto out;
11683ac7fe5aSThomas Gleixner 	}
11693ac7fe5aSThomas Gleixner 	if (obj && obj->state != state) {
11705cd2b459SArjan van de Ven 		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
11713ac7fe5aSThomas Gleixner 		       obj->state, state);
11723ac7fe5aSThomas Gleixner 		goto out;
11733ac7fe5aSThomas Gleixner 	}
11743ac7fe5aSThomas Gleixner 	if (fixups != debug_objects_fixups) {
11755cd2b459SArjan van de Ven 		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
11763ac7fe5aSThomas Gleixner 		       fixups, debug_objects_fixups);
11773ac7fe5aSThomas Gleixner 		goto out;
11783ac7fe5aSThomas Gleixner 	}
11793ac7fe5aSThomas Gleixner 	if (warnings != debug_objects_warnings) {
11805cd2b459SArjan van de Ven 		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
11813ac7fe5aSThomas Gleixner 		       warnings, debug_objects_warnings);
11823ac7fe5aSThomas Gleixner 		goto out;
11833ac7fe5aSThomas Gleixner 	}
11843ac7fe5aSThomas Gleixner 	res = 0;
11853ac7fe5aSThomas Gleixner out:
1186aef9cb05SThomas Gleixner 	raw_spin_unlock_irqrestore(&db->lock, flags);
11873ac7fe5aSThomas Gleixner 	if (res)
11883ac7fe5aSThomas Gleixner 		debug_objects_enabled = 0;
11893ac7fe5aSThomas Gleixner 	return res;
11903ac7fe5aSThomas Gleixner }
11913ac7fe5aSThomas Gleixner 
1192aedcade6SStephen Boyd static __initconst const struct debug_obj_descr descr_type_test = {
11933ac7fe5aSThomas Gleixner 	.name			= "selftest",
1194b9fdac7fSDu, Changbin 	.is_static_object	= is_static_object,
11953ac7fe5aSThomas Gleixner 	.fixup_init		= fixup_init,
11963ac7fe5aSThomas Gleixner 	.fixup_activate		= fixup_activate,
11973ac7fe5aSThomas Gleixner 	.fixup_destroy		= fixup_destroy,
11983ac7fe5aSThomas Gleixner 	.fixup_free		= fixup_free,
11993ac7fe5aSThomas Gleixner };
12003ac7fe5aSThomas Gleixner 
12013ac7fe5aSThomas Gleixner static __initdata struct self_test obj = { .static_init = 0 };
12023ac7fe5aSThomas Gleixner 
120355fb412eSThomas Gleixner static bool __init debug_objects_selftest(void)
12043ac7fe5aSThomas Gleixner {
12053ac7fe5aSThomas Gleixner 	int fixups, oldfixups, warnings, oldwarnings;
12063ac7fe5aSThomas Gleixner 	unsigned long flags;
12073ac7fe5aSThomas Gleixner 
12083ac7fe5aSThomas Gleixner 	local_irq_save(flags);
12093ac7fe5aSThomas Gleixner 
12103ac7fe5aSThomas Gleixner 	fixups = oldfixups = debug_objects_fixups;
12113ac7fe5aSThomas Gleixner 	warnings = oldwarnings = debug_objects_warnings;
12123ac7fe5aSThomas Gleixner 	descr_test = &descr_type_test;
12133ac7fe5aSThomas Gleixner 
12143ac7fe5aSThomas Gleixner 	debug_object_init(&obj, &descr_type_test);
12153ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
12163ac7fe5aSThomas Gleixner 		goto out;
12173ac7fe5aSThomas Gleixner 	debug_object_activate(&obj, &descr_type_test);
12183ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
12193ac7fe5aSThomas Gleixner 		goto out;
12203ac7fe5aSThomas Gleixner 	debug_object_activate(&obj, &descr_type_test);
12213ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
12223ac7fe5aSThomas Gleixner 		goto out;
12233ac7fe5aSThomas Gleixner 	debug_object_deactivate(&obj, &descr_type_test);
12243ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
12253ac7fe5aSThomas Gleixner 		goto out;
12263ac7fe5aSThomas Gleixner 	debug_object_destroy(&obj, &descr_type_test);
12273ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
12283ac7fe5aSThomas Gleixner 		goto out;
12293ac7fe5aSThomas Gleixner 	debug_object_init(&obj, &descr_type_test);
12303ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
12313ac7fe5aSThomas Gleixner 		goto out;
12323ac7fe5aSThomas Gleixner 	debug_object_activate(&obj, &descr_type_test);
12333ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
12343ac7fe5aSThomas Gleixner 		goto out;
12353ac7fe5aSThomas Gleixner 	debug_object_deactivate(&obj, &descr_type_test);
12363ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
12373ac7fe5aSThomas Gleixner 		goto out;
12383ac7fe5aSThomas Gleixner 	debug_object_free(&obj, &descr_type_test);
12393ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
12403ac7fe5aSThomas Gleixner 		goto out;
12413ac7fe5aSThomas Gleixner 
12423ac7fe5aSThomas Gleixner 	obj.static_init = 1;
12433ac7fe5aSThomas Gleixner 	debug_object_activate(&obj, &descr_type_test);
12449f78ff00SStephen Boyd 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
12453ac7fe5aSThomas Gleixner 		goto out;
12463ac7fe5aSThomas Gleixner 	debug_object_init(&obj, &descr_type_test);
12473ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
12483ac7fe5aSThomas Gleixner 		goto out;
12493ac7fe5aSThomas Gleixner 	debug_object_free(&obj, &descr_type_test);
12503ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
12513ac7fe5aSThomas Gleixner 		goto out;
12523ac7fe5aSThomas Gleixner 
12533ac7fe5aSThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_FREE
12543ac7fe5aSThomas Gleixner 	debug_object_init(&obj, &descr_type_test);
12553ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
12563ac7fe5aSThomas Gleixner 		goto out;
12573ac7fe5aSThomas Gleixner 	debug_object_activate(&obj, &descr_type_test);
12583ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
12593ac7fe5aSThomas Gleixner 		goto out;
12603ac7fe5aSThomas Gleixner 	__debug_check_no_obj_freed(&obj, sizeof(obj));
12613ac7fe5aSThomas Gleixner 	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
12623ac7fe5aSThomas Gleixner 		goto out;
12633ac7fe5aSThomas Gleixner #endif
1264719e4843SFabian Frederick 	pr_info("selftest passed\n");
12653ac7fe5aSThomas Gleixner 
12663ac7fe5aSThomas Gleixner out:
12673ac7fe5aSThomas Gleixner 	debug_objects_fixups = oldfixups;
12683ac7fe5aSThomas Gleixner 	debug_objects_warnings = oldwarnings;
12693ac7fe5aSThomas Gleixner 	descr_test = NULL;
12703ac7fe5aSThomas Gleixner 
12713ac7fe5aSThomas Gleixner 	local_irq_restore(flags);
127255fb412eSThomas Gleixner 	return !!debug_objects_enabled;
12733ac7fe5aSThomas Gleixner }
12743ac7fe5aSThomas Gleixner #else
127555fb412eSThomas Gleixner static inline bool debug_objects_selftest(void) { return true; }
12763ac7fe5aSThomas Gleixner #endif
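
/*
 * Illustrative sketch (not part of the upstream file): how a client
 * subsystem typically hooks an object into this infrastructure. The
 * "foo" type, its descriptor and the helpers are hypothetical; only the
 * struct debug_obj_descr fields and the debug_object_*() calls are the
 * real API from <linux/debugobjects.h>. The block is guarded by a macro
 * that is never defined, so it serves as documentation only.
 */
#ifdef ODEBUG_CLIENT_EXAMPLE
struct foo {
	int state;
};

static const struct debug_obj_descr foo_debug_descr = {
	.name	= "foo",
	/* Optional fixup callbacks go here, as in descr_type_test above. */
};

static void foo_init(struct foo *f)
{
	/* Announce the object before first use. */
	debug_object_init(f, &foo_debug_descr);
	f->state = 0;
}

static void foo_start(struct foo *f)
{
	/* Warns on double activation or on an untracked, non-static object. */
	debug_object_activate(f, &foo_debug_descr);
}

static void foo_stop(struct foo *f)
{
	debug_object_deactivate(f, &foo_debug_descr);
}

static void foo_release(struct foo *f)
{
	/* Freeing while still active would trigger the fixup_free() path. */
	debug_object_free(f, &foo_debug_descr);
}
#endif /* ODEBUG_CLIENT_EXAMPLE */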
12773ac7fe5aSThomas Gleixner 
12783ac7fe5aSThomas Gleixner /*
12793ac7fe5aSThomas Gleixner  * Called during early boot to initialize the hash buckets and link
12803ac7fe5aSThomas Gleixner  * the static object pool objects into the pool list. After this call
12813ac7fe5aSThomas Gleixner  * the object tracker is fully operational.
12823ac7fe5aSThomas Gleixner  */
12833ac7fe5aSThomas Gleixner void __init debug_objects_early_init(void)
12843ac7fe5aSThomas Gleixner {
12853ac7fe5aSThomas Gleixner 	int i;
12863ac7fe5aSThomas Gleixner 
12873ac7fe5aSThomas Gleixner 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1288aef9cb05SThomas Gleixner 		raw_spin_lock_init(&obj_hash[i].lock);
12893ac7fe5aSThomas Gleixner 
12903ac7fe5aSThomas Gleixner 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
12913ac7fe5aSThomas Gleixner 		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
12923ac7fe5aSThomas Gleixner }
12933ac7fe5aSThomas Gleixner 
12943ac7fe5aSThomas Gleixner /*
129555fb412eSThomas Gleixner  * Convert the statically allocated objects to dynamic ones.
129655fb412eSThomas Gleixner  * debug_objects_mem_init() is called early so only one CPU is up and
129755fb412eSThomas Gleixner  * interrupts are disabled, which means it is safe to replace the active
129855fb412eSThomas Gleixner  * object references.
12991be1cb7bSThomas Gleixner  */
130055fb412eSThomas Gleixner static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
13011be1cb7bSThomas Gleixner {
13021be1cb7bSThomas Gleixner 	struct debug_bucket *db = obj_hash;
13031be1cb7bSThomas Gleixner 	struct debug_obj *obj, *new;
130455fb412eSThomas Gleixner 	struct hlist_node *tmp;
13051be1cb7bSThomas Gleixner 	HLIST_HEAD(objects);
13061be1cb7bSThomas Gleixner 	int i, cnt = 0;
13071be1cb7bSThomas Gleixner 
13081be1cb7bSThomas Gleixner 	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
130955fb412eSThomas Gleixner 		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
13101be1cb7bSThomas Gleixner 		if (!obj)
13111be1cb7bSThomas Gleixner 			goto free;
13121be1cb7bSThomas Gleixner 		hlist_add_head(&obj->node, &objects);
13131be1cb7bSThomas Gleixner 	}
13141be1cb7bSThomas Gleixner 
1315eabb7f1aSwuchi 	debug_objects_allocated += i;
1316eabb7f1aSwuchi 
13171be1cb7bSThomas Gleixner 	/*
1318a0ae9504SZhen Lei 	 * Replace the statically allocated objects list with the allocated
1319a0ae9504SZhen Lei 	 * objects list.
1320a0ae9504SZhen Lei 	 */
13211be1cb7bSThomas Gleixner 	hlist_move_list(&objects, &obj_pool);
13221be1cb7bSThomas Gleixner 
13231be1cb7bSThomas Gleixner 	/* Replace the active object references */
13241be1cb7bSThomas Gleixner 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
13251be1cb7bSThomas Gleixner 		hlist_move_list(&db->list, &objects);
13261be1cb7bSThomas Gleixner 
1327b67bfe0dSSasha Levin 		hlist_for_each_entry(obj, &objects, node) {
13281be1cb7bSThomas Gleixner 			new = hlist_entry(obj_pool.first, typeof(*obj), node);
13291be1cb7bSThomas Gleixner 			hlist_del(&new->node);
13301be1cb7bSThomas Gleixner 			/* copy object data */
13311be1cb7bSThomas Gleixner 			*new = *obj;
13321be1cb7bSThomas Gleixner 			hlist_add_head(&new->node, &db->list);
13331be1cb7bSThomas Gleixner 			cnt++;
13341be1cb7bSThomas Gleixner 		}
13351be1cb7bSThomas Gleixner 	}
13361be1cb7bSThomas Gleixner 
133755fb412eSThomas Gleixner 	pr_debug("%d of %d active objects replaced\n", cnt, obj_pool_used);
133855fb412eSThomas Gleixner 	return true;
13391be1cb7bSThomas Gleixner free:
1340b67bfe0dSSasha Levin 	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
13411be1cb7bSThomas Gleixner 		hlist_del(&obj->node);
134255fb412eSThomas Gleixner 		kmem_cache_free(cache, obj);
13431be1cb7bSThomas Gleixner 	}
134455fb412eSThomas Gleixner 	return false;
13451be1cb7bSThomas Gleixner }
13461be1cb7bSThomas Gleixner 
13471be1cb7bSThomas Gleixner /*
13483ac7fe5aSThomas Gleixner  * Called after the kmem_caches are functional to set up a dedicated
13493ac7fe5aSThomas Gleixner  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
13503ac7fe5aSThomas Gleixner  * prevents the debug code from being called on kmem_cache_free() for
13513ac7fe5aSThomas Gleixner  * the debug tracker objects, avoiding recursive calls.
13523ac7fe5aSThomas Gleixner  */
13533ac7fe5aSThomas Gleixner void __init debug_objects_mem_init(void)
13543ac7fe5aSThomas Gleixner {
135555fb412eSThomas Gleixner 	struct kmem_cache *cache;
13563f397bf9SThomas Gleixner 	int extras;
1357d86998b1SWaiman Long 
13583ac7fe5aSThomas Gleixner 	if (!debug_objects_enabled)
13593ac7fe5aSThomas Gleixner 		return;
13603ac7fe5aSThomas Gleixner 
136155fb412eSThomas Gleixner 	if (!debug_objects_selftest())
1362eabb7f1aSwuchi 		return;
136355fb412eSThomas Gleixner 
136455fb412eSThomas Gleixner 	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
136555fb412eSThomas Gleixner 				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
136655fb412eSThomas Gleixner 
136755fb412eSThomas Gleixner 	if (!cache || !debug_objects_replace_static_objects(cache)) {
136855fb412eSThomas Gleixner 		debug_objects_enabled = 0;
136955fb412eSThomas Gleixner 		pr_warn("Out of memory.\n");
137055fb412eSThomas Gleixner 		return;
137155fb412eSThomas Gleixner 	}
137255fb412eSThomas Gleixner 
137355fb412eSThomas Gleixner 	/*
137455fb412eSThomas Gleixner 	 * Adjust the thresholds for allocating and freeing objects
137555fb412eSThomas Gleixner 	 * according to the number of possible CPUs available in the
137655fb412eSThomas Gleixner 	 * system.
137755fb412eSThomas Gleixner 	 */
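	/*
	 * Worked example (hypothetical numbers): with 8 possible CPUs and
	 * a batch size of 16, extras is 128, so both the pool size and the
	 * minimum fill level below grow by 128 objects.
	 */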
137855fb412eSThomas Gleixner 	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
137955fb412eSThomas Gleixner 	debug_objects_pool_size += extras;
138055fb412eSThomas Gleixner 	debug_objects_pool_min_level += extras;
138155fb412eSThomas Gleixner 
138255fb412eSThomas Gleixner 	/* Everything worked. Expose the cache */
138355fb412eSThomas Gleixner 	obj_cache = cache;
1384634d61f4SWaiman Long 
138588451f2cSZqiang #ifdef CONFIG_HOTPLUG_CPU
138688451f2cSZqiang 	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
138788451f2cSZqiang 				  object_cpu_offline);
138888451f2cSZqiang #endif
138955fb412eSThomas Gleixner 	return;
13903ac7fe5aSThomas Gleixner }
1391