// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
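
/*
 * Editor's note: a "chunk" is a page-sized, page-aligned region. The
 * chunk number (addr >> ODEBUG_CHUNK_SHIFT) is what get_bucket() below
 * hashes, so all objects within one page land in the same hash bucket.
 */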

/*
 * Freeing of debug objects via the workqueue is rate limited to a
 * maximum frequency of 10Hz and about 1024 objects per freeing
 * operation, i.e. at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
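
/*
 * Worked example: with HZ=1000, ODEBUG_FREE_WORK_DELAY evaluates to
 * DIV_ROUND_UP(1000, 10) = 100 jiffies, i.e. free_obj_work() runs at
 * most every 100ms, each run freeing up to ODEBUG_FREE_WORK_MAX objects.
 */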

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
static int __data_racy		obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int __data_racy		obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static int __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int				debug_objects_pool_size __ro_after_init
					= ODEBUG_POOL_SIZE;
static int				debug_objects_pool_min_level __ro_after_init
					= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __ro_after_init;

/*
 * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global obj_to_free list; they will be
	 * reinitialized when allocating.
	 *
	 * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
	 * the WRITE_ONCE() in pool_lock critical sections.
	 */
	if (READ_ONCE(obj_nr_tofree)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new = kmem_cache_zalloc(obj_cache, gfp);
			if (!new)
				break;
			hlist_add_head(&new->node, &head);
			if (!last)
				last = new;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_splice_init(&head, &last->node, &obj_pool);
		debug_objects_allocated += cnt;
		WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill
	 * the pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct debug_percpu_free *pcp = per_cpu_ptr(&percpu_obj_pool, cpu);

	put_objects(&pcp->free_objs);
	pcp->obj_free = 0;
	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
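
/*
 * Worked example (editor's illustration): with 4K pages
 * (ODEBUG_CHUNK_SHIFT = 12), objects at 0x1000 and 0x1ff0 share the
 * chunk number 0x1 and therefore hash to the same bucket, while an
 * object at 0x2000 falls into the next chunk and may hash elsewhere.
 */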

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
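
/*
 * Usage sketch (editor's illustration, compiled out): how a subsystem
 * typically wires up a descriptor and tracks an object's lifetime with
 * the debug_object_*() calls in this file. struct my_obj,
 * my_obj_debug_descr and my_obj_example() are hypothetical names, not
 * part of this file or any kernel API.
 */
#if 0
struct my_obj {
	int data;
};

static const struct debug_obj_descr my_obj_debug_descr = {
	.name = "my_obj",
};

static void my_obj_example(struct my_obj *obj)
{
	debug_object_init(obj, &my_obj_debug_descr);	   /* -> INIT */
	debug_object_activate(obj, &my_obj_debug_descr);   /* -> ACTIVE */
	debug_object_deactivate(obj, &my_obj_debug_descr); /* -> INACTIVE */
	debug_object_free(obj, &my_obj_debug_descr);	   /* untracked */
}
#endif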

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
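
/*
 * Illustrative stats output (editor's example; the field names match
 * the seq_printf() calls in debug_stats_show() above, the values are
 * made up):
 *
 *	max_chain     :2
 *	max_checked   :0
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1024
 *	pool_pcp_free :128
 *	pool_min_free :512
 *	pool_used     :256
 *	pool_max_used :1024
 *	on_free_list  :0
 *	objs_allocated:2048
 *	objs_freed    :512
 */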

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return !!debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct debug_obj *obj, *new;
	struct hlist_node *tmp;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * Replace the statically allocated objects list with the allocated
	 * objects list.
	 */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n", cnt, obj_pool_used);
	return true;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = 0;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}