// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <[email protected]>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * Freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz with about 1024 objects per freeing operation,
 * i.e. at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
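
/*
 * Worked example (illustrative, assuming HZ == 250): ODEBUG_FREE_WORK_DELAY
 * evaluates to DIV_ROUND_UP(250, 10) == 25 jiffies, i.e. roughly 100ms
 * between work invocations, which caps freeing at about
 * 10 * ODEBUG_FREE_WORK_MAX == 10240 objects per second.
 */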

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling interrupts
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

struct obj_pool {
	struct hlist_head	objects;
	unsigned int		cnt;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static struct obj_pool		pool_global;
static struct obj_pool		pool_to_free;

/*
 * Because of the presence of percpu free pools, the global free count
 * (reported as pool_free) will under-count objects sitting in the percpu
 * free pools. Similarly, obj_pool_used will over-count them. Adjustments
 * are made in debug_stats_show(). Both obj_pool_min_free and
 * obj_pool_max_used can be off.
 */
static int __data_racy		obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int __data_racy		obj_pool_max_used;
static bool			obj_freeing;

static int __data_racy			debug_objects_maxchain __read_mostly;
static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
static int __data_racy			debug_objects_fixups __read_mostly;
static int __data_racy			debug_objects_warnings __read_mostly;
static bool __data_racy			debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int				debug_objects_pool_size __ro_after_init
					= ODEBUG_POOL_SIZE;
static int				debug_objects_pool_min_level __ro_after_init
					= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __ro_after_init;

/*
 * Track the number of kmem_cache_alloc()/free() calls done.
 */
static int __data_racy		debug_objects_allocated;
static int __data_racy		debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = true;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = false;
	return 0;
}
early_param("no_debug_objects", disable_object_debug);
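
/*
 * Example (illustrative): both switches are plain boot parameters, so a
 * kernel built with CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=0 can be booted
 * with "debug_objects" on the command line to enable tracking, and one
 * built with the default enabled can be booted with "no_debug_objects"
 * to switch it off.
 */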

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static __always_inline unsigned int pool_count(struct obj_pool *pool)
{
	return READ_ONCE(pool->cnt);
}

static inline bool pool_global_should_refill(void)
{
	return READ_ONCE(pool_global.cnt) < debug_objects_pool_min_level;
}

static inline bool pool_global_must_refill(void)
{
	return READ_ONCE(pool_global.cnt) < (debug_objects_pool_min_level / 2);
}
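
/*
 * Worked example (illustrative): with the boot-time default of
 * ODEBUG_POOL_MIN_LEVEL == 256 (before debug_objects_mem_init() adds the
 * per-CPU extras), a refill is attempted below 256 free objects and
 * becomes mandatory below 128, at which point fill_pool() allocates even
 * if another CPU is already refilling.
 */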

static void free_object_list(struct hlist_head *head)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry_safe(obj, tmp, head, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
		cnt++;
	}
	debug_objects_freed += cnt;
}

static void fill_pool_from_freelist(void)
{
	static unsigned long state;
	struct debug_obj *obj;

	/*
	 * Reuse objs from the global pool_to_free list; they will be
	 * reinitialized when allocating.
	 */
	if (!pool_count(&pool_to_free))
		return;

	/*
	 * Prevent the context from being scheduled or interrupted after
	 * setting the state flag.
	 */
	guard(irqsave)();

	/*
	 * Avoid lock contention on &pool_lock and avoid making the cache
	 * line exclusive by testing the bit before attempting to set it.
	 */
	if (test_bit(0, &state) || test_and_set_bit(0, &state))
		return;

	guard(raw_spinlock)(&pool_lock);
	/*
	 * Recheck with the lock held as the worker thread might have
	 * won the race and freed the global free list already.
	 */
	while (pool_to_free.cnt && (pool_global.cnt < debug_objects_pool_min_level)) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
		hlist_add_head(&obj->node, &pool_global.objects);
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
	}
	clear_bit(0, &state);
}

static void fill_pool(void)
{
	static atomic_t cpus_allocating;

	/*
	 * Avoid allocation and lock contention when:
	 *   - one other CPU is already allocating
	 *   - the global pool has not reached the critical level yet
	 */
	if (!pool_global_must_refill() && atomic_read(&cpus_allocating))
		return;

	atomic_inc(&cpus_allocating);
	while (pool_global_should_refill()) {
		struct debug_obj *new, *last = NULL;
		HLIST_HEAD(head);
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new = kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN);
			if (!new)
				break;
			hlist_add_head(&new->node, &head);
			if (!last)
				last = new;
		}
		if (!cnt)
			break;

		guard(raw_spinlock_irqsave)(&pool_lock);
		hlist_splice_init(&head, &last->node, &pool_global.objects);
		debug_objects_allocated += cnt;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + cnt);
	}
	atomic_dec(&cpus_allocating);
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&pool_global.objects);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&pool_global.objects);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node, &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (pool_global.cnt < obj_pool_min_free)
			obj_pool_min_free = pool_global.cnt;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (pool_global.cnt >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill
	 * the pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (pool_to_free.cnt && pool_global.cnt < debug_objects_pool_size) {
		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &pool_global.objects);
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (pool_to_free.cnt) {
		hlist_move_list(&pool_to_free.objects, &tofree);
		WRITE_ONCE(pool_to_free.cnt, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	free_object_list(&tofree);
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (pool_global.cnt > debug_objects_pool_size) && obj_cache &&
	       (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
		hlist_add_head(&obj->node, &pool_to_free.objects);
		if (lookahead_count) {
			WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &pool_to_free.objects);
			}
		}

		if ((pool_global.cnt > debug_objects_pool_size) &&
		    (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&pool_global.objects);
				hlist_add_head(&obj->node, &pool_to_free.objects);
				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
				WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
			}
		}
	} else {
		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
		hlist_add_head(&obj->node, &pool_global.objects);
		if (lookahead_count) {
			WRITE_ONCE(pool_global.cnt, pool_global.cnt + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &pool_global.objects);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

static void put_objects(struct hlist_head *list)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/*
	 * Using free_object() puts the objects into reuse or schedules
	 * them for freeing and it gets all the accounting correct.
	 */
	hlist_for_each_entry_safe(obj, tmp, list, node) {
		hlist_del(&obj->node);
		free_object(obj);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	/* Remote access is safe as the CPU is dead already */
	struct debug_percpu_free *pcp = per_cpu_ptr(&percpu_obj_pool, cpu);

	put_objects(&pcp->free_objs);
	pcp->obj_free = 0;
	return 0;
}
#endif

/* Out of memory. Free all objects from hash */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	HLIST_HEAD(freelist);

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		scoped_guard(raw_spinlock_irqsave, &db->lock)
			hlist_move_list(&db->list, &freelist);

		put_objects(&freelist);
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
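
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): objects at
 * 0x1000 and 0x1ff8 live in the same 4k chunk, so addr >> ODEBUG_CHUNK_SHIFT
 * yields 1 for both and hash_long(1, ODEBUG_HASH_BITS) selects the same of
 * the 16384 buckets. An object at 0x2000 falls into the next chunk and
 * usually lands in a different bucket.
 */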

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = false;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	if (unlikely(!obj_cache))
		return;

	if (likely(!pool_global_should_refill()))
		return;

	/* Try reusing objects from the pool_to_free list */
	fill_pool_from_freelist();

	if (likely(!pool_global_should_refill()))
		return;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
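
/*
 * Usage sketch (illustrative only; struct my_obj, my_obj_debug_descr and
 * my_obj_init() are hypothetical, not part of this file): a subsystem
 * hooks its object lifetime into the tracker by providing a descriptor
 * and calling the debug_object_*() helpers from its own init path:
 *
 *	static const struct debug_obj_descr my_obj_debug_descr = {
 *		.name = "my_obj",
 *	};
 *
 *	void my_obj_init(struct my_obj *obj)
 *	{
 *		debug_object_init(obj, &my_obj_debug_descr);
 *		...
 *	}
 *
 * Stack-allocated instances would use debug_object_init_on_stack()
 * (below) instead, so the stack annotation check fires correctly.
 */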

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
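
/*
 * Usage sketch (illustrative; my_obj_queue() is hypothetical): the return
 * value allows a caller to refuse queueing an object that failed the
 * state check and could not be fixed up:
 *
 *	int my_obj_queue(struct my_obj *obj)
 *	{
 *		if (debug_object_activate(obj, &my_obj_debug_descr))
 *			return -EINVAL;
 *		...
 *		return 0;
 *	}
 */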

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);
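
/*
 * Usage sketch (illustrative; my_obj_destroy() is hypothetical): calling
 * this before the memory is returned removes the tracking entry, so a
 * later debug_check_no_obj_freed() pass has nothing to complain about:
 *
 *	void my_obj_destroy(struct my_obj *obj)
 *	{
 *		debug_object_free(obj, &my_obj_debug_descr);
 *		kfree(obj);
 *	}
 */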

/**
 * debug_object_assert_init - debug checks when object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
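
/*
 * Usage sketch (illustrative; the STATE_* values are hypothetical): @astate
 * is an opaque, caller-defined state machine layered on top of
 * ODEBUG_STATE_ACTIVE, so a user can assert ordered transitions such as:
 *
 *	debug_object_active_state(obj, &my_obj_debug_descr,
 *				  STATE_IDLE, STATE_QUEUED);
 */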

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", pool_count(&pool_global) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", pool_count(&pool_to_free));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
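
/*
 * Example (illustrative): with debugfs mounted at the conventional
 * /sys/kernel/debug, the file created below is readable as
 * /sys/kernel/debug/debug_objects/stats and prints one "name:value" line
 * per counter in the order of the seq_printf() calls above; the values
 * are runtime dependent.
 */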

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = false;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static bool __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
	return debug_objects_enabled;
}
#else
static inline bool debug_objects_selftest(void) { return true; }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &pool_global.objects);

	pool_global.cnt = ODEBUG_POOL_SIZE;
}

/*
 * Convert the statically allocated objects to dynamic ones.
 * debug_objects_mem_init() is called early so only one CPU is up and
 * interrupts are disabled, which means it is safe to replace the active
 * object references.
 */
static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
{
	struct debug_bucket *db = obj_hash;
	struct debug_obj *obj, *new;
	struct hlist_node *tmp;
	HLIST_HEAD(objects);
	int i;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated = ODEBUG_POOL_SIZE;
	pool_global.cnt = ODEBUG_POOL_SIZE;

	/*
	 * Replace the statically allocated objects list with the allocated
	 * objects list.
	 */
	hlist_move_list(&objects, &pool_global.objects);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(pool_global.objects.first, typeof(*obj), node);
			hlist_del(&new->node);
			pool_global.cnt--;
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
		}
	}
	return true;
free:
	/* Can't use free_object_list() as the cache is not populated yet */
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(cache, obj);
	}
	return false;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	struct kmem_cache *cache;
	int extras;

	if (!debug_objects_enabled)
		return;

	if (!debug_objects_selftest())
		return;

	cache = kmem_cache_create("debug_objects_cache", sizeof(struct debug_obj), 0,
				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);

	if (!cache || !debug_objects_replace_static_objects(cache)) {
		debug_objects_enabled = false;
		pr_warn("Out of memory.\n");
		return;
	}

	/*
	 * Adjust the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the
	 * system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;

	/* Everything worked. Expose the cache */
	obj_cache = cache;

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif
	return;
}