/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this widely, but we do
 * need the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with single-depth
 * nesting.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
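
/*
 * Example (an illustrative sketch; all names are hypothetical): every
 * lock keyed by the same static key belongs to the same class, so the
 * per-bucket locks below are validated as one class even though there
 * may be thousands of instances:
 *
 *	static struct lock_class_key hash_lock_key;
 *
 *	for (i = 0; i < nr_buckets; i++) {
 *		spin_lock_init(&table[i].lock);
 *		lockdep_set_class(&table[i].lock, &hash_lock_key);
 *	}
 */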

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, used during certain classes of graph walking
	 * to ensure that we check each node only once:
	 */
	unsigned int			version;

	int				name_version;
	const char			*name;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
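
/*
 * Example (an illustrative sketch; "bdev" and "bdev_lock_key" are
 * hypothetical): when two instances of the same lock class must nest,
 * re-keying one instance gives it its own class and avoids a false
 * self-deadlock report:
 *
 *	static struct lock_class_key bdev_lock_key;
 *
 *	spin_lock_init(&bdev->lock);
 *	lockdep_set_class(&bdev->lock, &bdev_lock_key);
 */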

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
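
/*
 * Example (an illustrative sketch; the my_lock type and its helpers are
 * hypothetical): a home-grown primitive embeds a lockdep_map and reports
 * acquire/release events, here as an exclusive (read == 0), fully
 * checked (check == 1) lock:
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	static void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_spin_lock(&l->raw);
 *	}
 *
 *	static void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		arch_spin_unlock(&l->raw);
 *	}
 */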

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
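
/*
 * Example (an illustrative sketch; update_data()/read_data() are
 * hypothetical): rwsem's downgrade_write() uses lock_downgrade() so that
 * the holder is tracked as a reader from that point on:
 *
 *	down_write(&sem);
 *	update_data();
 *	downgrade_write(&sem);
 *	read_data();
 *	up_read(&sem);
 */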

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
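
/*
 * Example (an illustrative sketch; "dev" and its members are
 * hypothetical): helpers that rely on the caller holding a lock can
 * document and enforce that precondition:
 *
 *	static void dev_update_stats(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->stats.updates++;
 *	}
 */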

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
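
/*
 * Example (an illustrative sketch, in the style of the scheduler's
 * rq pinning): pinning catches code that drops a lock it was required
 * to keep held; an unlock while pinned, or a pin/unpin imbalance,
 * triggers a warning:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(rq);
 *	do_balance(rq);		(hypothetical; must not release rq->lock)
 *	lockdep_unpin_lock(rq, cookie);
 */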

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
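
/*
 * Example (an illustrative sketch, in the spirit of the workqueue
 * annotations; "my_work_map" and run_work() are hypothetical): a
 * pseudo-lock lets lockdep track dependencies on things that are not
 * locks at all, such as "completion of this work item", via the
 * lock_map_acquire()/lock_map_release() helpers defined below:
 *
 *	static struct lockdep_map my_work_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_map);
 *
 *	lock_map_acquire(&my_work_map);
 *	run_work();
 *	lock_map_release(&my_work_map);
 */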

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
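
/*
 * Example (an illustrative sketch, modeled on how mutex_lock() is wired
 * up; the __my_ names are hypothetical): the trylock runs first, so
 * lock_contended() fires only when the fast path fails, and
 * lock_acquired() records when the lock is finally obtained:
 *
 *	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *	LOCK_CONTENDED(lock, __my_mutex_trylock, __my_mutex_lock);
 */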

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
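
/*
 * Example (an illustrative sketch; "parent"/"child" are hypothetical):
 * taking two locks of the same class in a globally fixed order is
 * expressed by acquiring the second one as a nested subclass:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */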

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
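
/*
 * Example (an illustrative sketch; "ctx" and flush_locked() are
 * hypothetical): a function that only occasionally takes a lock can
 * still expose the dependency to lockdep on every call, so the rare
 * path is validated eagerly:
 *
 *	static void maybe_flush(struct my_ctx *ctx)
 *	{
 *		might_lock(&ctx->lock);
 *		if (atomic_read(&ctx->dirty))
 *			flush_locked(ctx);
 *	}
 */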

#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)
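
/*
 * Example (an illustrative sketch; the helper and "queue" are
 * hypothetical): a function that is only correct with hardirqs disabled
 * can check that assumption instead of silently relying on its callers:
 *
 *	static void __enqueue_this_cpu(struct item *item)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		list_add(&item->node, this_cpu_ptr(&queue));
 *	}
 */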

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */