xref: /linux-6.15/include/linux/lockdep.h (revision d7fe143c)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

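/*
 * Example (illustrative sketch, not part of this header): a locking
 * primitive that embeds a lockdep_map typically initializes it with a
 * static key, so every init site gets its own lock class, the way
 * spin_lock_init() and friends behave. "foo_lock_init" and
 * "__foo_lock_init" below are hypothetical names:
 *
 *	#define foo_lock_init(l)				\
 *	do {							\
 *		static struct lock_class_key __key;		\
 *								\
 *		lockdep_init_map(&(l)->dep_map, #l, &__key, 0);	\
 *		__foo_lock_init(l);				\
 *	} while (0)
 */
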
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)

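/*
 * Example (sketch): splitting off a dedicated class for locks that the
 * default init-site class would lump together. The filesystem-ish names
 * here are hypothetical:
 *
 *	static struct lock_class_key foo_inode_lock_key;
 *
 *	static void foo_init_inode(struct inode *inode)
 *	{
 *		spin_lock_init(&inode->i_lock);
 *		lockdep_set_class(&inode->i_lock, &foo_inode_lock_key);
 *	}
 */
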
/**
 * lockdep_set_novalidate_class: disable checking of lock ordering on a given
 * lock
 * @lock: Lock to mark
 *
 * Lockdep will still record that this lock has been taken, and print held
 * instances when dumping locks.
 */
#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

/**
 * lockdep_set_notrack_class: disable lockdep tracking of a given lock entirely
 * @lock: Lock to mark
 *
 * Bigger hammer than lockdep_set_novalidate_class: so far just for bcachefs,
 * which takes more locks than lockdep is able to track (48).
 */
#define lockdep_set_notrack_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock)

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
		      int read, int check, struct lockdep_map *nest_lock,
		      unsigned long ip);

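/*
 * Example (sketch, hypothetical primitive): teaching lockdep about a
 * lock it does not otherwise see. read=0 and check=1 request an
 * exclusive acquire with full validation; no nest_lock is involved:
 *
 *	static void foo_lock(struct foo_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_spin_lock(&l->raw);
 *	}
 *
 *	static void foo_unlock(struct foo_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		arch_spin_unlock(&l->raw);
 *	}
 */
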
/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

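/*
 * Example: lockdep_is_held() is what powers checks such as
 * rcu_dereference_protected(); a pointer that must only be dereferenced
 * under a lock can be fetched as:
 *
 *	p = rcu_dereference_protected(foo->ptr, lockdep_is_held(&foo->lock));
 */
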
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)		\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()		\
	lockdep_assert_once(!current->lockdep_depth)

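/*
 * Example (sketch, hypothetical helper): functions that rely on the
 * caller holding a lock can document and enforce that requirement; the
 * assertion compiles away entirely when lockdep is disabled:
 *
 *	static void foo_update(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->generation++;
 *	}
 */
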
#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

/*
 * Must use lock_map_acquire_try() with override maps to avoid
 * lockdep thinking they participate in the block chain.
 */
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map _name = {			\
		.name = #_name "-wait-type-override",	\
		.wait_type_inner = _wait_type,		\
		.lock_type = LD_LOCK_WAIT_OVERRIDE, }

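/*
 * Example (sketch): an override map is declared on the stack and taken
 * with lock_map_acquire_try(), so the section below is validated
 * against the relaxed wait type without the map ever entering the
 * dependency chains:
 *
 *	DEFINE_WAIT_OVERRIDE_MAP(foo_map, LD_WAIT_SLEEP);
 *
 *	lock_map_acquire_try(&foo_map);
 *	// ... code checked against LD_WAIT_SLEEP ...
 *	lock_map_release(&foo_map);
 */
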
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)
#define lockdep_set_notrack_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()	do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map __maybe_unused _name = {}

#endif /* !LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);

#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
#else
#define lock_set_cmp_fn(lock, ...)	do { } while (0)
#endif

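/*
 * Example (sketch): with PROVE_LOCKING, a class whose instances may
 * legitimately be held two at a time can teach lockdep the intra-class
 * order through a comparison function. Ordering by instance address is
 * one common convention (names below are hypothetical):
 *
 *	static int foo_lock_cmp_fn(const struct lockdep_map *a,
 *				   const struct lockdep_map *b)
 *	{
 *		return a < b ? -1 : 1;	// lower address first
 *	}
 *
 *	lock_set_cmp_fn(&foo->lock, foo_lock_cmp_fn, NULL);
 */
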
enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

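/*
 * Example: a file-scope map for annotating a code region that has no
 * real lock object behind it; one common convention uses the map's own
 * address as the key ("foo_region" below is hypothetical):
 *
 *	static struct lockdep_map foo_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("foo_region", &foo_dep_map);
 */
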
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

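/*
 * Example: down_read() in kernel/locking/rwsem.c is wired through this
 * macro so that a failed fast path is recorded as contention (shown
 * here slightly simplified):
 *
 *	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 *	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 */
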
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

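/*
 * Example: locking two instances of the same class in a stable order,
 * with the inner one annotated so lockdep does not report a false
 * self-deadlock ("parent"/"child" are illustrative names):
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */
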
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_try(l)			lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
#define lock_map_sync(l)			lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)

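/*
 * Example (sketch): lock_map_acquire()/lock_map_release() let pseudo
 * locks participate in dependency checking. The workqueue code wraps
 * work execution this way, so flushing a work item from within itself
 * shows up as an ordinary lockdep report (simplified):
 *
 *	lock_map_acquire(&work->lockdep_map);
 *	work->func(work);
 *	lock_map_release(&work->lockdep_map);
 */
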
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)

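/*
 * Example (sketch, hypothetical function): a path that only takes a
 * lock in a rare slow path can still declare the dependency on every
 * call, so lockdep sees it even when the slow path never runs:
 *
 *	static void foo_poke(struct foo *foo)
 *	{
 *		might_lock(&foo->mutex);
 *		if (unlikely(foo->needs_flush))
 *			foo_flush(foo);		// takes foo->mutex
 *	}
 */
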
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_no_hardirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
					   !this_cpu_read(hardirqs_enabled))); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

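/*
 * Example (sketch, hypothetical names): per-CPU accounting helpers can
 * state their preemption requirement explicitly; the check costs
 * nothing unless lockdep is enabled:
 *
 *	static void foo_count_event(void)
 *	{
 *		lockdep_assert_preemption_disabled();
 *		__this_cpu_inc(foo_event_count);
 *	}
 */
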
/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

extern void lockdep_assert_in_softirq_func(void);

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
# define lockdep_assert_no_hardirq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
# define lockdep_assert_in_softirq_func() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */