/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with single-depth
 * nesting.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};
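
/*
 * Example (an illustrative sketch; the "foo" structure and its members
 * are hypothetical): a lock embedded in dynamically allocated memory can
 * be paired with a dynamically registered key, registered before first
 * use and unregistered before the memory is freed:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		struct lock_class_key key;
 *	};
 *
 *	f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	lockdep_register_key(&f->key);
 *	spin_lock_init(&f->lock);
 *	lockdep_set_class(&f->lock, &f->key);
 *	...
 *	lockdep_unregister_key(&f->key);
 *	kfree(f);
 */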

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace {
	unsigned int		nr_entries;
	unsigned int		offset;
};

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * Generation counter, used during certain classes of graph
	 * walking to ensure that we check each node only once:
	 */
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * Counter to distinguish lock classes that share a name; such
	 * classes are reported as "name#version" in lockdep output:
	 */
	int				name_version;
	const char			*name;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	struct lock_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
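	 *
	 * For example (a conceptual sketch, not literal code): if a task
	 * acquires locks of classes A and then B, its chain key becomes
	 * hash(hash(0, A), B), and prev_chain_key in the entry for B
	 * holds hash(0, A), the value from just before B was added.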
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_off(void);
extern void lockdep_on(void);

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
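
/*
 * Example (an illustrative sketch; the "parent"/"child" objects are
 * hypothetical): when two locks of the same type can legitimately be
 * nested, giving the inner lock its own class (a class-split) avoids a
 * false self-deadlock report:
 *
 *	static struct lock_class_key child_lock_key;
 *
 *	mutex_init(&parent->lock);
 *	mutex_init(&child->lock);
 *	lockdep_set_class_and_name(&child->lock, &child_lock_key,
 *				   "child_lock");
 */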

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
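
/*
 * For example (a sketch of how the wrappers further below expand): a
 * plain spin_lock() annotation maps to an exclusive, fully checked
 * acquire:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *
 * while read_lock(), which permits same-instance read recursion, uses
 * read == 2:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, _RET_IP_);
 */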

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_exclusive(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
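
/*
 * Example (a sketch; the "dev" structure and its members are
 * hypothetical): a function with a "caller must hold the lock" contract
 * can enforce it at entry:
 *
 *	static void dev_update_state(struct dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->state = DEV_STATE_READY;
 *	}
 */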

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined; the caller should
 * #ifdef the call site instead.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
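
/*
 * Example (a sketch, modeled on annotations like fs_reclaim; the map
 * name here is hypothetical): a subsystem can declare a static map to
 * treat a code region as a pseudo-lock:
 *
 *	static struct lockdep_map foo_reclaim_map =
 *		STATIC_LOCKDEP_MAP_INIT("foo_reclaim", &foo_reclaim_map);
 *
 *	lock_map_acquire(&foo_reclaim_map);
 *	...
 *	lock_map_release(&foo_reclaim_map);
 */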

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
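
/*
 * Example (a sketch mirroring how rwsem's down_read() path has used
 * this): the trylock fast path runs first, and only a failed trylock
 * is accounted as contention before falling back to the slow path:
 *
 *	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 *	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 */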

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
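
/*
 * Example (a sketch; "parent" and "child" are hypothetical objects of
 * the same type): two locks of one class taken in a fixed order can be
 * annotated with a subclass instead of a separate class:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */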

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
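
/*
 * Example (a sketch; "foo" is hypothetical): a function that only takes
 * a lock on a rare path can still declare the dependency on every call,
 * so the validator sees it even when the path is not exercised:
 *
 *	might_lock(&foo->mutex);
 *	if (unlikely(need_slow_path))
 *		mutex_lock(&foo->mutex);
 */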

#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */