xref: /linux-6.15/include/linux/lockdep.h (revision af901ca1)
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <[email protected]>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
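
/*
 * Illustrative sketch (not the registration code itself, which lives in
 * kernel/lockdep.c): because every subclass occupies one byte of the key,
 * subclass N of a lock can be keyed by a distinct address inside the same
 * lock_class_key. The helper below is hypothetical:
 *
 *	static struct lock_class_key sample_key;
 *
 *	static void *class_key_for(unsigned int subclass)
 *	{
 *		BUG_ON(subclass >= MAX_LOCKDEP_SUBCLASSES);
 *		return &sample_key.subkeys[subclass];
 *	}
 */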

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, used during certain classes of graph walking
	 * to ensure that we check each node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};
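
/*
 * A minimal sketch (not part of the API) of how a lock_time sample can be
 * reduced to an average, roughly what the /proc/lock_stat output reports;
 * the helper name is made up and div_s64() comes from <linux/math64.h>:
 *
 *	static s64 lock_time_avg(const struct lock_time *lt)
 *	{
 *		return lt->nr ? div_s64(lt->total, lt->nr) : 0;
 *	}
 */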

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
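
/*
 * Sketch of how a locking primitive typically embeds a lockdep_map and
 * wires it up at init time (modelled on what spinlocks and mutexes do;
 * the sample_* type and function are hypothetical):
 *
 *	struct sample_lock {
 *		unsigned int		raw;		// the real lock word
 *		struct lockdep_map	dep_map;	// instance -> class map
 *	};
 *
 *	void sample_lock_init(struct sample_lock *l, const char *name,
 *			      struct lock_class_key *key)
 *	{
 *		l->raw = 0;
 *		lockdep_init_map(&l->dep_map, name, key, 0);
 *	}
 *
 * Real primitives only carry the dep_map member (and the init call) when
 * CONFIG_DEBUG_LOCK_ALLOC is enabled.
 */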

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;					/* 32 bits */
};
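
/*
 * Illustration only: the chain key is grown one held lock at a time by
 * folding the previous 64-bit value with the next class index, loosely
 * modelled on iterate_chain_key() in kernel/lockdep.c; the helper name
 * below is made up:
 *
 *	static inline u64 fold_chain_key(u64 key, u64 class_idx)
 *	{
 *		return (key << MAX_LOCKDEP_KEYS_BITS) ^
 *		       (key >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^
 *		       class_idx;
 *	}
 *
 * prev_chain_key above holds the value the hash had *before* this lock
 * was folded in, so the chain can be unwound and re-checked on release.
 */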

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
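
/*
 * Typical (sketched) use of STATIC_LOCKDEP_MAP_INIT - a compile-time
 * initialized map for a "pseudo" lock, similar in spirit to how RCU
 * annotates rcu_read_lock(); the names here are only illustrative:
 *
 *	static struct lock_class_key sample_lock_key;
 *	struct lockdep_map sample_lock_map =
 *		STATIC_LOCKDEP_MAP_INIT("sample_lock", &sample_lock_key);
 */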

/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization of locks would otherwise make the validator get the scope
 * of dependencies wrong: either too broad (the class needs a split) or
 * too narrow (it suffers from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
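
/*
 * Sketch of the usual class-split: locks initialized from the same place
 * share one class, so code that legitimately nests two such locks moves
 * one of them into a class of its own (the sample_* names below are
 * hypothetical):
 *
 *	static struct lock_class_key sample_child_key;
 *
 *	void sample_link(struct sample_dev *parent, struct sample_dev *child)
 *	{
 *		spin_lock_init(&parent->lock);
 *		spin_lock_init(&child->lock);
 *		// without this, parent->lock -> child->lock nesting would be
 *		// reported as recursive locking within one class:
 *		lockdep_set_class(&child->lock, &sample_child_key);
 *	}
 */
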
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
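
/*
 * Sketch of how a lock implementation feeds acquire/release events to
 * lockdep (the sample_* helpers are hypothetical; real primitives go
 * through the spin_acquire()/mutex_acquire()/... wrappers further down
 * in this header):
 *
 *	void sample_lock(struct sample_lock *l)
 *	{
 *		// subclass 0, not a trylock, write-acquire, full validation
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		sample_do_lock(l);
 *	}
 *
 *	void sample_unlock(struct sample_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		sample_do_unlock(l);
 *	}
 */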

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
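
/*
 * Typical (sketched) use of lockdep_assert_held(): document and verify a
 * "caller must hold the lock" rule (hypothetical example):
 *
 *	static void sample_update_state(struct sample_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);	// WARN if not held
 *		dev->state++;
 *	}
 */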

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
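
/*
 * Sketch of how an acquisition path uses LOCK_CONTENDED so that the
 * contended/acquired events are only recorded when the trylock fast path
 * fails (modelled on the spinlock/rwsem wrappers; the sample_* names are
 * hypothetical):
 *
 *	void sample_lock(struct sample_lock *l)
 *	{
 *		spin_acquire(&l->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(l, sample_trylock, sample_lock_slowpath);
 *	}
 */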

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
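
/*
 * Typical (sketched) use of SINGLE_DEPTH_NESTING with the *_nested() lock
 * APIs, for a fixed parent -> child order between two locks of the same
 * class (the sample_node objects are only illustrative):
 *
 *	void sample_lock_pair(struct sample_node *parent,
 *			      struct sample_node *child)
 *	{
 *		mutex_lock(&parent->mutex);
 *		mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 *	}
 */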

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif
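
/*
 * Sketch of annotating a lock-like construct that is not a real lock (the
 * workqueue code does something along these lines around work execution
 * and flushing); all names here are illustrative:
 *
 *	static struct lock_class_key sample_domain_key;
 *	static struct lockdep_map sample_domain_map =
 *		STATIC_LOCKDEP_MAP_INIT("sample_domain", &sample_domain_key);
 *
 *	void sample_enter_domain(void)
 *	{
 *		lock_map_acquire(&sample_domain_map);
 *	}
 *
 *	void sample_exit_domain(void)
 *	{
 *		lock_map_release(&sample_domain_map);
 *	}
 */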

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
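
/*
 * Typical (sketched) use of might_lock(): annotate a path that only takes
 * the lock in some cases, so the dependency is visible to the validator on
 * every run (similar in spirit to might_fault(); the sample_* names are
 * hypothetical):
 *
 *	int sample_refill(struct sample_dev *dev, bool may_block)
 *	{
 *		might_lock(&dev->mutex);
 *
 *		if (!may_block) {
 *			if (!mutex_trylock(&dev->mutex))
 *				return -EAGAIN;
 *		} else {
 *			mutex_lock(&dev->mutex);
 *		}
 *		sample_do_refill(dev);
 *		mutex_unlock(&dev->mutex);
 *		return 0;
 *	}
 */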

#endif /* __LINUX_LOCKDEP_H */