/* xref: /linux-6.15/include/linux/lockdep_types.h (revision b1c8ea3c) */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Wait-context types, ordered from least to most blocking. lockdep uses
 * these to validate that a lock is only acquired in a context that is
 * allowed to block at least as much as the lock itself does.
 */
enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	/* Without raw-nesting proofs, spinlock_t is ranked like a raw spinlock. */
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX,		/* must be last */
};

/*
 * Lock-class categories: most locks are LD_LOCK_NORMAL; the other values
 * mark classes that need special treatment by the dependency checker.
 */
enum lockdep_lock_type {
	LD_LOCK_NORMAL = 0,	/* normal, catch all */
	LD_LOCK_PERCPU,		/* percpu */
	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
	LD_LOCK_MAX,		/* must be last */
};

#ifdef CONFIG_LOCKDEP

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
 * of those we generate four states. Additionally we report on USED and
 * USED_READ, hence the "+ 2" below.
 */
#define XXX_LOCK_USAGE_STATES		2
#define LOCK_TRACE_STATES		(XXX_LOCK_USAGE_STATES*4 + 2)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets the highly
 * contended rq->lock: double_rq_lock() acquires it with single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 *
 * Only the address of a subclass key is meaningful; it is never read or
 * written, hence the single dummy byte (packed so each key is exactly
 * one byte apart).
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

74 /* hash_entry is used to keep track of dynamically allocated keys. */
75 struct lock_class_key {
76 	union {
77 		struct hlist_node		hash_entry;
78 		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
79 	};
80 };
81 
/* Special key marking locks that lockdep should not validate. */
extern struct lock_class_key __lockdep_no_validate__;

/* Opaque saved stack trace; only handled by pointer here. */
struct lock_trace;

/* Number of distinct contention/contending call sites tracked per class. */
#define LOCKSTAT_POINTS		4

88 /*
89  * The lock-class itself. The order of the structure members matters.
90  * reinit_class() zeroes the key member and all subsequent members.
91  */
92 struct lock_class {
93 	/*
94 	 * class-hash:
95 	 */
96 	struct hlist_node		hash_entry;
97 
98 	/*
99 	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
100 	 * when not in use. Instances that are being freed are on one of the
101 	 * zapped_classes lists.
102 	 */
103 	struct list_head		lock_entry;
104 
105 	/*
106 	 * These fields represent a directed graph of lock dependencies,
107 	 * to every node we attach a list of "forward" and a list of
108 	 * "backward" graph nodes.
109 	 */
110 	struct list_head		locks_after, locks_before;
111 
112 	const struct lockdep_subclass_key *key;
113 	unsigned int			subclass;
114 	unsigned int			dep_gen_id;
115 
116 	/*
117 	 * IRQ/softirq usage tracking bits:
118 	 */
119 	unsigned long			usage_mask;
120 	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];
121 
122 	/*
123 	 * Generation counter, when doing certain classes of graph walking,
124 	 * to ensure that we check one node only once:
125 	 */
126 	int				name_version;
127 	const char			*name;
128 
129 	u8				wait_type_inner;
130 	u8				wait_type_outer;
131 	u8				lock_type;
132 	/* u8				hole; */
133 
134 #ifdef CONFIG_LOCK_STAT
135 	unsigned long			contention_point[LOCKSTAT_POINTS];
136 	unsigned long			contending_point[LOCKSTAT_POINTS];
137 #endif
138 } __no_randomize_layout;
139 
#ifdef CONFIG_LOCK_STAT
141 struct lock_time {
142 	s64				min;
143 	s64				max;
144 	s64				total;
145 	unsigned long			nr;
146 };
147 
/*
 * Cacheline-bounce event categories counted per lock class; the aliases at
 * the end let callers index the write variant generically.
 */
enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,	/* must be last of the real counters */

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

159 struct lock_class_stats {
160 	unsigned long			contention_point[LOCKSTAT_POINTS];
161 	unsigned long			contending_point[LOCKSTAT_POINTS];
162 	struct lock_time		read_waittime;
163 	struct lock_time		write_waittime;
164 	struct lock_time		read_holdtime;
165 	struct lock_time		write_holdtime;
166 	unsigned long			bounces[nr_bounce_types];
167 };
168 
169 struct lock_class_stats lock_stats(struct lock_class *class);
170 void clear_lock_stats(struct lock_class *class);
#endif /* CONFIG_LOCK_STAT */

173 /*
174  * Map the lock object (the lock instance) to the lock-class object.
175  * This is embedded into specific lock instances:
176  */
177 struct lockdep_map {
178 	struct lock_class_key		*key;
179 	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
180 	const char			*name;
181 	u8				wait_type_outer; /* can be taken in this context */
182 	u8				wait_type_inner; /* presents this context */
183 	u8				lock_type;
184 	/* u8				hole; */
185 #ifdef CONFIG_LOCK_STAT
186 	int				cpu;
187 	unsigned long			ip;
188 #endif
189 };
190 
191 struct pin_cookie { unsigned int val; };

#else /* !CONFIG_LOCKDEP */

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

/* Likewise, the pin cookie degenerates to an empty struct: */
struct pin_cookie { };

#endif /* !LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */