/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS	(LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ	(LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;
};

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
};
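/*
 * Illustrative sketch (not part of this header): a lock type embeds a
 * lockdep_map next to the underlying lock word, roughly the way the
 * kernel's own lock types do, so that every instance can be mapped to
 * a class. The type and field names below are hypothetical:
 *
 *	struct my_lock {
 *		raw_spinlock_t		raw_lock;
 *		struct lockdep_map	dep_map;
 *	};
 *
 * A static lock_class_key placed in .data then provides the unique
 * address that keys the class shared by all instances initialized
 * against it:
 *
 *	static struct lock_class_key my_lock_key;
 */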
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero); here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks that makes the validator get the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
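/*
 * Illustrative sketch (hypothetical subsystem, not part of this
 * header): when a shared init helper would merge unrelated locks into
 * one class, a caller can re-key its instance right after
 * initialization:
 *
 *	static struct lock_class_key mydev_lock_key;
 *
 *	spin_lock_init(&mydev->lock);
 *	lockdep_set_class(&mydev->lock, &mydev_lock_key);
 *
 * The class is then named after the stringified key argument, via the
 * #key stringification in the macro above.
 */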
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
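/*
 * Illustrative sketch (hypothetical helper and types, not part of
 * this header): taking two locks of the same class in a stable
 * address order, with the second acquisition annotated as a nesting
 * subclass so the validator does not report a false self-deadlock:
 *
 *	static void my_double_lock(struct my_obj *a, struct my_obj *b)
 *	{
 *		if (a > b) {
 *			struct my_obj *tmp = a;
 *			a = b;
 *			b = tmp;
 *		}
 *		spin_lock(&a->lock);
 *		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	}
 */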
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */