/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <[email protected]>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

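/*
 * Illustrative sketch, not part of the original header: a driver that
 * allocates many instances of the same lock dynamically can put them
 * all into one class by declaring a static key (its unique .data
 * address identifies the class) and pointing every instance at it.
 * The names my_driver_lock_key and dev below are hypothetical:
 *
 *	static struct lock_class_key my_driver_lock_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &my_driver_lock_key);
 */
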
/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * Generation counter, used when doing certain classes of graph
	 * walking to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[4];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
#endif
};

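/*
 * Illustrative sketch, not part of the original header: a lock type
 * embeds its lockdep_map next to the lock word itself, roughly the way
 * the kernel's own lock types carry a 'dep_map' member when lockdep is
 * configured in (other fields elided):
 *
 *	typedef struct {
 *		raw_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	} spinlock_t;
 *
 * Note that the lockdep_set_*() helpers below expect the member to be
 * named dep_map.
 */
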
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks such that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

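/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * locking primitive annotates its paths with lock_acquire() before
 * taking the lock and lock_release() when dropping it. Per the table
 * above, read == 0 requests an exclusive acquire and check == 2 full
 * validation; my_lock, my_unlock, do_raw_lock and do_raw_unlock are
 * made-up names:
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
 *		do_raw_lock(l);
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		do_raw_unlock(l);
 *	}
 */
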
# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map);			\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

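/*
 * Illustrative sketch, not part of the original header: LOCK_CONTENDED()
 * wraps a try-fast-path/block-slow-path pair so that time spent blocking
 * is accounted as contention. A spinlock-style user expands roughly to
 * the following (do_raw_trylock and do_raw_lock are made-up names):
 *
 *	LOCK_CONTENDED(lock, do_raw_trylock, do_raw_lock);
 *
 * which means lock_contended() fires only when the trylock fails, while
 * lock_acquired() records every successful acquisition.
 */
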
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */