/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently, we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
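/*
 * A minimal, hypothetical sketch (not from this header's API users):
 * lockdep_off() and lockdep_on() adjust the current task's recursion
 * counter by LOCKDEP_OFF, so they must be strictly paired within one
 * task context. Use is rare and a last resort, e.g. around code known
 * to confuse the validator:
 *
 *	lockdep_off();
 *	do_weird_locking_dance();	// hypothetical helper
 *	lockdep_on();
 */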
extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks such that the validator would otherwise
 * get the scope of dependencies wrong: they are either too broad (they
 * need a class-split) or they are too narrow (they suffer from a false
 * class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)						\
	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,			\
			      (lock)->dep_map.wait_type_outer,			\
			      (lock)->dep_map.lock_type)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
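/*
 * A minimal sketch of intended usage (the structures and keys here are
 * hypothetical, not defined by this header): when two locks of the same
 * type legitimately nest, give one instance a distinct class so the
 * validator does not report a false self-deadlock:
 *
 *	static struct lock_class_key child_lock_key;
 *
 *	spin_lock_init(&child->lock);
 *	lockdep_set_class(&child->lock, &child_lock_key);
 *
 * For trivial one-level nesting within a single class, a subclass via
 * lockdep_set_subclass() or spin_lock_nested() is usually sufficient
 * (see SINGLE_DEPTH_NESTING below).
 */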
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
		      int read, int check, struct lockdep_map *nest_lock,
		      unsigned long ip);

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)	\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()	\
	lockdep_assert_once(!current->lockdep_depth)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
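/*
 * A minimal sketch (hypothetical function and structure): lockdep_assert_held()
 * turns a "caller must hold foo->lock" comment into a checked precondition
 * that is compiled away when lockdep is off:
 *
 *	static void foo_update_stats(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->nr_updates++;
 *	}
 */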
/*
 * Must use lock_map_acquire_try() with override maps to avoid
 * lockdep thinking they participate in the block chain.
 */
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map _name = {			\
		.name = #_name "-wait-type-override",	\
		.wait_type_inner = _wait_type,		\
		.lock_type = LD_LOCK_WAIT_OVERRIDE, }

#define lock_map_assert_held(l)		\
	lockdep_assert(lock_is_held(l) != LOCK_STATE_NOT_HELD)
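/*
 * A minimal sketch of the pattern (names are hypothetical): annotate a
 * section that may violate the outer wait-type rules but is known to be
 * safe, acquiring the override map with lock_map_acquire_try() (defined
 * later in this header) as required above:
 *
 *	DEFINE_WAIT_OVERRIDE_MAP(example_map, LD_WAIT_SLEEP);
 *
 *	lock_map_acquire_try(&example_map);
 *	do_bounded_work();		// hypothetical helper
 *	lock_map_release(&example_map);
 */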
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined; callers should
 * #ifdef the call sites themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)		({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)

#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map __maybe_unused _name = {}

#define lock_map_assert_held(l)		do { (void)(l); } while (0)

#endif /* !CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);

#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
#else
#define lock_set_cmp_fn(lock, ...)	do { } while (0)
#endif

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
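/*
 * A simplified, hypothetical sketch of how a lock primitive uses
 * LOCK_CONTENDED(): the trylock fast path avoids the contention event,
 * while the slow path reports contention begin/end to the statistics code:
 *
 *	void example_mutex_lock(struct example_mutex *m)
 *	{
 *		mutex_acquire(&m->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(m, example_mutex_trylock, __example_mutex_lock);
 *	}
 */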
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)		lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_try(l)		lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)	lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)	lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)		lock_release(l, _THIS_IP_)
#define lock_map_sync(l)		lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)
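/*
 * A minimal sketch (hypothetical names) of annotating non-lock code with a
 * static map, in the style of workqueue flushing: a flush that waits on
 * work items acquires and releases the map, so flushing while holding a
 * lock that the work items also take is flagged as a potential deadlock:
 *
 *	static struct lock_class_key example_flush_key;
 *	static struct lockdep_map example_flush_map =
 *		STATIC_LOCKDEP_MAP_INIT("example_flush", &example_flush_key);
 *
 *	void example_flush(void)
 *	{
 *		lock_map_acquire(&example_flush_map);
 *		lock_map_release(&example_flush_map);
 *		wait_for_outstanding_work();	// hypothetical helper
 *	}
 */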
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_no_hardirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
					   !this_cpu_read(hardirqs_enabled))); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock)			do { } while (0)
# define might_lock_read(lock)			do { } while (0)
# define might_lock_nested(lock, subclass)	do { } while (0)

# define lockdep_assert_irqs_enabled()		do { } while (0)
# define lockdep_assert_irqs_disabled()		do { } while (0)
# define lockdep_assert_in_irq()		do { } while (0)
# define lockdep_assert_no_hardirq()		do { } while (0)

# define lockdep_assert_preemption_enabled()	do { } while (0)
# define lockdep_assert_preemption_disabled()	do { } while (0)
# define lockdep_assert_in_softirq()		do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config), \
			  "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */