/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1
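
/*
 * Illustrative sketch only (the real code lives in kernel/locking/lockdep.c):
 * the 64-bit chain key is built up incrementally, mixing in each newly
 * acquired lock (essentially its class index), roughly:
 *
 *	u64 chain_key = INITIAL_CHAIN_KEY;
 *
 *	for each held lock, oldest first:
 *		chain_key = mix(chain_key, class index of the lock);
 *
 * so every distinct acquisition sequence hashes to a (very likely) distinct
 * chain key, which is then used to look up the lock_chain hash table.
 */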

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with INITIAL_CHAIN_KEY), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;		/* see lock_acquire() comment */
	unsigned int check:1;		/* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)
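
/*
 * Illustrative only: with the counter split this way, "disabled via
 * lockdep_off()" and "lockdep internally recursing" can be told apart
 * from the one per-task value:
 *
 *	bool off       = current->lockdep_recursion >= LOCKDEP_OFF;
 *	bool recursing = current->lockdep_recursion & LOCKDEP_RECURSION_MASK;
 */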

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, short inner, short outer);

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, short inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
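
/*
 * Illustrative class-split (all names hypothetical): when a parent and a
 * child object embed locks of the same type and are nested, give one of
 * them its own class so the nesting is not flagged as a self-deadlock:
 *
 *	static struct lock_class_key child_lock_key;
 *
 *	spin_lock_init(&child->lock);
 *	lockdep_set_class(&child->lock, &child_lock_key);
 */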

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
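
/*
 * Illustrative (hypothetical helper): a function that requires its caller
 * to hold a lock can document and enforce that with lockdep_assert_held():
 *
 *	static void foo_update_stats(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->nr_updates++;
 *	}
 *
 * The assertion compiles away entirely when lockdep is configured out.
 */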

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)		({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
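
/*
 * Illustrative (hypothetical lock type and helpers): a sleeping-lock
 * implementation wraps its slow path in LOCK_CONTENDED() so that, with
 * CONFIG_LOCK_STAT, contention and acquisition are timestamped and
 * attributed to the lock class:
 *
 *	void my_lock(struct my_lock *lock)
 *	{
 *		...
 *		LOCK_CONTENDED(lock, my_trylock, my_lock_slowpath);
 *	}
 *
 * where my_trylock() returns nonzero on success and my_lock_slowpath()
 * blocks until the lock is acquired.
 */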

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
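
/*
 * Illustrative (hypothetical map): lock_map_acquire()/lock_map_release()
 * let a "pseudo lock" feed ordering constraints into the validator even
 * when no real lock is involved. Define a static map and wrap the
 * constrained region:
 *
 *	static struct lock_class_key my_map_key;
 *	static struct lockdep_map my_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 *
 *	lock_map_acquire(&my_map);
 *	... constrained region ...
 *	lock_map_release(&my_map);
 */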

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled));	\
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled));	\
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context));	\
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     debug_locks			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     debug_locks			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */