/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. low-level
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *                        the raw RT types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)                                       \
do {                                                                    \
        static struct lock_class_key __key;                             \
                                                                        \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);      \
} while (0)

#else
# define raw_spin_lock_init(lock)                                       \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /* arch_spin_is_contended */

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                            CPU1
 *
 *        WRITE_ONCE(X, 1);               WRITE_ONCE(Y, 1);
 *        spin_lock(S);                   smp_mb();
 *        smp_mb__after_spinlock();       r1 = READ_ONCE(X);
 *        r0 = READ_ONCE(Y);
 *        spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                    CPU1                            CPU2
 *
 *        spin_lock(S);           spin_lock(S);                   r1 = READ_ONCE(Y);
 *        WRITE_ONCE(X, 1);       smp_mb__after_spinlock();       smp_rmb();
 *        spin_unlock(S);         r0 = READ_ONCE(X);              r2 = READ_ONCE(X);
 *                                WRITE_ONCE(Y, 1);
 *                                spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        do { } while (0)
#endif

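/*
 * Illustrative sketch only (the names below are hypothetical placeholders,
 * not kernel APIs): a minimal sleep/wakeup handshake that relies on
 * property (1) above, loosely modelled on the __schedule()/try_to_wake_up()
 * pattern the comment refers to.
 *
 *      Sleeper                         Waker
 *
 *      WRITE_ONCE(state, SLEEPING);    WRITE_ONCE(cond, 1);
 *      smp_mb();                       spin_lock(&wait_lock);
 *      if (!READ_ONCE(cond))           smp_mb__after_spinlock();
 *              go to sleep;            if (READ_ONCE(state) == SLEEPING)
 *                                              issue the wakeup;
 *                                      spin_unlock(&wait_lock);
 *
 * Property (1) forbids the outcome where the sleeper misses "cond" AND the
 * waker misses "state": either the sleeper sees the condition and does not
 * sleep, or the waker sees the sleeper and wakes it, so no wakeup is lost.
 */
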
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
        mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&(lock)->raw_lock);

        if (ret)
                mmiowb_spin_lock();

        return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        mmiowb_spin_unlock();
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set.  The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

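/*
 * Minimal usage sketch, assuming a hypothetical driver structure with an
 * embedded raw_spinlock_t (the names are placeholders, not kernel APIs).
 * Note that "flags" must be a plain unsigned long lvalue rather than a
 * pointer, because raw_spin_lock_irqsave() is a macro that typechecks the
 * variable and assigns the saved IRQ state to it by name:
 *
 *      struct hypothetical_irq_data {
 *              raw_spinlock_t lock;
 *              unsigned int pending;
 *      };
 *
 *      static void hypothetical_mark_pending(struct hypothetical_irq_data *d)
 *      {
 *              unsigned long flags;
 *
 *              raw_spin_lock_irqsave(&d->lock, flags);
 *              d->pending = 1;
 *              raw_spin_unlock_irqrestore(&d->lock, flags);
 *      }
 */
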
/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init(spinlock_check(lock),              \
                             #lock, &__key, LD_WAIT_CONFIG);    \
} while (0)

#else

# define spin_lock_init(_lock)                          \
do {                                                    \
        spinlock_check(_lock);                          \
        *(_lock) = __SPIN_LOCK_UNLOCKED(_lock);         \
} while (0)

#endif

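/*
 * Minimal usage sketch, assuming a hypothetical structure with an embedded
 * spinlock_t (all names below are placeholders, not kernel APIs): the lock
 * must be initialized with spin_lock_init() before first use; statically
 * allocated locks can use DEFINE_SPINLOCK() instead.
 *
 *      struct hypothetical_counter {
 *              spinlock_t lock;
 *              unsigned long value;
 *      };
 *
 *      static void hypothetical_counter_init(struct hypothetical_counter *c)
 *      {
 *              spin_lock_init(&c->lock);
 *              c->value = 0;
 *      }
 *
 *      static void hypothetical_counter_inc(struct hypothetical_counter *c)
 *      {
 *              spin_lock(&c->lock);
 *              c->value++;
 *              spin_unlock(&c->lock);
 *      }
 */
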
static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                      \
do {                                                                         \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

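/*
 * Minimal usage sketch for atomic_dec_and_lock(), assuming a hypothetical
 * refcounted object kept on a global list (the object, list and lock names
 * are placeholders, not kernel APIs).  The lock is taken only when the
 * final reference is dropped, which is exactly when the object has to be
 * unlinked and freed:
 *
 *      static void hypothetical_obj_put(struct hypothetical_obj *obj)
 *      {
 *              if (!atomic_dec_and_lock(&obj->refcount, &hypothetical_list_lock))
 *                      return;         // not the last reference
 *
 *              // refcount reached zero and hypothetical_list_lock is held
 *              list_del(&obj->node);
 *              spin_unlock(&hypothetical_list_lock);
 *              kfree(obj);
 *      }
 */
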
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })

void free_bucket_spinlocks(spinlock_t *locks);

#endif /* __LINUX_SPINLOCK_H */