#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif
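
/*
 * Illustrative sketch, not part of this header: with CONFIG_DEBUG_SPINLOCK
 * the raw_spin_lock_init() macro above registers a static lock_class_key
 * per init site, so every call site gets its own lockdep class.  A
 * dynamically allocated lock is therefore initialized with the macro
 * rather than by copying a static initializer.  The structure and
 * function names below are made up for the example:
 *
 *	struct my_dev {
 *		raw_spinlock_t	lock;
 *	};
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		raw_spin_lock_init(&dev->lock);
 *	}
 *
 * Statically allocated locks are normally declared with
 * DEFINE_RAW_SPINLOCK(name) from linux/spinlock_types.h instead.
 */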

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It only has to guarantee that a STORE issued before the critical
 * section cannot be reordered with LOADs and STOREs inside that section.
 * spin_lock() is a one-way barrier: a LOAD inside the critical section
 * cannot escape out of it, but an earlier STORE can still leak in.  So
 * the default implementation makes sure such a STORE cannot move into
 * the critical section: smp_wmb() serializes it with the STORE done by
 * spin_lock() itself.
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set.  The
 * various methods are defined as NOPs in the cases where they are
 * not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	do {								\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		_raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	} while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
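
/*
 * Illustrative sketch, not part of this header: raw_spin_lock_nested()
 * only changes the lockdep annotation.  When two locks of the same lock
 * class must be held at once, the inner acquisition is taken with a
 * non-zero subclass so lockdep does not report a false self-deadlock.
 * The structure below is made up for the example; SINGLE_DEPTH_NESTING
 * comes from linux/lockdep.h:
 *
 *	static void my_transfer(struct my_obj *src, struct my_obj *dst)
 *	{
 *		raw_spin_lock(&src->lock);
 *		raw_spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		dst->count += src->count;
 *		src->count = 0;
 *		raw_spin_unlock(&dst->lock);
 *		raw_spin_unlock(&src->lock);
 *	}
 *
 * The annotation only silences the report; callers must still enforce a
 * consistent locking order (e.g. by comparing addresses) to avoid real
 * deadlocks.
 */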

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
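
/*
 * Illustrative sketch, not part of this header: the _irqsave variants
 * take 'flags' by name rather than by pointer, which is why the macros
 * above typecheck it against unsigned long.  A typical interrupt-safe
 * critical section (names made up for the example):
 *
 *	static void my_counter_inc(raw_spinlock_t *lock, unsigned long *counter)
 *	{
 *		unsigned long flags;
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		(*counter)++;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 *
 * The same 'flags' variable must be handed to the matching
 * raw_spin_unlock_irqrestore() so the caller's interrupt state is
 * restored exactly as it was.
 */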

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
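
/*
 * Illustrative sketch, not part of this header: spin_lock_bh() is the
 * variant to use when the protected data is also touched from softirq
 * context (timers, tasklets, network RX) on the local CPU.  The names
 * below are made up for the example; list_add_tail() is from
 * linux/list.h:
 *
 *	static void my_queue_work(struct my_ctx *ctx, struct list_head *entry)
 *	{
 *		spin_lock_bh(&ctx->lock);
 *		list_add_tail(entry, &ctx->pending);
 *		spin_unlock_bh(&ctx->lock);
 *	}
 *
 * Process context takes the _bh form so a softirq cannot interrupt the
 * critical section on this CPU and deadlock on the same lock; the
 * softirq handler itself can then use plain spin_lock()/spin_unlock().
 */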

/**
 * spin_unlock_wait - Interpose between successive critical sections
 * @lock: the spinlock whose critical sections are to be interposed.
 *
 * Semantically this is equivalent to a spin_lock() immediately
 * followed by a spin_unlock().  However, most architectures have
 * more efficient implementations in which the spin_unlock_wait()
 * cannot block concurrent lock acquisition, and in some cases
 * where spin_unlock_wait() does not write to the lock variable.
 * Nevertheless, spin_unlock_wait() can have high overhead, so if
 * you feel the need to use it, please check to see if there is
 * a better way to get your job done.
 *
 * The ordering guarantees provided by spin_unlock_wait() are:
 *
 * 1.  All accesses preceding the spin_unlock_wait() happen before
 *     any accesses in later critical sections for this same lock.
 * 2.  All accesses following the spin_unlock_wait() happen after
 *     any accesses in earlier critical sections for this same lock.
 */
static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

#endif /* __LINUX_SPINLOCK_H */
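
/*
 * Illustrative sketch, not part of this header: the classic use of
 * atomic_dec_and_lock() is dropping the last reference to an object
 * that lives on a lock-protected list.  The spinlock is only taken
 * when the refcount actually reaches zero, keeping the common "put"
 * path cheap.  The names below are made up for the example; list_del()
 * and kfree() are from linux/list.h and linux/slab.h:
 *
 *	static void my_obj_put(struct my_obj *obj, spinlock_t *list_lock)
 *	{
 *		if (atomic_dec_and_lock(&obj->refcount, list_lock)) {
 *			list_del(&obj->node);
 *			spin_unlock(list_lock);
 *			kfree(obj);
 *		}
 *	}
 */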