/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <[email protected]>
 *          Peter Zijlstra <[email protected]>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest; namely: task, softirq, hardirq, nmi. As there
 * are at most 4 nesting levels, the nesting level can be encoded by a 2-bit
 * number. We can then encode the tail by combining this 2-bit nesting level
 * with the cpu number. With one byte for the lock value and 3 bytes for the
 * tail, only a 32-bit word is now needed. Even though we only need 1 bit for
 * the lock, we extend it to a full byte to achieve better performance for
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on the smaller 8-bit and 16-bit data types.
 */
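/*
 * For reference, the 32-bit word layout that results from the compression
 * described above is defined in asm-generic/qspinlock_types.h; it is
 * summarized here for convenience (that header remains authoritative):
 *
 * When NR_CPUS < 16K:
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K:
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */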
#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif

/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lockword accesses
 * made by atomic_cond_read_relaxed when waiting for the lock to
 * transition out of the "== _Q_PENDING_VAL" state. We don't spin
 * indefinitely because there's no guarantee that we'll make forward
 * progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS	1
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
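/*
 * A worked example of the tail code word (illustrative only; the exact
 * offsets depend on the configuration): with NR_CPUS < 16K we have
 * _Q_TAIL_IDX_OFFSET == 16 and _Q_TAIL_CPU_OFFSET == 18, so
 * encode_tail(2, 1) yields ((2 + 1) << 18) | (1 << 16) == 0xd0000, and
 * decode_tail(0xd0000) maps back to the mcs_nodes[1] entry of cpu 2.
 */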
#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

#if _Q_PENDING_BITS == 8
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 0);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&lock->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}


/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */
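/*
 * For context: this slowpath is only entered once the fast path has failed.
 * A rough sketch of the generic caller in asm-generic/qspinlock.h (not part
 * of this file, shown here only as an illustration) looks like:
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val;
 *
 *		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 *		if (likely(val == 0))
 *			return;
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 *
 * so @val below is the lock word as observed by the failed fast-path cmpxchg.
 */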
/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * Wait for in-progress pending->locked hand-overs with a bounded
	 * number of spins so that we guarantee forward progress.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		int cnt = _Q_PENDING_LOOPS;
		val = atomic_cond_read_relaxed(&lock->val,
					       (VAL != _Q_PENDING_VAL) || !cnt--);
	}

	/*
	 * If we observe any contention; queue.
	 */
	if (val & ~_Q_LOCKED_MASK)
		goto queue;

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
	if (!(val & ~_Q_LOCKED_MASK)) {
		/*
		 * We're pending, wait for the owner to go away.
		 *
		 * *,1,1 -> *,1,0
		 *
		 * this wait loop must be a load-acquire such that we match the
		 * store-release that clears the locked bit and create lock
		 * sequentiality; this is because not all
		 * clear_pending_set_locked() implementations imply full
		 * barriers.
		 */
		if (val & _Q_LOCKED_MASK) {
			atomic_cond_read_acquire(&lock->val,
						 !(VAL & _Q_LOCKED_MASK));
		}

		/*
		 * take ownership and clear the pending bit.
		 *
		 * *,1,0 -> *,0,1
		 */
		clear_pending_set_locked(lock);
		return;
	}

	/*
	 * If pending was clear but there are waiters in the queue, then
	 * we need to undo our setting of pending before we queue ourselves.
	 */
	if (!(val & _Q_PENDING_MASK))
		clear_pending(lock);

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;

	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	barrier();

	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 *
	 * RELEASE, such that the stores to @node must be complete.
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);

		/*
		 * We must ensure that the stores to @node are observed before
		 * the write to prev->next. The address dependency from
		 * xchg_tail is not sufficient to ensure this because the read
		 * component of xchg_tail is unordered with respect to the
		 * initialisation of @node.
		 */
		smp_store_release(&prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
	 * been designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,*,0 -> *,*,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail)
	 * and nobody is pending, clear the tail code and grab the lock.
	 * Otherwise, we only need to grab the lock.
	 */

	/* In the PV case we might already have _Q_LOCKED_VAL set */
	if ((val & _Q_TAIL_MASK) == tail) {
		/*
		 * The atomic_cond_read_acquire() call above has provided the
		 * necessary acquire semantics required for locking.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release; /* No contention */
	}

	/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
	set_locked(lock);

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);

/*
 * Generate the paravirt code for queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif