/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD$
 */

#ifndef _SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET and MTX_DUPOK
 * can also be passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_NOPROFILE	0x00000020	/* Don't profile this lock */
#define	MTX_NEW		0x00000040	/* Don't check for double-init */
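
/*
 * Illustrative sketch (not itself part of this header): a driver with a
 * hypothetical softc might set its locks up and tear them down like so:
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	mtx_init(&sc->sc_intr_mtx, "foo intr", NULL, MTX_SPIN);
 *	...
 *	mtx_destroy(&sc->sc_intr_mtx);
 *	mtx_destroy(&sc->sc_mtx);
 */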

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define	MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type.  None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_UNOWNED	0x00000000	/* Cookie for free mutex */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_DESTROYED	0x00000004	/* lock destroyed */
#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)
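
/*
 * Illustrative sketch of the resulting MTX_DEF lock word layout: thread
 * pointers are aligned past the flag bits above, so mtx_lock can hold the
 * owning thread pointer ORed with the state bits.  Decoding (this is what
 * lv_mtx_owner() and mtx_owner(), defined below, do):
 *
 *	v = MTX_READ_VALUE(m);
 *	owner = (struct thread *)(v & ~MTX_FLAGMASK);
 *	recursed = (v & MTX_RECURSED) != 0;
 *
 * A free mutex holds MTX_UNOWNED, which decodes to a NULL owner.
 */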

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE, and to hide the lock cookie crunching from the
 *	 consumers.  These functions should not be called directly by any
 *	 code using the API.  Their macros cover their functionality.
 *	 Functions with a `_' suffix are the entrypoint for the common
 *	 KPI covering both compat shims and the fast path case.  These can be
 *	 used by consumers willing to pass options, file and line
 *	 information, in an option-independent way.
 *
 * [See below for descriptions]
 *
 */
void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
	    int opts);
void	_mtx_destroy(volatile uintptr_t *c);
void	mtx_sysinit(void *arg);
int	_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF);
int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	mutex_init(void);
#if LOCK_DEBUG > 0
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v);
#endif
void	mtx_wait_unlocked(struct mtx *m);

#ifdef SMP
#if LOCK_DEBUG > 0
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
#endif
#endif
void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
int	__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	mtx_spin_wait_unlocked(struct mtx *m);

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif
void	thread_lock_flags_(struct thread *, int, const char *, int);
#if LOCK_DEBUG > 0
void	_thread_lock(struct thread *td, int opts, const char *file, int line);
#else
void	_thread_lock(struct thread *);
#endif

#if defined(LOCK_PROFILING) || (defined(KLD_MODULE) && !defined(KLD_TIED))
#define	thread_lock(tdp)						\
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
#elif LOCK_DEBUG > 0
#define	thread_lock(tdp)						\
	_thread_lock((tdp), 0, __FILE__, __LINE__)
#else
#define	thread_lock(tdp)						\
	_thread_lock((tdp))
#endif

#if LOCK_DEBUG > 0
#define	thread_lock_flags(tdp, opt)					\
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#else
#define	thread_lock_flags(tdp, opt)					\
	_thread_lock(tdp)
#endif

#define	thread_unlock(tdp)						\
	mtx_unlock_spin((tdp)->td_lock)
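
/*
 * Illustrative sketch: a thread's scheduler state is protected by whatever
 * spin lock td_lock currently points at, and that pointer can be changed
 * by the scheduler over the thread's lifetime (see blocked_lock below).
 * Consumers therefore go through the wrappers rather than locking
 * td->td_lock directly:
 *
 *	thread_lock(td);
 *	... examine or modify td_lock-protected fields of td ...
 *	thread_unlock(td);
 */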

/*
 * Top-level macros to provide lock cookie once the actual mtx is passed.
 * They will also prevent passing a malformed object to the mtx KPI by
 * failing compilation as the mtx_lock reserved member will not be found.
 */
#define	mtx_init(m, n, t, o)						\
	_mtx_init(&(m)->mtx_lock, n, t, o)
#define	mtx_destroy(m)							\
	_mtx_destroy(&(m)->mtx_lock)
#define	mtx_trylock_flags_(m, o, f, l)					\
	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
#if LOCK_DEBUG > 0
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v, o, f, l)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v)
#endif
#ifdef SMP
#if LOCK_DEBUG > 0
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
#endif
#endif
#define	_mtx_lock_flags(m, o, f, l)					\
	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_flags(m, o, f, l)					\
	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_lock_spin_flags(m, o, f, l)				\
	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_trylock_spin_flags(m, o, f, l)				\
	__mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_spin_flags(m, o, f, l)				\
	__mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_mtx_assert(m, w, f, l)						\
	__mtx_assert(&(m)->mtx_lock, w, f, l)
#endif

#define	mtx_recurse	lock_object.lo_data

/* Very simple operations on mtx_lock. */

/* Try to obtain mtx_lock once. */
#define	_mtx_obtain_lock(mp, tid)					\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))

#define	_mtx_obtain_lock_fetch(mp, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#define	_mtx_release_lock(mp, tid)					\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)

/* Release mtx_lock quickly, assuming we own it. */
#define	_mtx_release_lock_quick(mp)					\
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)

#define	_mtx_release_lock_fetch(mp, vp)					\
	atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, (vp), MTX_UNOWNED)
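
/*
 * A note on the *_fetch variants above: atomic_fcmpset_acq_ptr() and
 * atomic_fcmpset_rel_ptr() return non-zero on success and, on failure,
 * store the value actually found in mtx_lock through `vp'.  The inlined
 * fast paths below rely on this: when the compare-and-set fails, the
 * freshly fetched value is passed straight to the slow path, avoiding a
 * second read of the lock word.
 */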

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Lock a normal mutex. */
#define	__mtx_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_sleep((mp), _v, (opts), (file), (line));	\
} while (0)

/*
 * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures).  Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	spinlock_enter();						\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) ||	\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_spin((mp), _v, (opts), (file), (line));	\
} while (0)
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,	\
		    mp, 0, 0, file, line);				\
		_ret = 1;						\
	}								\
	_ret;								\
})
#else /* SMP */
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = _tid;					\
	}								\
} while (0)
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock != MTX_UNOWNED) {				\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		(mp)->mtx_lock = _tid;					\
		_ret = 1;						\
	}								\
	_ret;								\
})
#endif /* SMP */

/* Unlock a normal mutex. */
#define	__mtx_unlock(mp, tid, opts, file, line) do {			\
	uintptr_t _v = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
	    !_mtx_release_lock_fetch((mp), &_v)))			\
		_mtx_unlock_sleep((mp), _v, (opts), (file), (line));	\
} while (0)

/*
 * Unlock a spin mutex.  For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures).  Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifdef SMP
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp);	\
		_mtx_release_lock_quick((mp));				\
	}								\
	spinlock_exit();						\
} while (0)
#else /* SMP */
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp);	\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	}								\
	spinlock_exit();						\
} while (0)
#endif /* SMP */
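
/*
 * Illustrative sketch (assumes a lock initialized with MTX_SPIN |
 * MTX_RECURSE): each acquire performs a spinlock_enter() and each release
 * a matching spinlock_exit(), so interrupts stay disabled until the
 * outermost unlock:
 *
 *	mtx_lock_spin(&m);	(spinlock_enter(); owner recorded)
 *	mtx_lock_spin(&m);	(spinlock_enter(); mtx_recurse incremented)
 *	mtx_unlock_spin(&m);	(mtx_recurse decremented; spinlock_exit())
 *	mtx_unlock_spin(&m);	(lock released; spinlock_exit())
 */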

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 * and pass option flags `opts' to the "hard" function, if required.
 * With these routines, it is possible to pass flags such as MTX_QUIET
 * to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 * it cannot.  Rather, it returns 0 on failure and non-zero on success.
 * It does NOT handle recursion as we assume that if a caller is properly
 * using this part of the interface, they will know that the lock in question
 * is _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 * relevant option flags `opts.'
 *
 * mtx_trylock_spin(m) attempts to acquire MTX_SPIN mutex `m' but doesn't
 * spin if it cannot.  Rather, it returns 0 on failure and non-zero on
 * success.  It always returns failure for recursed lock attempts.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
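
/*
 * Illustrative sketch of the interface described above (`m' and the data
 * it protects are hypothetical):
 *
 *	mtx_lock(&m);
 *	... access data protected by `m' ...
 *	mtx_assert(&m, MA_OWNED);
 *	mtx_unlock(&m);
 *
 *	if (mtx_trylock(&m) != 0) {
 *		... the lock was acquired without sleeping ...
 *		mtx_unlock(&m);
 *	}
 */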

#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_trylock_spin(m)	mtx_trylock_spin_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)

struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
void mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define	mtx_pool_lock(pool, ptr)					\
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_lock_spin(pool, ptr)					\
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock(pool, ptr)					\
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock_spin(pool, ptr)					\
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))

/*
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_sleep;
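
/*
 * Illustrative sketch: pool mutexes let many unrelated objects share a
 * fixed set of locks, keyed by pointer.  Using the standard sleep pool:
 *
 *	mtx_pool_lock(mtxpool_sleep, obj);
 *	... short critical section protecting `obj' ...
 *	mtx_pool_unlock(mtxpool_sleep, obj);
 *
 * The same pointer always maps to the same mutex within a pool, so the
 * lock and unlock calls must name the same pool and pointer.
 */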

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define	mtx_lock_flags_(m, opts, file, line)				\
	_mtx_lock_flags((m), (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	_mtx_unlock_flags((m), (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	_mtx_lock_spin_flags((m), (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	_mtx_trylock_spin_flags((m), (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	_mtx_unlock_spin_flags((m), (opts), (file), (line))
#else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define	mtx_lock_flags_(m, opts, file, line)				\
	__mtx_lock((m), curthread, (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	__mtx_unlock((m), curthread, (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	__mtx_lock_spin((m), curthread, (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	__mtx_trylock_spin((m), curthread, (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	__mtx_unlock_spin((m))
#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#ifdef INVARIANTS
#define	mtx_assert_(m, what, file, line)				\
	_mtx_assert((m), (what), (file), (line))

#define	GIANT_REQUIRED	mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__)

#else /* INVARIANTS */
#define	mtx_assert_(m, what, file, line)	(void)0
#define	GIANT_REQUIRED
#endif /* INVARIANTS */

#define	mtx_lock_flags(m, opts)						\
	mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_flags(m, opts)					\
	mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_spin_flags(m, opts)					\
	mtx_trylock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_assert(m, what)						\
	mtx_assert_((m), (what), __FILE__, __LINE__)

#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)

#define	MTX_READ_VALUE(m)	((m)->mtx_lock)

#define	mtx_initialized(m)	lock_initialized(&(m)->lock_object)

#define	lv_mtx_owner(v)	((struct thread *)((v) & ~MTX_FLAGMASK))

#define	mtx_owner(m)	lv_mtx_owner(MTX_READ_VALUE(m))

#define	mtx_owned(m)	(mtx_owner(m) == curthread)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)

#define	mtx_name(m)	((m)->lock_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx Giant;
extern struct mtx blocked_lock;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a return with a drop of Giant followed by a return.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
 */
#ifndef DROP_GIANT
#define	DROP_GIANT()							\
do {									\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		for (_giantcnt = 0; mtx_owned(&Giant) &&		\
		    !SCHEDULER_STOPPED(); _giantcnt++)			\
			mtx_unlock(&Giant);				\
	}

#define	PICKUP_GIANT()							\
	PARTIAL_PICKUP_GIANT();						\
} while (0)

#define	PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	if (__predict_false(_giantcnt > 0)) {				\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}
#endif

struct mtx_args {
	void		*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define	MTX_SYSINIT(name, mtx, desc, opts)				\
	static struct mtx_args name##_args = {				\
		(mtx),							\
		(desc),							\
		(opts)							\
	};								\
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_sysinit, &name##_args);					\
	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock))
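
/*
 * Illustrative sketch: MTX_SYSINIT() arranges for a global mutex to be
 * initialized automatically during boot and destroyed on teardown, e.g.:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global lock", MTX_DEF);
 */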

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined for INVARIANT_SUPPORT infrastructure
 * support as _mtx_assert() itself uses them and the latter implies that
 * _mtx_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	MA_OWNED	LA_XLOCKED
#define	MA_NOTOWNED	LA_UNLOCKED
#define	MA_RECURSED	LA_RECURSED
#define	MA_NOTRECURSED	LA_NOTRECURSED
#endif

/*
 * Common lock type names.
 */
#define	MTX_NETWORK_LOCK	"network driver"

#endif /* _KERNEL */
#endif /* _SYS_MUTEX_H_ */