/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

extern "C" {
#include <kern/locks.h>

#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
#endif

void
IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if (state == kIOLockStateLocked) {
        lck_mtx_lock( lock);
    }
}

IOLock *
IOLockAlloc( void )
{
    return lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

void
IOLockInlineInit( IOLock *lock )
{
    lck_mtx_init(lock, IOLockGroup, LCK_ATTR_NULL);
}

void
IOLockInlineDestroy( IOLock * lock)
{
    lck_mtx_destroy( lock, IOLockGroup);
}

void
IOLockFree( IOLock * lock)
{
    lck_mtx_free( lock, IOLockGroup);
}

lck_mtx_t *
IOLockGetMachLock( IOLock * lock)
{
    return (lck_mtx_t *)lock;
}

int
IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
    return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}

int
IOLockSleepDeadline( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}

int
IOLockSleepWithInheritor( IOLock *lock, UInt32 lck_sleep_action,
    void *event, thread_t inheritor, UInt32 interType, uint64_t deadline)
{
    return (int) lck_mtx_sleep_with_inheritor(lock, (lck_sleep_action_t) lck_sleep_action, (event_t) event, inheritor,
        (wait_interrupt_t) interType, deadline);
}

void
IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}

void
IOLockWakeupAllWithInheritor(IOLock * lock, void *event)
{
    wakeup_all_with_inheritor((event_t) event, THREAD_AWAKENED);
}
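/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * condition-wait built on the sleep/wakeup primitives above. The names
 * MyDriverLock and workReady are hypothetical.
 *
 *    IOLock *MyDriverLock = IOLockAlloc();
 *    bool workReady = false;
 *
 *    // Waiter: hold the lock, test the condition, sleep on an event.
 *    // lck_mtx_sleep drops and reacquires the mutex around the wait.
 *    IOLockLock(MyDriverLock);
 *    while (!workReady) {
 *        IOLockSleep(MyDriverLock, &workReady, THREAD_UNINT);
 *    }
 *    IOLockUnlock(MyDriverLock);
 *
 *    // Producer: set the condition under the lock, then wake the waiter.
 *    IOLockLock(MyDriverLock);
 *    workReady = true;
 *    IOLockWakeup(MyDriverLock, &workReady, true);  // oneThread == true
 *    IOLockUnlock(MyDriverLock);
 */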
#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers
 * will bind at runtime to these functions, which support a NULL event.
 */
int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");

int
IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
    if (event == NULL) {
        event = (void *)&IOLockSleep_NO_EVENT;
    }

    return IOLockSleep(lock, event, interType);
}

int
IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
    if (event == NULL) {
        event = (void *)&IOLockSleep_NO_EVENT;
    }

    return IOLockSleepDeadline(lock, event, deadline, interType);
}

void
IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
    if (event == NULL) {
        event = (void *)&IOLockSleep_NO_EVENT;
    }

    IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */


struct _IORecursiveLock {
    lck_mtx_t   mutex;
    lck_grp_t * group;
    thread_t    thread;
    UInt32      count;
};

IORecursiveLock *
IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
    _IORecursiveLock * lock;

    if (lockGroup == NULL) {
        return NULL;
    }

    lock = IOMallocType( _IORecursiveLock );
    if (!lock) {
        return NULL;
    }

    lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
    lock->group = lockGroup;
    lock->thread = NULL;
    lock->count = 0;

    return (IORecursiveLock *) lock;
}


IORecursiveLock *
IORecursiveLockAlloc( void )
{
    return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}

void
IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    lck_mtx_destroy(&lock->mutex, lock->group);
    IOFreeType( lock, _IORecursiveLock );
}

lck_mtx_t *
IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
    return &lock->mutex;
}

void
IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if (lock->thread == IOThreadSelf()) {
        lock->count++;
    } else {
        lck_mtx_lock( &lock->mutex );
        assert( lock->thread == NULL );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}

boolean_t
IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if (lock->thread == IOThreadSelf()) {
        lock->count++;
        return true;
    } else {
        if (lck_mtx_try_lock( &lock->mutex )) {
            assert( lock->thread == NULL );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return true;
        }
    }
    return false;
}

void
IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf());

    if (0 == (--lock->count)) {
        lock->thread = NULL;
        lck_mtx_unlock( &lock->mutex );
    }
}
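/*
 * Usage sketch (illustrative only): the owner field above lets the same
 * thread re-enter without blocking; only the final unlock releases the
 * underlying mutex.
 *
 *    IORecursiveLock *rl = IORecursiveLockAlloc();
 *    IORecursiveLockLock(rl);      // acquires mutex, count == 1
 *    IORecursiveLockLock(rl);      // same thread re-enters, count == 2
 *    IORecursiveLockUnlock(rl);    // count == 1, mutex still held
 *    IORecursiveLockUnlock(rl);    // count == 0, mutex released
 *    IORecursiveLockFree(rl);
 */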
boolean_t
IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return lock->thread == IOThreadSelf();
}

int
IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = NULL;
    res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

    // Must re-establish the recursive lock no matter why we woke up,
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == NULL);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

int
IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = NULL;
    res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

    // Must re-establish the recursive lock no matter why we woke up,
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == NULL);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

void
IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}

/*
 * Complex (read/write) lock operations
 */

IORWLock *
IORWLockAlloc( void )
{
    return lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

void
IORWLockInlineInit( IORWLock *lock )
{
    lck_rw_init(lock, IOLockGroup, LCK_ATTR_NULL);
}

void
IORWLockInlineDestroy( IORWLock * lock)
{
    lck_rw_destroy( lock, IOLockGroup);
}

void
IORWLockFree( IORWLock * lock)
{
    lck_rw_free( lock, IOLockGroup);
}

lck_rw_t *
IORWLockGetMachLock( IORWLock * lock)
{
    return (lck_rw_t *)lock;
}


/*
 * Spin locks
 */

IOSimpleLock *
IOSimpleLockAlloc( void )
{
    return lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL);
}

void
IOSimpleLockInit( IOSimpleLock * lock)
{
    lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}

void
IOSimpleLockDestroy( IOSimpleLock * lock )
{
    lck_spin_destroy(lock, IOLockGroup);
}

void
IOSimpleLockFree( IOSimpleLock * lock )
{
    lck_spin_free( lock, IOLockGroup);
}

lck_spin_t *
IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
    return (lck_spin_t *)lock;
}

#ifndef IOLOCKS_INLINE
/*
 * Lock assertions
 */

void
IOLockAssert(IOLock * lock, IOLockAssertState type)
{
    LCK_MTX_ASSERT(lock, type);
}

void
IORWLockAssert(IORWLock * lock, IORWLockAssertState type)
{
    LCK_RW_ASSERT(lock, type);
}

void
IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type)
{
    LCK_SPIN_ASSERT(lock, type);
}
#endif /* !IOLOCKS_INLINE */
} /* extern "C" */
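/*
 * Usage sketch (illustrative only): IOSimpleLocks guarding state that is
 * shared with primary interrupt context should use the interrupt-disabling
 * variants declared in IOKit/IOLocks.h.
 *
 *    IOSimpleLock *sl = IOSimpleLockAlloc();
 *    IOInterruptState is = IOSimpleLockLockDisableInterrupt(sl);
 *    // ... touch state shared with the interrupt handler ...
 *    IOSimpleLockUnlockEnableInterrupt(sl, is);
 *    IOSimpleLockFree(sl);
 */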