/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

extern "C" {
#include <kern/locks.h>

#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
#endif

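/*
 * Note: IOLockInitWithState does not initialize the underlying mutex; it
 * assumes an already-initialized lock and simply acquires it when the
 * caller asks for kIOLockStateLocked.
 */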
void
IOLockInitWithState( IOLock * lock, IOLockState state)
{
	if (state == kIOLockStateLocked) {
		lck_mtx_lock( lock);
	}
}

IOLock *
IOLockAlloc( void )
{
	return lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

void
IOLockInlineInit( IOLock *lock )
{
	lck_mtx_init(lock, IOLockGroup, LCK_ATTR_NULL);
}

void
IOLockInlineDestroy( IOLock * lock)
{
	lck_mtx_destroy( lock, IOLockGroup);
}

void
IOLockFree( IOLock * lock)
{
	lck_mtx_free( lock, IOLockGroup);
}

lck_mtx_t *
IOLockGetMachLock( IOLock * lock)
{
	return (lck_mtx_t *)lock;
}

int
IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
	return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}

int
IOLockSleepDeadline( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
	           (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}

void
IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}

#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime to these functions,
 * which support a NULL event.
 */
int     IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int     IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void    IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");

int
IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	return IOLockSleep(lock, event, interType);
}

int
IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	return IOLockSleepDeadline(lock, event, deadline, interType);
}

void
IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */


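/*
 * Recursive lock: a Mach mutex plus an owner and a recursion count.
 * "thread" is the current owner (NULL when unowned) and "count" is the
 * number of times the owner has taken the lock. Only the owning thread
 * ever stores or clears "thread", which is what makes the unlocked
 * owner checks below safe.
 */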
struct _IORecursiveLock {
	lck_mtx_t       mutex;
	lck_grp_t       *group;
	thread_t        thread;
	UInt32          count;
};

IORecursiveLock *
IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
	_IORecursiveLock * lock;

	if (lockGroup == NULL) {
		return NULL;
	}

	lock = IOMallocType( _IORecursiveLock );
	if (!lock) {
		return NULL;
	}

	lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
	lock->group = lockGroup;
	lock->thread = NULL;
	lock->count  = 0;

	return (IORecursiveLock *) lock;
}


IORecursiveLock *
IORecursiveLockAlloc( void )
{
	return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}

void
IORecursiveLockFree( IORecursiveLock * _lock )
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	lck_mtx_destroy(&lock->mutex, lock->group);
	IOFreeType( lock, _IORecursiveLock );
}

lck_mtx_t *
IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
	return &lock->mutex;
}

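/*
 * If the calling thread already owns the lock, just bump the recursion
 * count; otherwise acquire the underlying mutex and take ownership.
 * The unlocked read of lock->thread is safe: it can only compare equal
 * to IOThreadSelf() if this thread stored that value itself while
 * holding the mutex.
 */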
void
IORecursiveLockLock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	if (lock->thread == IOThreadSelf()) {
		lock->count++;
	} else {
		lck_mtx_lock( &lock->mutex );
		assert( lock->thread == NULL );
		assert( lock->count == 0 );
		lock->thread = IOThreadSelf();
		lock->count = 1;
	}
}

boolean_t
IORecursiveLockTryLock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	if (lock->thread == IOThreadSelf()) {
		lock->count++;
		return true;
	} else {
		if (lck_mtx_try_lock( &lock->mutex )) {
			assert( lock->thread == NULL );
			assert( lock->count == 0 );
			lock->thread = IOThreadSelf();
			lock->count = 1;
			return true;
		}
	}
	return false;
}

void
IORecursiveLockUnlock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	assert( lock->thread == IOThreadSelf());

	if (0 == (--lock->count)) {
		lock->thread = NULL;
		lck_mtx_unlock( &lock->mutex );
	}
}

boolean_t
IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	return lock->thread == IOThreadSelf();
}

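/*
 * Sleeping on a recursive lock must hand the underlying mutex to other
 * threads, so the owner and recursion count are saved and cleared before
 * the wait and restored afterwards, whatever the wakeup reason.
 */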
int
IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
	UInt32 count = lock->count;
	int res;

	assert(lock->thread == IOThreadSelf());

	lock->count = 0;
	lock->thread = NULL;
	res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

	// Must re-establish the recursive lock no matter why we woke up;
	// otherwise we would potentially leave the return path corrupted.
	assert(lock->thread == NULL);
	assert(lock->count == 0);
	lock->thread = IOThreadSelf();
	lock->count = count;
	return res;
}

int
IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
	UInt32 count = lock->count;
	int res;

	assert(lock->thread == IOThreadSelf());

	lock->count = 0;
	lock->thread = NULL;
	res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
	    (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

	// Must re-establish the recursive lock no matter why we woke up;
	// otherwise we would potentially leave the return path corrupted.
	assert(lock->thread == NULL);
	assert(lock->count == 0);
	lock->thread = IOThreadSelf();
	lock->count = count;
	return res;
}

void
IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}

/*
 * Complex (read/write) lock operations
 */

IORWLock *
IORWLockAlloc( void )
{
	return lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

void
IORWLockInlineInit( IORWLock *lock )
{
	lck_rw_init(lock, IOLockGroup, LCK_ATTR_NULL);
}

void
IORWLockInlineDestroy( IORWLock * lock)
{
	lck_rw_destroy( lock, IOLockGroup);
}

void
IORWLockFree( IORWLock * lock)
{
	lck_rw_free( lock, IOLockGroup);
}

lck_rw_t *
IORWLockGetMachLock( IORWLock * lock)
{
	return (lck_rw_t *)lock;
}

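/*
 * Only allocation, teardown, and lck_rw conversion helpers live here;
 * the read/write/unlock operations (IORWLockRead, IORWLockWrite,
 * IORWLockUnlock) are supplied by IOKit/IOLocks.h, where they map
 * directly onto the underlying lck_rw primitives.
 */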

/*
 * Spin locks
 */

IOSimpleLock *
IOSimpleLockAlloc( void )
{
	return lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL);
}

void
IOSimpleLockInit( IOSimpleLock * lock)
{
	lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}

void
IOSimpleLockDestroy( IOSimpleLock * lock )
{
	lck_spin_destroy(lock, IOLockGroup);
}

void
IOSimpleLockFree( IOSimpleLock * lock )
{
	lck_spin_free( lock, IOLockGroup);
}

lck_spin_t *
IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
	return (lck_spin_t *)lock;
}
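
/*
 * Illustrative use only: a spin lock shared with interrupt context must
 * be held with interrupts disabled, e.g.
 *
 *	IOInterruptState is = IOSimpleLockLockDisableInterrupt(lock);
 *	// ... touch data shared with the interrupt handler ...
 *	IOSimpleLockUnlockEnableInterrupt(lock, is);
 *
 * IOSimpleLockLockDisableInterrupt/IOSimpleLockUnlockEnableInterrupt are
 * declared in IOKit/IOLocks.h alongside the plain
 * IOSimpleLockLock/IOSimpleLockUnlock pair.
 */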

#ifndef IOLOCKS_INLINE
/*
 * Lock assertions
 */

void
IOLockAssert(IOLock * lock, IOLockAssertState type)
{
	LCK_MTX_ASSERT(lock, type);
}

void
IORWLockAssert(IORWLock * lock, IORWLockAssertState type)
{
	LCK_RW_ASSERT(lock, type);
}

void
IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type)
{
	LCK_SPIN_ASSERT(lock, type);
}
#endif /* !IOLOCKS_INLINE */
} /* extern "C" */