/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

extern "C" {
#include <kern/locks.h>

#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
#endif

void
IOLockInitWithState( IOLock * lock, IOLockState state)
{
	if (state == kIOLockStateLocked) {
		lck_mtx_lock( lock);
	}
}

IOLock *
IOLockAlloc( void )
{
	return lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

void
IOLockFree( IOLock * lock)
{
	lck_mtx_free( lock, IOLockGroup);
}

lck_mtx_t *
IOLockGetMachLock( IOLock * lock)
{
	return (lck_mtx_t *)lock;
}

int
IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
	return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}

int
IOLockSleepDeadline( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
	           (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}

void
IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
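
/*
 * Illustrative usage sketch (not compiled here): a typical condition wait
 * pairs IOLockSleep() with IOLockWakeup() on the same event address.
 * `dataReady` is a hypothetical flag protected by `lock`.
 *
 *	// waiter
 *	IOLockLock(lock);
 *	while (!dataReady) {
 *		IOLockSleep(lock, &dataReady, THREAD_UNINT);	// drops and reacquires lock
 *	}
 *	IOLockUnlock(lock);
 *
 *	// producer
 *	IOLockLock(lock);
 *	dataReady = true;
 *	IOLockWakeup(lock, &dataReady, false);	// false == wake all waiters
 *	IOLockUnlock(lock);
 */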


#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers
 * bind at runtime to these legacy entry points, which accept a NULL event.
 */
int     IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int     IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void    IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");

int
IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	return IOLockSleep(lock, event, interType);
}

int
IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	return IOLockSleepDeadline(lock, event, deadline, interType);
}

void
IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */


struct _IORecursiveLock {
	lck_mtx_t       mutex;
	lck_grp_t       *group;
	thread_t        thread;
	UInt32          count;
};

IORecursiveLock *
IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
	_IORecursiveLock * lock;

	if (lockGroup == NULL) {
		return NULL;
	}

	lock = IOMallocType( _IORecursiveLock );
	if (!lock) {
		return NULL;
	}

	lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
	lock->group = lockGroup;
	lock->thread = NULL;
	lock->count  = 0;

	return (IORecursiveLock *) lock;
}
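
/*
 * Illustrative sketch (not compiled here): a driver that wants its locks
 * attributed to its own group in lock statistics can pass one in; the group
 * name below is hypothetical.
 *
 *	lck_grp_t *grp = lck_grp_alloc_init("com.example.driver", LCK_GRP_ATTR_NULL);
 *	IORecursiveLock *lock = IORecursiveLockAllocWithLockGroup(grp);
 */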


IORecursiveLock *
IORecursiveLockAlloc( void )
{
	return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}

void
IORecursiveLockFree( IORecursiveLock * _lock )
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	lck_mtx_destroy(&lock->mutex, lock->group);
	IOFreeType( lock, _IORecursiveLock );
}

lck_mtx_t *
IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
	return &lock->mutex;
}

void
IORecursiveLockLock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	if (lock->thread == IOThreadSelf()) {
		lock->count++;
	} else {
		lck_mtx_lock( &lock->mutex );
		assert( lock->thread == NULL );
		assert( lock->count == 0 );
		lock->thread = IOThreadSelf();
		lock->count = 1;
	}
}

boolean_t
IORecursiveLockTryLock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	if (lock->thread == IOThreadSelf()) {
		lock->count++;
		return true;
	} else {
		if (lck_mtx_try_lock( &lock->mutex )) {
			assert( lock->thread == NULL );
			assert( lock->count == 0 );
			lock->thread = IOThreadSelf();
			lock->count = 1;
			return true;
		}
	}
	return false;
}

void
IORecursiveLockUnlock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	assert( lock->thread == IOThreadSelf());

	if (0 == (--lock->count)) {
		lock->thread = NULL;
		lck_mtx_unlock( &lock->mutex );
	}
}
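
/*
 * Illustrative sketch (not compiled here): recursive acquisition from the
 * owning thread only bumps a counter; the underlying mutex is released when
 * the outermost unlock balances the count.
 *
 *	IORecursiveLockLock(lock);	// acquires mutex, count == 1
 *	IORecursiveLockLock(lock);	// same thread, count == 2
 *	IORecursiveLockUnlock(lock);	// count == 1, still held
 *	IORecursiveLockUnlock(lock);	// count == 0, mutex released
 */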

boolean_t
IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	return lock->thread == IOThreadSelf();
}

int
IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
	UInt32 count = lock->count;
	int res;

	assert(lock->thread == IOThreadSelf());

	lock->count = 0;
	lock->thread = NULL;
	res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

	// Must re-establish the recursive lock no matter why we woke up;
	// otherwise we could leave the return path corrupted.
	assert(lock->thread == NULL);
	assert(lock->count == 0);
	lock->thread = IOThreadSelf();
	lock->count = count;
	return res;
}

int
IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
	UInt32 count = lock->count;
	int res;

	assert(lock->thread == IOThreadSelf());

	lock->count = 0;
	lock->thread = NULL;
	res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
	    (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

	// Must re-establish the recursive lock no matter why we woke up;
	// otherwise we could leave the return path corrupted.
	assert(lock->thread == NULL);
	assert(lock->count == 0);
	lock->thread = IOThreadSelf();
	lock->count = count;
	return res;
}

void
IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
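
/*
 * Illustrative sketch (not compiled here): IORecursiveLockSleep() may be
 * called while the lock is held recursively; the full recursion count is
 * saved, dropped across the wait, and restored on wakeup. `event` is a
 * hypothetical wait channel owned by the caller.
 *
 *	IORecursiveLockLock(lock);
 *	IORecursiveLockLock(lock);				// count == 2
 *	IORecursiveLockSleep(lock, &event, THREAD_UNINT);	// waits with the mutex dropped
 *	// on wakeup the lock is held again with count == 2
 *	IORecursiveLockUnlock(lock);
 *	IORecursiveLockUnlock(lock);
 */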

/*
 * Complex (read/write) lock operations
 */

IORWLock *
IORWLockAlloc( void )
{
	return lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

void
IORWLockFree( IORWLock * lock)
{
	lck_rw_free( lock, IOLockGroup);
}

lck_rw_t *
IORWLockGetMachLock( IORWLock * lock)
{
	return (lck_rw_t *)lock;
}
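
/*
 * Illustrative sketch (not compiled here): readers and writers use the
 * inline entry points declared in IOLocks.h.
 *
 *	IORWLockRead(lock);	// shared: multiple readers may hold the lock
 *	// ... read shared state ...
 *	IORWLockUnlock(lock);
 *
 *	IORWLockWrite(lock);	// exclusive: a single writer
 *	// ... mutate shared state ...
 *	IORWLockUnlock(lock);
 */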


/*
 * Spin locks
 */

IOSimpleLock *
IOSimpleLockAlloc( void )
{
	return lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL);
}

void
IOSimpleLockInit( IOSimpleLock * lock)
{
	lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}

void
IOSimpleLockDestroy( IOSimpleLock * lock )
{
	lck_spin_destroy(lock, IOLockGroup);
}

void
IOSimpleLockFree( IOSimpleLock * lock )
{
	lck_spin_free( lock, IOLockGroup);
}

lck_spin_t *
IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
	return (lck_spin_t *)lock;
}
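
/*
 * Illustrative sketch (not compiled here): code that can race with an
 * interrupt handler usually takes a spin lock via the interrupt-disabling
 * variants declared in IOLocks.h, keeping the critical section short and
 * non-blocking.
 *
 *	IOInterruptState is = IOSimpleLockLockDisableInterrupt(lock);
 *	// ... short, non-blocking critical section ...
 *	IOSimpleLockUnlockEnableInterrupt(lock, is);
 */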

#ifndef IOLOCKS_INLINE
/*
 * Lock assertions
 */

void
IOLockAssert(IOLock * lock, IOLockAssertState type)
{
	LCK_MTX_ASSERT(lock, type);
}

void
IORWLockAssert(IORWLock * lock, IORWLockAssertState type)
{
	LCK_RW_ASSERT(lock, type);
}

void
IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type)
{
	LCK_SPIN_ASSERT(lock, type);
}
#endif /* !IOLOCKS_INLINE */
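
/*
 * Illustrative sketch (not compiled here): callers check lock ownership
 * with the assertion states declared in IOLocks.h; a violated assertion
 * panics on kernels with lock assertions enabled.
 *
 *	IOLockAssert(lock, kIOLockAssertOwned);		// current thread must hold lock
 *	IOLockAssert(lock, kIOLockAssertNotOwned);	// current thread must not hold lock
 */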
} /* extern "C" */