xref: /xnu-11215/iokit/Kernel/IOLocks.cpp (revision cc9a6355)
/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

extern "C" {
#include <kern/locks.h>

#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
#endif

void	IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if( state == kIOLockStateLocked)
        lck_mtx_lock( lock);
}

IOLock * IOLockAlloc( void )
{
    return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}

void	IOLockFree( IOLock * lock)
{
    lck_mtx_free( lock, IOLockGroup);
}

lck_mtx_t * IOLockGetMachLock( IOLock * lock)
{
    return( (lck_mtx_t *)lock);
}

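/*
 * Note: an IOLock is the Mach mutex itself, so IOLockGetMachLock is just a
 * cast.  The plain lock/unlock operations (IOLockLock, IOLockTryLock,
 * IOLockUnlock) live in IOLocks.h as wrappers over the lck_mtx_* primitives.
 * A minimal usage sketch (illustrative only):
 *
 *	IOLock * lock = IOLockAlloc();	// may return NULL on failure
 *	if (lock) {
 *		IOLockLock(lock);
 *		// ... critical section ...
 *		IOLockUnlock(lock);
 *		IOLockFree(lock);
 *	}
 */
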
int	IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
    return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}

int	IOLockSleepDeadline( IOLock * lock, void *event,
                                AbsoluteTime deadline, UInt32 interType)
{
    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
                                        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}

void	IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
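
/*
 * The sleep/wakeup calls above follow the usual monitor pattern:
 * IOLockSleep atomically drops the mutex while waiting on `event` and
 * reacquires it before returning.  An illustrative sketch (`gate` and
 * `ready` are hypothetical):
 *
 *	// waiter
 *	IOLockLock(gate);
 *	while (!ready)
 *		IOLockSleep(gate, &ready, THREAD_UNINT);
 *	IOLockUnlock(gate);
 *
 *	// producer
 *	IOLockLock(gate);
 *	ready = true;
 *	IOLockWakeup(gate, &ready, true);	// wake one thread
 *	IOLockUnlock(gate);
 */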


#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime to these functions,
 * which support a NULL event.
 */
int	IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int	IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
					   AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void	IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");

int	IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    return IOLockSleep(lock, event, interType);
}

int	IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
			     AbsoluteTime deadline, UInt32 interType)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    return IOLockSleepDeadline(lock, event, deadline, interType);
}

void	IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */
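
/*
 * Substituting the address of IOLockSleep_NO_EVENT for a NULL event means
 * that legacy sleepers and wakers which both pass NULL still rendezvous on
 * one shared synthetic event, rather than on an undefined wait channel.
 */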


struct _IORecursiveLock {
	lck_mtx_t	mutex;		// underlying Mach mutex
	lck_grp_t	*group;		// lock group the mutex was initialized with
	thread_t	thread;		// current owner, or 0 when unowned
	UInt32		count;		// recursion depth held by the owner
};

IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
    _IORecursiveLock * lock;

    if( lockGroup == 0 )
        return( 0 );

    lock = IONew( _IORecursiveLock, 1 );
    if( !lock )
        return( 0 );

    lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
    lock->group = lockGroup;
    lock->thread = 0;
    lock->count  = 0;

    return( (IORecursiveLock *) lock );
}


IORecursiveLock * IORecursiveLockAlloc( void )
{
    return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}

void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    lck_mtx_destroy(&lock->mutex, lock->group);
    IODelete( lock, _IORecursiveLock, 1 );
}

lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
    return( &lock->mutex );
}

void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf())
        lock->count++;
    else {
        lck_mtx_lock( &lock->mutex );
        assert( lock->thread == 0 );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}

boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
        if( lck_mtx_try_lock( &lock->mutex )) {
            assert( lock->thread == 0 );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return( true );
        }
    }
    return( false );
}

void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    if( 0 == (--lock->count)) {
        lock->thread = 0;
        lck_mtx_unlock( &lock->mutex );
    }
}

boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return( lock->thread == IOThreadSelf());
}
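
/*
 * Usage sketch (illustrative): the owning thread may re-enter freely; the
 * underlying mutex is released only when every Lock call has been balanced
 * by an Unlock:
 *
 *	IORecursiveLockLock(rl);	// count = 1, takes the mutex
 *	IORecursiveLockLock(rl);	// count = 2, no mutex operation
 *	assert(IORecursiveLockHaveLock(rl));
 *	IORecursiveLockUnlock(rl);	// count = 1
 *	IORecursiveLockUnlock(rl);	// count = 0, drops the mutex
 */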

int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

    // Must re-establish the recursive lock no matter why we woke up;
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

int	IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
                                  AbsoluteTime deadline, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
                                 (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

    // Must re-establish the recursive lock no matter why we woke up;
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
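
/*
 * Both sleep variants save and zero the entire recursion count before
 * calling into the lck_mtx_sleep* primitives, so the mutex is fully
 * released for other threads even when the caller holds it recursively;
 * the saved depth is restored verbatim on wakeup.
 */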

/*
 * Complex (read/write) lock operations
 */

IORWLock * IORWLockAlloc( void )
{
    return( lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}

void	IORWLockFree( IORWLock * lock)
{
    lck_rw_free( lock, IOLockGroup);
}

lck_rw_t * IORWLockGetMachLock( IORWLock * lock)
{
    return( (lck_rw_t *)lock);
}
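
/*
 * Only allocation, teardown and the Mach-lock accessor appear here; the
 * read/write acquire and release operations (IORWLockRead, IORWLockWrite,
 * IORWLockUnlock) are declared in IOLocks.h and are normally inlined over
 * the lck_rw_* primitives (see IOLOCKS_INLINE below).
 */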


/*
 * Spin locks
 */

IOSimpleLock * IOSimpleLockAlloc( void )
{
    return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) );
}

void IOSimpleLockInit( IOSimpleLock * lock)
{
    lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}

void IOSimpleLockFree( IOSimpleLock * lock )
{
    lck_spin_free( lock, IOLockGroup);
}

lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
    return( (lck_spin_t *)lock);
}
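
/*
 * As with the other lock types, the acquire/release operations
 * (IOSimpleLockLock, IOSimpleLockTryLock, IOSimpleLockUnlock) come from
 * IOLocks.h.  Since this is a spinning lock, holders must not block, and
 * the interrupt-disabling variants declared there should be used when the
 * lock is shared with interrupt context.
 */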

#ifndef IOLOCKS_INLINE
/*
 * Lock assertions
 */

void
IOLockAssert(IOLock * lock, IOLockAssertState type)
{
    LCK_MTX_ASSERT(lock, type);
}

void
IORWLockAssert(IORWLock * lock, IORWLockAssertState type)
{
    LCK_RW_ASSERT(lock, type);
}

void
IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type)
{
    LCK_SPIN_ASSERT(lock, type);
}
#endif /* !IOLOCKS_INLINE */
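
/*
 * Example (illustrative; `fLock` is a hypothetical member): a method that
 * requires its caller to hold the lock can state the invariant with
 *
 *	IOLockAssert(fLock, kIOLockAssertOwned);
 *
 * The assertion states (kIOLockAssertOwned / kIOLockAssertNotOwned) are
 * defined in IOLocks.h.
 */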

} /* extern "C" */
339