/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

extern "C" {
#include <kern/locks.h>

#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = false;
#endif

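/*
 * Note: this does not initialize the mutex itself; it brings an
 * already-initialized lock into the requested state, acquiring it when
 * kIOLockStateLocked is asked for and leaving it untouched otherwise.
 */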
void	IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if( state == kIOLockStateLocked)
        lck_mtx_lock( lock);
}

IOLock * IOLockAlloc( void )
{
    return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}

void	IOLockFree( IOLock * lock)
{
    lck_mtx_free( lock, IOLockGroup);
}

lck_mtx_t * IOLockGetMachLock( IOLock * lock)
{
    return( (lck_mtx_t *)lock);
}

int	IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
    return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}

int	IOLockSleepDeadline( IOLock * lock, void *event,
                             AbsoluteTime deadline, UInt32 interType)
{
    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
                                        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}

void	IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
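
/*
 * Together, the three routines above form the usual condition-wait pattern.
 * A minimal sketch, assuming a hypothetical driver flag 'dataReady' (not
 * part of this file):
 *
 *     IOLockLock(lock);
 *     while (!dataReady)                                // wakeups can be spurious
 *         IOLockSleep(lock, &dataReady, THREAD_UNINT);  // drops, then re-takes lock
 *     IOLockUnlock(lock);
 *
 * Producer side, with the lock held:
 *
 *     dataReady = true;
 *     IOLockWakeup(lock, &dataReady, false);            // oneThread=false wakes all waiters
 */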

#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers bind
 * at runtime to these functions, which still accept a NULL event. The __asm
 * labels below export the shims under the original, unsuffixed symbol names;
 * a NULL event is replaced with the synthetic IOLockSleep_NO_EVENT before
 * forwarding to the current implementations.
 */
int	IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int	IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
					   AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void	IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");

int	IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    return IOLockSleep(lock, event, interType);
}

int	IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
                                           AbsoluteTime deadline, UInt32 interType)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    return IOLockSleepDeadline(lock, event, deadline, interType);
}

void	IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */


struct _IORecursiveLock {
	lck_mtx_t	mutex;
	lck_grp_t	*group;
	thread_t	thread;
	UInt32		count;
};
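
/*
 * 'thread' and 'count' are written only while 'mutex' is held. The unlocked
 * read of 'thread' in the re-entry checks below is safe because it can only
 * compare equal to the current thread if that same thread stored it while
 * holding the mutex, i.e. if the caller already owns the lock.
 */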

IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
    _IORecursiveLock * lock;

    if( lockGroup == 0 )
        return( 0 );

    lock = IONew( _IORecursiveLock, 1 );
    if( !lock )
        return( 0 );

    lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
    lock->group = lockGroup;
    lock->thread = 0;
    lock->count  = 0;

    return( (IORecursiveLock *) lock );
}


IORecursiveLock * IORecursiveLockAlloc( void )
{
    return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}

void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    lck_mtx_destroy(&lock->mutex, lock->group);
    IODelete( lock, _IORecursiveLock, 1 );
}

lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
    return( &lock->mutex );
}

void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf())
        lock->count++;
    else {
        lck_mtx_lock( &lock->mutex );
        assert( lock->thread == 0 );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}

boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
        if( lck_mtx_try_lock( &lock->mutex )) {
            assert( lock->thread == 0 );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return( true );
        }
    }
    return( false );
}

void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    if( 0 == (--lock->count)) {
        lock->thread = 0;
        lck_mtx_unlock( &lock->mutex );
    }
}

boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return( lock->thread == IOThreadSelf());
}
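
/*
 * Note: a true result from IORecursiveLockHaveLock is only meaningful for the
 * calling thread itself; asking whether some *other* thread holds the lock is
 * inherently racy, since 'thread' can change the moment it has been read.
 */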

int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

    // We must re-establish the recursive lock no matter why we woke up;
    // otherwise we would return with the lock state corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

int	IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
                                      AbsoluteTime deadline, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
                                 (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

    // We must re-establish the recursive lock no matter why we woke up;
    // otherwise we would return with the lock state corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
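
/*
 * Sleeping on a recursive lock saves and restores the full recursion depth
 * around the Mach-level sleep. A minimal sketch, assuming a hypothetical
 * flag 'workPosted' (not part of this file):
 *
 *     IORecursiveLockLock(lock);              // possibly nested several deep
 *     while (!workPosted)
 *         IORecursiveLockSleep(lock, &workPosted, THREAD_UNINT);
 *     // recursion count is intact again here
 *     IORecursiveLockUnlock(lock);
 */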

/*
 * Complex (read/write) lock operations
 */

IORWLock * IORWLockAlloc( void )
{
    return( lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}

void	IORWLockFree( IORWLock * lock)
{
    lck_rw_free( lock, IOLockGroup);
}

lck_rw_t * IORWLockGetMachLock( IORWLock * lock)
{
    return( (lck_rw_t *)lock);
}
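
/*
 * Only allocation, teardown and the Mach-type unwrap live here; the actual
 * read/write acquire and release operations (IORWLockRead, IORWLockWrite,
 * IORWLockUnlock) are provided by IOLocks.h as thin lck_rw_* wrappers, which
 * is why they do not appear in this file.
 */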


/*
 * Spin locks
 */

IOSimpleLock * IOSimpleLockAlloc( void )
{
    return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) );
}

void IOSimpleLockInit( IOSimpleLock * lock)
{
    lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}

void IOSimpleLockFree( IOSimpleLock * lock )
{
    lck_spin_free( lock, IOLockGroup);
}

lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
    return( (lck_spin_t *)lock);
}
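
/*
 * As with the rw-locks, the lock and unlock operations themselves come from
 * IOLocks.h as lck_spin_* wrappers. Holders of an IOSimpleLock spin rather
 * than block, so the critical section must never sleep; the header also
 * provides interrupt-disabling variants (IOSimpleLockLockDisableInterrupt)
 * for use when the lock is shared with primary interrupt context.
 */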

} /* extern "C" */