xref: /xnu-11215/iokit/Kernel/IOLocks.cpp (revision e13b1fa5)
1 /*
2  * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved.
30  *
31  * HISTORY
32  *
33  */
34 
35 
36 #define	IOLOCKS_CPP	1
37 
38 #include <IOKit/system.h>
39 
40 #include <IOKit/IOReturn.h>
41 #include <IOKit/IOLib.h>
42 #include <IOKit/assert.h>
43 
44 #include <IOKit/IOLocksPrivate.h>
45 
46 extern "C" {
47 #include <kern/locks.h>
48 
49 void	IOLockInitWithState( IOLock * lock, IOLockState state)
50 {
51     if( state == kIOLockStateLocked)
52         lck_mtx_lock( lock);
53 }
54 
55 IOLock * IOLockAlloc( void )
56 {
57     return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
58 }
59 
60 void	IOLockFree( IOLock * lock)
61 {
62     lck_mtx_free( lock, IOLockGroup);
63 }
64 
65 lck_mtx_t * IOLockGetMachLock( IOLock * lock)
66 {
67     return( (lck_mtx_t *)lock);
68 }
69 
70 int	IOLockSleep( IOLock * lock, void *event, UInt32 interType)
71 {
72     return (int) lck_mtx_sleep(lock, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
73 }
74 
75 int	IOLockSleepDeadline( IOLock * lock, void *event,
76                                 AbsoluteTime deadline, UInt32 interType)
77 {
78     return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_DEFAULT, (event_t) event,
79     					(wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
80 }
81 
82 void	IOLockWakeup(IOLock * lock, void *event, bool oneThread)
83 {
84 	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
85 }
86 
87 
/*
 * Internal representation of an IORecursiveLock: a plain Mach mutex
 * plus owner bookkeeping that implements the recursion.
 */
struct _IORecursiveLock {
	lck_mtx_t	*mutex;		/* underlying Mach mutex */
	lck_grp_t	*group;		/* group the mutex was allocated from (used at free) */
	thread_t	thread;		/* current owning thread, or 0 when unheld */
	UInt32		count;		/* recursion depth held by the owner */
};
94 
95 IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
96 {
97     _IORecursiveLock * lock;
98 
99     if( lockGroup == 0 )
100         return( 0 );
101 
102     lock = IONew( _IORecursiveLock, 1 );
103     if( !lock )
104         return( 0 );
105 
106     lock->mutex = lck_mtx_alloc_init( lockGroup, LCK_ATTR_NULL );
107     if( lock->mutex ) {
108 		lock->group = lockGroup;
109         lock->thread = 0;
110         lock->count  = 0;
111     } else {
112         IODelete( lock, _IORecursiveLock, 1 );
113         lock = 0;
114     }
115 
116     return( (IORecursiveLock *) lock );
117 }
118 
119 
120 IORecursiveLock * IORecursiveLockAlloc( void )
121 {
122     return IORecursiveLockAllocWithLockGroup( IOLockGroup );
123 }
124 
125 void IORecursiveLockFree( IORecursiveLock * _lock )
126 {
127     _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
128 
129     lck_mtx_free( lock->mutex, lock->group );
130     IODelete( lock, _IORecursiveLock, 1 );
131 }
132 
133 lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
134 {
135     return( lock->mutex );
136 }
137 
138 void IORecursiveLockLock( IORecursiveLock * _lock)
139 {
140     _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
141 
142     if( lock->thread == IOThreadSelf())
143         lock->count++;
144     else {
145         lck_mtx_lock( lock->mutex );
146         assert( lock->thread == 0 );
147         assert( lock->count == 0 );
148         lock->thread = IOThreadSelf();
149         lock->count = 1;
150     }
151 }
152 
153 boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
154 {
155     _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
156 
157     if( lock->thread == IOThreadSelf()) {
158         lock->count++;
159 	return( true );
160     } else {
161         if( lck_mtx_try_lock( lock->mutex )) {
162             assert( lock->thread == 0 );
163             assert( lock->count == 0 );
164             lock->thread = IOThreadSelf();
165             lock->count = 1;
166             return( true );
167 	}
168     }
169     return( false );
170 }
171 
172 void IORecursiveLockUnlock( IORecursiveLock * _lock)
173 {
174     _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
175 
176     assert( lock->thread == IOThreadSelf() );
177 
178     if( 0 == (--lock->count)) {
179         lock->thread = 0;
180         lck_mtx_unlock( lock->mutex );
181     }
182 }
183 
184 boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
185 {
186     _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
187 
188     return( lock->thread == IOThreadSelf());
189 }
190 
191 int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
192 {
193     _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
194     UInt32 count = lock->count;
195     int res;
196 
197     assert(lock->thread == IOThreadSelf());
198     assert(lock->count == 1 || interType == THREAD_UNINT);
199 
200     lock->count = 0;
201     lock->thread = 0;
202     res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
203 
204     // Must re-establish the recursive lock no matter why we woke up
205     // otherwise we would potentially leave the return path corrupted.
206     assert(lock->thread == 0);
207     assert(lock->count == 0);
208     lock->thread = IOThreadSelf();
209     lock->count = count;
210     return res;
211 }
212 
213 void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
214 {
215     thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
216 }
217 
218 /*
219  * Complex (read/write) lock operations
220  */
221 
222 IORWLock * IORWLockAlloc( void )
223 {
224     return(  lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL)  );
225 }
226 
227 void	IORWLockFree( IORWLock * lock)
228 {
229     lck_rw_free( lock, IOLockGroup);
230 }
231 
232 lck_rw_t * IORWLockGetMachLock( IORWLock * lock)
233 {
234     return( (lck_rw_t *)lock);
235 }
236 
237 
238 /*
239  * Spin locks
240  */
241 
242 IOSimpleLock * IOSimpleLockAlloc( void )
243 {
244     return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) );
245 }
246 
247 void IOSimpleLockInit( IOSimpleLock * lock)
248 {
249     lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
250 }
251 
252 void IOSimpleLockFree( IOSimpleLock * lock )
253 {
254     lck_spin_free( lock, IOLockGroup);
255 }
256 
257 lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock)
258 {
259     return( (lck_spin_t *)lock);
260 }
261 
262 } /* extern "C" */
263 
264 
265