xref: /xnu-11215/iokit/Kernel/IOLocks.cpp (revision c1dac77f)
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved.
 *
 * HISTORY
 *
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

extern "C" {
#include <kern/simple_lock.h>
#include <machine/machine_routines.h>

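/*
 * Mutex lock operations
 */
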
IOLock * IOLockAlloc( void )
{
    return( mutex_alloc(ETAP_IO_AHA) );
}

void IOLockFree( IOLock * lock)
{
    mutex_free( lock );
}

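/*
 * Initialize a caller-allocated IOLock, optionally returning it already
 * locked.
 */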
void IOLockInitWithState( IOLock * lock, IOLockState state)
{
    mutex_init( lock, ETAP_IO_AHA);

    if( state == kIOLockStateLocked)
        IOLockLock( lock);
}

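/*
 * Recursive lock operations
 *
 * A recursive lock wraps a kernel mutex together with the owning thread
 * and a recursion count, so the owning thread may take the lock again
 * without deadlocking.
 */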
struct _IORecursiveLock {
    mutex_t *   mutex;
    thread_t    thread;
    UInt32      count;
};

IORecursiveLock * IORecursiveLockAlloc( void )
{
    _IORecursiveLock * lock;

    lock = IONew( _IORecursiveLock, 1);
    if( !lock)
        return( 0 );

    lock->mutex = mutex_alloc(ETAP_IO_AHA);
    if( lock->mutex) {
        lock->thread = 0;
        lock->count  = 0;
    } else {
        IODelete( lock, _IORecursiveLock, 1);
        lock = 0;
    }

    return( (IORecursiveLock *) lock );
}

void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    mutex_free( lock->mutex );
    IODelete( lock, _IORecursiveLock, 1);
}

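/*
 * If the calling thread already owns the lock, just bump the recursion
 * count; otherwise acquire the underlying mutex and record ownership.
 */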
void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf())
        lock->count++;
    else {
        _mutex_lock( lock->mutex );
        assert( lock->thread == 0 );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}

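/*
 * Non-blocking variant: returns true if the lock was obtained (or is
 * already held by the calling thread), false otherwise.
 */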
boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
        if( _mutex_try( lock->mutex )) {
            assert( lock->thread == 0 );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return( true );
        }
    }
    return( false );
}

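/*
 * Drop one level of recursion; the underlying mutex is released only when
 * the count reaches zero.
 */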
void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    if( 0 == (--lock->count)) {
        lock->thread = 0;
        mutex_unlock( lock->mutex );
    }
}

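/* Return true if the calling thread currently owns the lock. */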
boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return( lock->thread == IOThreadSelf());
}

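/*
 * Atomically drop the recursive lock (saving its recursion count), block
 * waiting on the given event, and then restore ownership and the saved
 * count.  Note that the mutex is re-acquired only when thread_block()
 * returns THREAD_AWAKENED.
 */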
int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());
    assert(lock->count == 1 || interType == THREAD_UNINT);

    assert_wait((event_t) event, (int) interType);
    lock->count = 0;
    lock->thread = 0;
    mutex_unlock(lock->mutex);

    res = thread_block(0);

    if (THREAD_AWAKENED == res) {
        _mutex_lock(lock->mutex);
        assert(lock->thread == 0);
        assert(lock->count == 0);
        lock->thread = IOThreadSelf();
        lock->count = count;
    }

    return res;
}

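/*
 * Wake one thread (oneThread == true) or all threads blocked in
 * IORecursiveLockSleep() on the given event.
 */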
void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}

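/*
 * Illustrative usage sketch (not part of the original source): a
 * hypothetical client waits for work under a recursive lock, re-checking
 * its condition after every wakeup, while a producer signals it.  The
 * ExampleState type exists only for this example.
 */
#if 0
struct ExampleState {
    IORecursiveLock *   lock;           /* from IORecursiveLockAlloc() */
    bool                workAvailable;
};

static void ExampleConsumer( ExampleState * state )
{
    IORecursiveLockLock( state->lock );
    while( !state->workAvailable )
        IORecursiveLockSleep( state->lock, &state->workAvailable, THREAD_UNINT );
    /* consume the work while still holding the lock */
    state->workAvailable = false;
    IORecursiveLockUnlock( state->lock );
}

static void ExampleProducer( ExampleState * state )
{
    IORecursiveLockLock( state->lock );
    state->workAvailable = true;
    IORecursiveLockWakeup( state->lock, &state->workAvailable, false );
    IORecursiveLockUnlock( state->lock );
}
#endif
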
/*
 * Complex (read/write) lock operations
 */

IORWLock * IORWLockAlloc( void )
{
    IORWLock * lock;

    lock = lock_alloc( true, ETAP_IO_AHA, ETAP_IO_AHA);

    return( lock);
}

void IORWLockFree( IORWLock * lock)
{
    lock_free( lock );
}

/*
 * Spin locks
 */

IOSimpleLock * IOSimpleLockAlloc( void )
{
    IOSimpleLock * lock;

    lock = (IOSimpleLock *) IOMalloc( sizeof(IOSimpleLock));
    if( lock)
        IOSimpleLockInit( lock );

    return( lock );
}

void IOSimpleLockInit( IOSimpleLock * lock)
{
    simple_lock_init( (simple_lock_t) lock, ETAP_IO_AHA );
}

void IOSimpleLockFree( IOSimpleLock * lock )
{
    IOFree( lock, sizeof(IOSimpleLock));
}

} /* extern "C" */