/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

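// IODataQueue.h emits a deprecation warning unless DISABLE_DATAQUEUE_WARNING is
// defined, so define it just around that include to keep this implementation
// file building cleanly.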
#define DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueue.h>

#undef DISABLE_DATAQUEUE_WARNING
#include <vm/vm_kern_xnu.h>

#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/OSAtomic.h>
#include <libkern/c++/OSSharedPtr.h>

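// Kernel-private state hung off IODataQueue's (void *) notifyMsg member:
// "msg" is the preconstructed Mach message sent when data becomes available,
// and "queueSize" caches the capacity requested at init time (excluding the
// shared memory header) so free() and getMemoryDescriptor() can recompute the
// size of the shared allocation.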
struct IODataQueueInternal {
	mach_msg_header_t msg;
	UInt32 queueSize;
};

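// Some kernel headers (e.g. kern/queue.h) define enqueue/dequeue as macros;
// undefine them so they do not mangle the IODataQueue method names below.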
#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super OSObject

OSDefineMetaClassAndStructors(IODataQueue, OSObject)

OSSharedPtr<IODataQueue>
IODataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IODataQueue> dataQueue = OSMakeShared<IODataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			return nullptr;
		}
	}

	return dataQueue;
}

OSSharedPtr<IODataQueue>
IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IODataQueue> dataQueue = OSMakeShared<IODataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			return nullptr;
		}
	}

	return dataQueue;
}

Boolean
IODataQueue::initWithCapacity(UInt32 size)
{
	vm_size_t allocSize = 0;
	kern_return_t kr;

	if (!super::init()) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);

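	// round_page() can wrap past the top of vm_size_t; treat a result smaller
	// than the requested size as an overflow.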
	if (allocSize < size) {
		return false;
	}

	assert(!notifyMsg);
	notifyMsg = IOMallocType(IODataQueueInternal);
	((IODataQueueInternal *)notifyMsg)->queueSize = size;

	kr = kmem_alloc(kernel_map, (vm_offset_t *)&dataQueue, allocSize,
	    (kma_flags_t)(KMA_DATA | KMA_ZERO), IOMemoryTag(kernel_map));
	if (kr != KERN_SUCCESS) {
		return false;
	}

	dataQueue->queueSize = size;
//	dataQueue->head = 0;
//	dataQueue->tail = 0;

	return true;
}

Boolean
IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
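	// Capacity is sized for (numEntries + 1) entries: enqueue() never lets the
	// tail catch up to the head, so the extra entry's worth of slack keeps a
	// queue holding numEntries entries from reporting itself full early.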
	// Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
	//  check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
	    //  check (numEntries + 1)
	    (numEntries > UINT32_MAX - 1) ||
	    //  check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX / (numEntries + 1))) {
		return false;
	}

	return initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize));
}

void
IODataQueue::free()
{
	if (notifyMsg) {
		if (dataQueue) {
			kmem_free(kernel_map, (vm_offset_t)dataQueue,
			    round_page(((IODataQueueInternal *)notifyMsg)->queueSize +
			    DATA_QUEUE_MEMORY_HEADER_SIZE));
			dataQueue = NULL;
		}

		IOFreeType(notifyMsg, IODataQueueInternal);
		notifyMsg = NULL;
	}

	super::free();

	return;
}

Boolean
IODataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32 head;
	UInt32 tail;
	UInt32 newTail;
	const UInt32 entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	UInt32 queueSize;
	IODataQueueEntry * entry;

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);
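	// tail is only ever written by the enqueuer, so a relaxed load suffices;
	// head is written by the dequeuer, and loading it with acquire semantics
	// keeps our subsequent writes into reclaimed space ordered after the
	// dequeuer's reads of it.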

	// Check for underflow of (dataQueue->queueSize - tail)
	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if ((queueSize < tail) || (queueSize < head)) {
		return false;
	}

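	// When tail >= head the free space is split between [tail, queueSize) and
	// [0, head); otherwise it is the single region [tail, head).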
	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= queueSize)) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}

void
IODataQueue::setNotificationPort(mach_port_t port)
{
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	bzero(msgh, sizeof(mach_msg_header_t));
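	// A new port simply replaces whatever was registered before.
	// MACH_MSG_TYPE_COPY_SEND makes each notification copy the send right
	// rather than consume it.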
	msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msgh->msgh_size = sizeof(mach_msg_header_t);
	msgh->msgh_remote_port = port;
}

void
IODataQueue::sendDataAvailableNotification()
{
	kern_return_t kr;
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	if (msgh->msgh_remote_port) {
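		// Non-blocking send: with MACH64_SEND_TIMEOUT and a timeout of
		// MACH_MSG_TIMEOUT_NONE the call returns MACH_SEND_TIMED_OUT instead
		// of blocking when the port's queue is full, i.e. when an earlier
		// notification has not been received yet.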
		kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size,
		    MACH64_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
		switch (kr) {
		case MACH_SEND_TIMED_OUT: // Notification already sent
		case MACH_MSG_SUCCESS:
		case MACH_SEND_NO_BUFFER:
			break;
		default:
			IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/ "IODataQueue", kr);
			break;
		}
	}
}

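// Returns a descriptor covering the shared header plus the queue data region,
// typically mapped into the user client's task so it can dequeue directly.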
OSSharedPtr<IOMemoryDescriptor>
IODataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;
	UInt32 queueSize;

	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}
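
/*
 * Typical driver-side usage (illustrative sketch only; MySampleRecord and
 * clientNotifyPort are placeholders, not part of this file):
 *
 *     OSSharedPtr<IODataQueue> queue = IODataQueue::withCapacity(16 * 1024);
 *     if (queue) {
 *         queue->setNotificationPort(clientNotifyPort);
 *
 *         MySampleRecord sample;   // filled in by the driver
 *         if (!queue->enqueue(&sample, sizeof(sample))) {
 *             // Queue full: the sample is dropped, nothing blocks.
 *         }
 *     }
 *
 * The user client maps the memory returned by getMemoryDescriptor() into its
 * task and dequeues entries with the IODataQueueClient functions from the
 * user-space IOKit framework.
 */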