1 /*
2  * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <IOKit/IOSharedDataQueue.h>
30 #include <IOKit/IODataQueueShared.h>
31 #include <IOKit/IOLib.h>
32 #include <IOKit/IOMemoryDescriptor.h>
33 
// Some BSD headers define enqueue/dequeue as macros; remove them so the
// method names below are not rewritten by the preprocessor.
#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

// Convenience alias used by IOKit classes to call the parent implementation.
#define super IODataQueue

// Registers IOSharedDataQueue with the IOKit runtime-typing system and
// generates its metaclass and default constructors/destructors.
OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)
45 
46 IOSharedDataQueue *IOSharedDataQueue::withCapacity(UInt32 size)
47 {
48     IOSharedDataQueue *dataQueue = new IOSharedDataQueue;
49 
50     if (dataQueue) {
51         if  (!dataQueue->initWithCapacity(size)) {
52             dataQueue->release();
53             dataQueue = 0;
54         }
55     }
56 
57     return dataQueue;
58 }
59 
60 IOSharedDataQueue *IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
61 {
62     IOSharedDataQueue *dataQueue = new IOSharedDataQueue;
63 
64     if (dataQueue) {
65         if (!dataQueue->initWithEntries(numEntries, entrySize)) {
66             dataQueue->release();
67             dataQueue = 0;
68         }
69     }
70 
71     return dataQueue;
72 }
73 
/*
 * Initialize the queue with a data capacity of 'size' bytes.
 *
 * Layout of the allocation: [IODataQueueMemory header][size bytes of queue
 * data][IODataQueueAppendix]. The whole region is page-rounded because it is
 * later mapped into a user task via getMemoryDescriptor().
 *
 * Returns false on arithmetic overflow or any allocation failure. Partially
 * allocated state (_reserved, dataQueue, notifyMsg) is reclaimed by free()
 * when the failed object is released.
 */
Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
{
    IODataQueueAppendix *   appendix;
    vm_size_t               allocSize;

    if (!super::init()) {
        return false;
    }

    // Expansion data holds the authoritative queue size (see getQueueSize()).
    _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
    if (!_reserved) {
        return false;
    }

    // Reject sizes whose header+appendix overhead would overflow UInt32.
    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
        return false;
    }

    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

    // round_page() can wrap on vm_size_t; detect that here.
    if (allocSize < size) {
        return false;
    }

    // Page-aligned so the region can be mapped into userspace cleanly.
    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
    if (dataQueue == 0) {
        return false;
    }
    bzero(dataQueue, allocSize);

    dataQueue->queueSize    = size;
//  dataQueue->head         = 0;
//  dataQueue->tail         = 0;

    // Keep a kernel-private copy of the size; the shared copy above is
    // writable by userspace and must never be trusted.
    if (!setQueueSize(size)) {
        return false;
    }

    // Appendix (notification port state) lives just past the queue data.
    appendix            = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
    appendix->version   = 0;

    if (!notifyMsg) {
        notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
        if (!notifyMsg)
            return false;
    }
    bzero(notifyMsg, sizeof(mach_msg_header_t));

    setNotificationPort(MACH_PORT_NULL);

    return true;
}
126 
127 void IOSharedDataQueue::free()
128 {
129     if (dataQueue) {
130         IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
131         dataQueue = NULL;
132         if (notifyMsg) {
133             IOFree(notifyMsg, sizeof(mach_msg_header_t));
134             notifyMsg = NULL;
135         }
136     }
137 
138     if (_reserved) {
139         IOFree (_reserved, sizeof(struct ExpansionData));
140         _reserved = NULL;
141     }
142 
143     super::free();
144 }
145 
146 IOMemoryDescriptor *IOSharedDataQueue::getMemoryDescriptor()
147 {
148     IOMemoryDescriptor *descriptor = 0;
149 
150     if (dataQueue != 0) {
151         descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
152     }
153 
154     return descriptor;
155 }
156 
157 
158 IODataQueueEntry * IOSharedDataQueue::peek()
159 {
160     IODataQueueEntry *entry      = 0;
161     UInt32            headOffset;
162     UInt32            tailOffset;
163 
164     if (!dataQueue) {
165         return NULL;
166     }
167 
168     // Read head and tail with acquire barrier
169     headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
170     tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);
171 
172     if (headOffset != tailOffset) {
173         IODataQueueEntry *  head        = 0;
174         UInt32              headSize    = 0;
175         UInt32              headOffset  = dataQueue->head;
176         UInt32              queueSize   = getQueueSize();
177 
178         if (headOffset >= queueSize) {
179             return NULL;
180         }
181 
182         head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
183         headSize     = head->size;
184 
185         // Check if there's enough room before the end of the queue for a header.
186         // If there is room, check if there's enough room to hold the header and
187         // the data.
188 
189         if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
190             (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
191             (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
192             (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
193             // No room for the header or the data, wrap to the beginning of the queue.
194             // Note: wrapping even with the UINT32_MAX checks, as we have to support
195             // queueSize of UINT32_MAX
196             entry = dataQueue->queue;
197         } else {
198             entry = head;
199         }
200     }
201 
202     return entry;
203 }
204 
205 Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
206 {
207     UInt32             head;
208     UInt32             tail;
209     UInt32             newTail;
210     const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
211     IODataQueueEntry * entry;
212 
213     // Force a single read of head and tail
214     head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
215     tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
216 
217     // Check for overflow of entrySize
218     if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
219         return false;
220     }
221     // Check for underflow of (getQueueSize() - tail)
222     if (getQueueSize() < tail || getQueueSize() < head) {
223         return false;
224     }
225 
226     if ( tail >= head )
227     {
228         // Is there enough room at the end for the entry?
229         if ((entrySize <= UINT32_MAX - tail) &&
230             ((tail + entrySize) <= getQueueSize()) )
231         {
232             entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
233 
234             entry->size = dataSize;
235             memcpy(&entry->data, data, dataSize);
236 
237             // The tail can be out of bound when the size of the new entry
238             // exactly matches the available space at the end of the queue.
239             // The tail can range from 0 to dataQueue->queueSize inclusive.
240 
241             newTail = tail + entrySize;
242         }
243         else if ( head > entrySize )     // Is there enough room at the beginning?
244         {
245             // Wrap around to the beginning, but do not allow the tail to catch
246             // up to the head.
247 
248             dataQueue->queue->size = dataSize;
249 
250             // We need to make sure that there is enough room to set the size before
251             // doing this. The user client checks for this and will look for the size
252             // at the beginning if there isn't room for it at the end.
253 
254             if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
255             {
256                 ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
257             }
258 
259             memcpy(&dataQueue->queue->data, data, dataSize);
260             newTail = entrySize;
261         }
262         else
263         {
264             return false;    // queue is full
265         }
266     }
267     else
268     {
269         // Do not allow the tail to catch up to the head when the queue is full.
270         // That's why the comparison uses a '>' rather than '>='.
271 
272         if ( (head - tail) > entrySize )
273         {
274             entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);
275 
276             entry->size = dataSize;
277             memcpy(&entry->data, data, dataSize);
278             newTail = tail + entrySize;
279         }
280         else
281         {
282             return false;    // queue is full
283         }
284     }
285 
286     // Update tail with release barrier
287     __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);
288 
289     // Send notification (via mach message) that data is available.
290 
291     if ( ( tail == head )                                                   /* queue was empty prior to enqueue() */
292       || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED) ) )   /* queue was emptied during enqueue() */
293     {
294         sendDataAvailableNotification();
295     }
296 
297     return true;
298 }
299 
/*
 * Remove the next entry from the queue.
 *
 * data     - destination buffer for the entry payload; may be NULL to
 *            discard the entry without copying it.
 * dataSize - in: capacity of 'data'; out: actual size of the entry.
 *            Must be non-NULL when 'data' is non-NULL.
 *
 * Returns TRUE if an entry was consumed (head advanced). Returns FALSE if
 * the queue is unallocated or empty, the caller's buffer is too small, or
 * the shared head/tail state fails validation. On the buffer-too-small path
 * the entry is NOT consumed, but *dataSize is still updated so the caller
 * can retry with a large enough buffer.
 *
 * The queue memory is shared with userspace, so head/tail are read exactly
 * once via atomics and every offset/size is bounds-checked (including
 * UInt32 overflow) before being used for pointer arithmetic.
 */
Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
    Boolean             retVal          = TRUE;
    IODataQueueEntry *  entry           = 0;
    UInt32              entrySize       = 0;
    UInt32              headOffset      = 0;
    UInt32              tailOffset      = 0;
    UInt32              newHeadOffset   = 0;

    if (!dataQueue) {
        return false;
    }

    // Read head and tail with acquire barrier
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *  head        = 0;
        UInt32              headSize    = 0;
        UInt32              queueSize   = getQueueSize();

        // Reject a head offset pushed out of bounds by userspace.
        // (head == queueSize is tolerated: a previous enqueue may have
        // landed exactly at the end, which wraps on the next read.)
        if (headOffset > queueSize) {
            return false;
        }

        head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize     = head->size;

        // we wrapped around to beginning, so read from there
        // either there was not even room for the header
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            // or there was room for the header, but not for the data
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry           = dataQueue->queue;
            entrySize       = entry->size;
            // Validate the wrapped entry's size (also userspace-writable).
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return false;
            }
            newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
            // else it is at the end
        } else {
            entry           = head;
            entrySize       = entry->size;
            // Validate the in-place entry's size against overflow and bounds.
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return false;
            }
            newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
    }

    if (entry) {
        if (data) {
            if (dataSize) {
                if (entrySize <= *dataSize) {
                    memcpy(data, &(entry->data), entrySize);
                    // Release store: the copy above must complete before the
                    // producer can see the freed space and overwrite it.
                    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
                } else {
                    retVal = FALSE;
                }
            } else {
                retVal = FALSE;
            }
        } else {
            // NULL data: consume the entry without copying it out.
            __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);
        }

        if (dataSize) {
            *dataSize = entrySize;
        }
    } else {
        retVal = FALSE;
    }

    return retVal;
}
383 
384 UInt32 IOSharedDataQueue::getQueueSize()
385 {
386     if (!_reserved) {
387         return 0;
388     }
389     return _reserved->queueSize;
390 }
391 
392 Boolean IOSharedDataQueue::setQueueSize(UInt32 size)
393 {
394     if (!_reserved) {
395         return false;
396     }
397     _reserved->queueSize = size;
398     return true;
399 }
400 
// Reserved vtable slots for future binary-compatible expansion of the class.
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);
409