/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

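// Creates and returns a queue whose data area can hold 'size' bytes, or NULL
// if allocation or initialization fails.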
IOSharedDataQueue *IOSharedDataQueue::withCapacity(UInt32 size)
{
    IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithCapacity(size)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}

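// Creates and returns a queue sized to hold 'numEntries' entries of
// 'entrySize' bytes each, or NULL if allocation or initialization fails.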
IOSharedDataQueue *IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
    IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithEntries(numEntries, entrySize)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}

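// Allocates and zeroes the shared memory region: a queue header, 'size' bytes
// of data space, and an appendix, rounded up to a whole number of pages. Also
// preallocates the mach message used for data-available notifications.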
Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
{
    IODataQueueAppendix *   appendix;
    vm_size_t               allocSize;

    if (!super::init()) {
        return false;
    }

    _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
    if (!_reserved) {
        return false;
    }

    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
        return false;
    }

    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

    if (allocSize < size) {
        return false;
    }

    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
    if (dataQueue == 0) {
        return false;
    }
    bzero(dataQueue, allocSize);

    dataQueue->queueSize    = size;
//  dataQueue->head         = 0;
//  dataQueue->tail         = 0;

    if (!setQueueSize(size)) {
        return false;
    }

    appendix            = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
    appendix->version   = 0;

    if (!notifyMsg) {
        notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
        if (!notifyMsg) {
            return false;
        }
    }
    bzero(notifyMsg, sizeof(mach_msg_header_t));

    setNotificationPort(MACH_PORT_NULL);

    return true;
}

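// Frees the shared memory region, the preallocated notification message, and
// the expansion data, then calls through to the superclass.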
void IOSharedDataQueue::free()
{
    if (dataQueue) {
        IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
        dataQueue = NULL;
        if (notifyMsg) {
            IOFree(notifyMsg, sizeof(mach_msg_header_t));
            notifyMsg = NULL;
        }
    }

    if (_reserved) {
        IOFree(_reserved, sizeof(struct ExpansionData));
        _reserved = NULL;
    }

    super::free();
}

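// Returns a memory descriptor covering the whole shared region (header, data
// area, and appendix) suitable for mapping into a client task, or NULL if the
// queue memory has not been allocated.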
IOMemoryDescriptor *IOSharedDataQueue::getMemoryDescriptor()
{
    IOMemoryDescriptor *descriptor = 0;

    if (dataQueue != 0) {
        descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
    }

    return descriptor;
}

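// Returns a pointer to the entry at the head of the queue without removing
// it, or NULL if the queue is empty or the head offset is invalid.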
IODataQueueEntry * IOSharedDataQueue::peek()
{
    IODataQueueEntry *entry      = 0;
    UInt32            headOffset;
    UInt32            tailOffset;

    if (!dataQueue) {
        return NULL;
    }

    // Read head and tail with acquire barrier
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *  head        = 0;
        UInt32              headSize    = 0;
        UInt32              queueSize   = getQueueSize();

        if (headOffset >= queueSize) {
            return NULL;
        }

        head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize     = head->size;

        // Check if there's enough room before the end of the queue for a header.
        // If there is room, check if there's enough room to hold the header and
        // the data.

        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // No room for the header or the data, so the entry wraps to the
            // beginning of the queue.
            // Note: we wrap even with the UINT32_MAX checks, as we have to
            // support a queueSize of UINT32_MAX.
            entry = dataQueue->queue;
        } else {
            entry = head;
        }
    }

    return entry;
}

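// Copies 'dataSize' bytes from 'data' into a new entry at the tail of the
// queue, wrapping to the beginning when the entry does not fit before the end
// of the data area. Sends a data-available notification when the queue
// transitions from empty to non-empty. Returns false if the queue is full,
// the entry size overflows, or the stored offsets are out of range.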
Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry * entry;

    if (!dataQueue) {
        return false;
    }

    // Force a single read of head and tail
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }
    // Check for underflow of (getQueueSize() - tail)
    if (getQueueSize() < tail || getQueueSize() < head) {
        return false;
    }

    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= getQueueSize()) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.

            newTail = tail + entrySize;
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Publish the data we just enqueued
    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

    if (tail != head) {
        //
        // The memory barrier below pairs with the one in ::dequeue
        // so that either our store to the tail cannot be missed by
        // the next dequeue attempt, or we will observe the dequeuer
        // making the queue empty.
        //
        // Of course, if we already think the queue is empty,
        // there's no point paying this extra cost.
        //
        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
        head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    }

    if (tail == head) {
        // Send notification (via mach message) that data is now available.
        sendDataAvailableNotification();
    }
    return true;
}

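// Removes the entry at the head of the queue. If 'data' is non-NULL, the
// entry payload is copied out and '*dataSize' is updated with its length; the
// call fails if the caller's buffer is too small. Returns false if the queue
// is empty or the stored offsets and sizes are inconsistent.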
Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
    Boolean             retVal          = TRUE;
    IODataQueueEntry *  entry           = 0;
    UInt32              entrySize       = 0;
    UInt32              headOffset      = 0;
    UInt32              tailOffset      = 0;
    UInt32              newHeadOffset   = 0;

    if (!dataQueue || (data && !dataSize)) {
        return false;
    }

    // Read head and tail with acquire barrier
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry *  head        = 0;
        UInt32              headSize    = 0;
        UInt32              queueSize   = getQueueSize();

        if (headOffset > queueSize) {
            return false;
        }

        head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
        headSize     = head->size;

        // If there was not even room for the header at the head offset, or
        // there was room for the header but not for the data, the entry
        // wrapped around to the beginning of the queue, so read it from there.
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry           = dataQueue->queue;
            entrySize       = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return false;
            }
            newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        } else {
            // Otherwise the entry is stored at the head offset itself.
            entry           = head;
            entrySize       = entry->size;
            if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return false;
            }
            newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
    } else {
        // empty queue
        return false;
    }

    if (data) {
        if (entrySize > *dataSize) {
            // not enough space in the caller's buffer
            return false;
        }
        memcpy(data, &(entry->data), entrySize);
        *dataSize = entrySize;
    }

    // Publish the new head offset; pairs with the acquire load in ::enqueue.
    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

    if (newHeadOffset == tailOffset) {
        //
        // If we are making the queue empty, then we need to make sure
        // that either the enqueuer notices, or we notice the enqueue
        // that raced with our making of the queue empty.
        //
        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
    }

    return retVal;
}

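// Returns the capacity of the data area in bytes, or 0 if the expansion data
// has not been allocated.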
UInt32 IOSharedDataQueue::getQueueSize()
{
    if (!_reserved) {
        return 0;
    }
    return _reserved->queueSize;
}

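// Records the capacity of the data area in the expansion data; fails if the
// expansion data has not been allocated.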
Boolean IOSharedDataQueue::setQueueSize(UInt32 size)
{
    if (!_reserved) {
        return false;
    }
    _reserved->queueSize = size;
    return true;
}

OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);