/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

IOSharedDataQueue *
IOSharedDataQueue::withCapacity(UInt32 size)
{
	IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			dataQueue->release();
			dataQueue = NULL;
		}
	}

	return dataQueue;
}
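
/*
 * Usage sketch (illustrative, not from the original file): a driver that
 * owns an IOSharedDataQueue typically creates it once, registers the
 * notification port handed up by its user client, and enqueues records
 * from driver context. The names kQueueCapacity, port, and event below
 * are hypothetical.
 *
 *   IOSharedDataQueue *queue = IOSharedDataQueue::withCapacity(kQueueCapacity);
 *   if (queue) {
 *       queue->setNotificationPort(port);       // wakes a blocked consumer
 *       queue->enqueue(&event, sizeof(event));  // copies the record in
 *   }
 */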

IOSharedDataQueue *
IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	IOSharedDataQueue *dataQueue = new IOSharedDataQueue;

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			dataQueue->release();
			dataQueue = NULL;
		}
	}

	return dataQueue;
}

Boolean
IOSharedDataQueue::initWithCapacity(UInt32 size)
{
	IODataQueueAppendix *   appendix;
	vm_size_t               allocSize;

	if (!super::init()) {
		return false;
	}

	_reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData));
	if (!_reserved) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

	if (allocSize < size) {
		return false;
	}

	dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
	if (dataQueue == NULL) {
		return false;
	}
	bzero(dataQueue, allocSize);

	dataQueue->queueSize    = size;
	// head and tail are left at 0; the bzero() above already cleared them.

	if (!setQueueSize(size)) {
		return false;
	}

	appendix            = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
	appendix->version   = 0;

	if (!notifyMsg) {
		notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
		if (!notifyMsg) {
			return false;
		}
	}
	bzero(notifyMsg, sizeof(mach_msg_header_t));

	setNotificationPort(MACH_PORT_NULL);

	return true;
}
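
/*
 * Layout note (sketch, derived from the allocation above): the single
 * buffer shared with user space is laid out as
 *
 *   | IODataQueueMemory header | data region (size bytes) | IODataQueueAppendix |
 *
 * with the appendix at (UInt8 *)dataQueue + DATA_QUEUE_MEMORY_HEADER_SIZE + size.
 * This is why initWithCapacity() first rejects any size for which
 * size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE
 * would overflow a UInt32, and only then rounds the total up to a whole
 * number of pages for IOMallocAligned().
 */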

void
IOSharedDataQueue::free()
{
	if (dataQueue) {
		IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
		dataQueue = NULL;
		if (notifyMsg) {
			IOFree(notifyMsg, sizeof(mach_msg_header_t));
			notifyMsg = NULL;
		}
	}

	if (_reserved) {
		IOFree(_reserved, sizeof(struct ExpansionData));
		_reserved = NULL;
	}

	super::free();
}

IOMemoryDescriptor *
IOSharedDataQueue::getMemoryDescriptor()
{
	IOMemoryDescriptor *descriptor = NULL;

	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}
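
/*
 * Mapping sketch (illustrative): a user client would typically hand this
 * descriptor to user space from IOUserClient::clientMemoryForType(); the
 * user process then maps it with IOConnectMapMemory64() and reads it with
 * the IODataQueueClient routines. MyUserClient, fDataQueue, and
 * kMyQueueMemoryType are hypothetical names.
 *
 *   IOReturn
 *   MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits *options,
 *       IOMemoryDescriptor **memory)
 *   {
 *       if (type == kMyQueueMemoryType) {
 *           *memory = fDataQueue->getMemoryDescriptor(); // released by caller
 *           return (*memory != NULL) ? kIOReturnSuccess : kIOReturnNoMemory;
 *       }
 *       return kIOReturnBadArgument;
 *   }
 */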

IODataQueueEntry *
IOSharedDataQueue::peek()
{
	IODataQueueEntry *entry      = NULL;
	UInt32            headOffset;
	UInt32            tailOffset;

	if (!dataQueue) {
		return NULL;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32              headSize     = 0;
		UInt32              queueSize    = getQueueSize();

		// Use the headOffset loaded atomically above; re-reading
		// dataQueue->head here would race with a concurrent dequeue().
		// Like ::dequeue, allow headOffset == queueSize, which occurs
		// when the last entry ended exactly at the end of the queue.
		if (headOffset > queueSize) {
			return NULL;
		}

		head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize     = head->size;

		// Check if there's enough room before the end of the queue for a header.
		// If there is room, check if there's enough room to hold the header and
		// the data.

		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// No room for the header or the data, so the entry wrapped to the
			// beginning of the queue. Note: we wrap even with the UINT32_MAX
			// checks, as we have to support a queueSize of UINT32_MAX.
			entry = dataQueue->queue;
		} else {
			entry = (IODataQueueEntry *)head;
		}
	}

	return entry;
}
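
/*
 * Worked example (illustrative): suppose queueSize == 1024 and the last
 * entry did not fit at the end, so the enqueuer wrote it at offset 0 and
 * left head near 1024. Then headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE (or
 * headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE + headSize) exceeds queueSize,
 * the checks above take the wrap branch, and peek() returns the entry at
 * dataQueue->queue rather than the stale bytes at headOffset.
 */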

Boolean
IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	IODataQueueEntry * entry;

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}
	// Check for underflow of (getQueueSize() - tail)
	if (getQueueSize() < tail || getQueueSize() < head) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= getQueueSize())) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bounds when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// Also store the size at the old tail, if there is room for at
			// least an entry header there. The user client checks for this
			// and will look for the size at the beginning when there isn't
			// room for it at the end.

			if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}
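
/*
 * Note on the notification condition (sketch): the queue appeared empty to
 * this enqueue exactly when the tail we started from equals the head we
 * observe after the fence, so the consumer may be blocked and needs a
 * wakeup. A consumer that drains the queue on every notification (see the
 * sketch after ::dequeue) therefore costs one mach message per
 * empty-to-non-empty transition rather than one per entry.
 */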

Boolean
IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
	Boolean             retVal          = TRUE;
	volatile IODataQueueEntry *  entry  = NULL;
	UInt32              entrySize       = 0;
	UInt32              headOffset      = 0;
	UInt32              tailOffset      = 0;
	UInt32              newHeadOffset   = 0;

	if (!dataQueue || (data && !dataSize)) {
		return false;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32              headSize     = 0;
		UInt32              queueSize    = getQueueSize();

		if (headOffset > queueSize) {
			return false;
		}

		head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize     = head->size;

		// Decide whether the entry at headOffset is readable in place, or
		// whether the enqueuer wrapped it to the beginning of the queue:
		// either there was not even room for the header at headOffset,
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // or there was room for the header, but not for the data.
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// The entry wrapped, so read it from the beginning of the queue.
			// Note: we have to wrap to the beginning even with the UINT32_MAX
			// checks because we have to support a queueSize of UINT32_MAX.
			entry           = dataQueue->queue;
			entrySize       = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return false;
			}
			newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		} else {
			// Otherwise the entry is stored in place at headOffset.
			entry           = head;
			entrySize       = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return false;
			}
			newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// empty queue
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// not enough space
			return false;
		}
		__nochk_memcpy(data, (void *)entry->data, entrySize);
		*dataSize = entrySize;
	}

	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}
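
/*
 * Consumer sketch (illustrative, user space): the head/tail protocol
 * maintained here is the one consumed by the IODataQueueClient routines in
 * IOKit.framework. A typical consumer loop, with hypothetical names for
 * the mapped queue, buffer, and handler, looks like:
 *
 *   IODataQueueMemory *q = ...;  // from IOConnectMapMemory64()
 *   mach_port_t        p = IODataQueueAllocateNotificationPort();
 *
 *   while (IODataQueueWaitForAvailableData(q, p) == kIOReturnSuccess) {
 *       while (IODataQueueDataAvailable(q)) {
 *           uint32_t size = sizeof(buffer);
 *           if (IODataQueueDequeue(q, buffer, &size) == kIOReturnSuccess) {
 *               handleRecord(buffer, size);
 *           }
 *       }
 *   }
 */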

UInt32
IOSharedDataQueue::getQueueSize()
{
	if (!_reserved) {
		return 0;
	}
	return _reserved->queueSize;
}

Boolean
IOSharedDataQueue::setQueueSize(UInt32 size)
{
	if (!_reserved) {
		return false;
	}
	_reserved->queueSize = size;
	return true;
}

OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);