/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/c++/OSSharedPtr.h>

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			return nullptr;
		}
	}

	return dataQueue;
}

OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			return nullptr;
		}
	}

	return dataQueue;
}
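
/*
 * Illustrative sketch (not part of this file): how a driver might create one
 * of these queues with the factory methods above. MyDriver, fDataQueue, and
 * kMyQueueCapacity are hypothetical names; the IOSharedDataQueue calls are
 * the ones defined in this class.
 *
 *   bool
 *   MyDriver::setupQueue()
 *   {
 *       fDataQueue = IOSharedDataQueue::withCapacity(kMyQueueCapacity);
 *       if (!fDataQueue) {
 *           return false;
 *       }
 *       // The notification port usually arrives later from the user client
 *       // (e.g. via IOUserClient::registerNotificationPort) and is handed to
 *       // the queue with setNotificationPort().
 *       return true;
 *   }
 */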

Boolean
IOSharedDataQueue::initWithCapacity(UInt32 size)
{
	IODataQueueAppendix *   appendix;
	vm_size_t               allocSize;

	if (!super::init()) {
		return false;
	}

	_reserved = IOMallocType(ExpansionData);
	if (!_reserved) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

	if (allocSize < size) {
		return false;
	}

	dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
	if (dataQueue == NULL) {
		return false;
	}
	bzero(dataQueue, allocSize);

	dataQueue->queueSize    = size;
//  dataQueue->head         = 0;
//  dataQueue->tail         = 0;

	if (!setQueueSize(size)) {
		return false;
	}

	appendix            = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
	appendix->version   = 0;

	if (!notifyMsg) {
		notifyMsg = IOMallocType(mach_msg_header_t);
		if (!notifyMsg) {
			return false;
		}
	}
	bzero(notifyMsg, sizeof(mach_msg_header_t));

	setNotificationPort(MACH_PORT_NULL);

	return true;
}
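
/*
 * For reference, the region allocated above is laid out roughly as follows
 * (the header and appendix sizes are the DATA_QUEUE_MEMORY_* constants from
 * IODataQueueShared.h, and the whole region is rounded up to a page multiple):
 *
 *   +---------------------------+--------------------+----------------------+
 *   | IODataQueueMemory header  | entry area         | IODataQueueAppendix  |
 *   | (queueSize, head, tail)   | ('size' bytes of   | (version field set   |
 *   |                           |  IODataQueueEntry) |  above)              |
 *   +---------------------------+--------------------+----------------------+
 */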

void
IOSharedDataQueue::free()
{
	if (dataQueue) {
		IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
		dataQueue = NULL;
		if (notifyMsg) {
			IOFreeType(notifyMsg, mach_msg_header_t);
			notifyMsg = NULL;
		}
	}

	if (_reserved) {
		IOFreeType(_reserved, ExpansionData);
		_reserved = NULL;
	}

	super::free();
}

OSSharedPtr<IOMemoryDescriptor>
IOSharedDataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;

	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}
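
/*
 * Illustrative sketch (not part of this file): a user client commonly exposes
 * this descriptor so user space can map the queue. MyUserClient, fDataQueue,
 * and kMyQueueMemoryType are hypothetical; clientMemoryForType() is the
 * standard IOUserClient override for this purpose.
 *
 *   IOReturn
 *   MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits *options,
 *       IOMemoryDescriptor **memory)
 *   {
 *       if (type != kMyQueueMemoryType || !fDataQueue) {
 *           return kIOReturnBadArgument;
 *       }
 *       OSSharedPtr<IOMemoryDescriptor> desc = fDataQueue->getMemoryDescriptor();
 *       if (!desc) {
 *           return kIOReturnNoMemory;
 *       }
 *       *memory = desc.detach(); // caller takes ownership of this reference
 *       return kIOReturnSuccess;
 *   }
 */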


IODataQueueEntry *
IOSharedDataQueue::peek()
{
	IODataQueueEntry *entry      = NULL;
	UInt32            headOffset;
	UInt32            tailOffset;

	if (!dataQueue) {
		return NULL;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32              headSize     = 0;
		UInt32              headOffset   = dataQueue->head;
		UInt32              queueSize    = getQueueSize();

		if (headOffset > queueSize) {
			return NULL;
		}

		head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize     = head->size;

		// Check if there's enough room before the end of the queue for a header.
		// If there is room, check if there's enough room to hold the header and
		// the data.

		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// No room for the header or the data, wrap to the beginning of the queue.
			// Note: wrapping even with the UINT32_MAX checks, as we have to support
			// queueSize of UINT32_MAX
			entry = dataQueue->queue;
		} else {
			entry = (IODataQueueEntry *)head;
		}
	}

	return entry;
}

Boolean
IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	IODataQueueEntry * entry;

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}
	// Check for underflow of (getQueueSize() - tail)
	if (getQueueSize() < tail || getQueueSize() < head) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= getQueueSize())) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			// The tail can be out of bounds when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}
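
/*
 * Illustrative sketch (not part of this file): enqueueing a fixed-size record
 * from driver code. MyEvent, MyDriver, and fDataQueue are hypothetical names.
 *
 *   struct MyEvent {
 *       uint64_t timestamp;
 *       uint32_t code;
 *   };
 *
 *   void
 *   MyDriver::postEvent(uint64_t timestamp, uint32_t code)
 *   {
 *       MyEvent event = { timestamp, code };
 *       // enqueue() copies the bytes into the shared ring; if the queue was
 *       // empty beforehand it also sends the data-available notification.
 *       if (!fDataQueue->enqueue(&event, sizeof(event))) {
 *           // Queue full: the consumer is not keeping up. Drop or count it.
 *       }
 *   }
 */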

Boolean
IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
	Boolean             retVal          = TRUE;
	volatile IODataQueueEntry *  entry  = NULL;
	UInt32              entrySize       = 0;
	UInt32              headOffset      = 0;
	UInt32              tailOffset      = 0;
	UInt32              newHeadOffset   = 0;

	if (!dataQueue || (data && !dataSize)) {
		return false;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32              headSize     = 0;
		UInt32              queueSize    = getQueueSize();

		if (headOffset > queueSize) {
			return false;
		}

		head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize     = head->size;

		// If the entry at headOffset cannot fit before the end of the queue,
		// the writer wrapped around and the entry lives at the beginning:
		// either there was not even room for the header
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // or there was room for the header, but not for the data
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// Note: we have to wrap to the beginning even with the UINT32_MAX checks
			// because we have to support a queueSize of UINT32_MAX.
			entry           = dataQueue->queue;
			entrySize       = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return false;
			}
			newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
			// else it is at the end
		} else {
			entry           = head;
			entrySize       = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return false;
			}
			newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// empty queue
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// not enough space
			return false;
		}
		__nochk_memcpy(data, (void *)entry->data, entrySize);
		*dataSize = entrySize;
	}

	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}
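
/*
 * Illustrative sketch (not part of this file): the usual user-space consumer
 * does not call this kernel-side dequeue(); it maps the queue memory and uses
 * the IODataQueueClient routines from IOKit.framework. The connection setup
 * and the kMyQueueMemoryType / kMyNotificationType constants are hypothetical.
 *
 *   mach_vm_address_t address = 0;
 *   mach_vm_size_t    size    = 0;
 *   IOConnectMapMemory64(connect, kMyQueueMemoryType, mach_task_self(),
 *       &address, &size, kIOMapAnywhere);
 *   IODataQueueMemory *queue = (IODataQueueMemory *)address;
 *
 *   mach_port_t port = IODataQueueAllocateNotificationPort();
 *   IOConnectSetNotificationPort(connect, kMyNotificationType, port, 0);
 *
 *   uint8_t  buffer[256];
 *   uint32_t bufferSize = sizeof(buffer);
 *   while (IODataQueueWaitForAvailableData(queue, port) == kIOReturnSuccess) {
 *       while (IODataQueueDequeue(queue, buffer, &bufferSize) == kIOReturnSuccess) {
 *           // process bufferSize bytes ...
 *           bufferSize = sizeof(buffer);
 *       }
 *   }
 */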

UInt32
IOSharedDataQueue::getQueueSize()
{
	if (!_reserved) {
		return 0;
	}
	return _reserved->queueSize;
}

Boolean
IOSharedDataQueue::setQueueSize(UInt32 size)
{
	if (!_reserved) {
		return false;
	}
	_reserved->queueSize = size;
	return true;
}

OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);