/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/c++/OSSharedPtr.h>

#include <vm/vm_kern_xnu.h>
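
/*
 * Rough usage sketch (illustrative only; the notification port and the
 * capacity below are assumptions, not taken from this file):
 *
 *   OSSharedPtr<IOSharedDataQueue> queue = IOSharedDataQueue::withCapacity(16 * 1024);
 *   if (queue) {
 *       queue->setNotificationPort(port);       // wake the user client on enqueue
 *       queue->enqueue(&event, sizeof(event));  // kernel-side producer
 *   }
 *
 * The user-space consumer maps the region returned by getMemoryDescriptor()
 * and drains it with the IODataQueueClient routines.
 */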

// The kernel's <kern/queue.h> defines enqueue()/dequeue() as macros; undefine
// them in case they leaked in through the includes above, so the preprocessor
// does not rewrite the method definitions below.
#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			return nullptr;
		}
	}

	return dataQueue;
}

OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			return nullptr;
		}
	}

	return dataQueue;
}

Boolean
IOSharedDataQueue::initWithCapacity(UInt32 size)
{
	IODataQueueAppendix *   appendix;
	vm_size_t               allocSize;
	kern_return_t           kr;

	if (!super::init()) {
		return false;
	}

	_reserved = IOMallocType(ExpansionData);
	if (!_reserved) {
		return false;
	}

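	// Reject capacities whose header and appendix accounting would overflow
	// a UInt32.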
	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

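	// If rounding up to a page boundary wrapped around, fail rather than
	// under-allocate.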
	if (allocSize < size) {
		return false;
	}

	kr = kmem_alloc(kernel_map, (vm_offset_t *)&dataQueue, allocSize,
	    (kma_flags_t)(KMA_DATA | KMA_ZERO), IOMemoryTag(kernel_map));
	if (kr != KERN_SUCCESS) {
		return false;
	}

	dataQueue->queueSize    = size;
//  dataQueue->head         = 0;
//  dataQueue->tail         = 0;
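	// head and tail (commented out above) need no explicit initialization:
	// KMA_ZERO already returned zero-filled memory.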

	if (!setQueueSize(size)) {
		return false;
	}

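	// The appendix lives at the far end of the allocation, just past the
	// header and the 'size' bytes of queue data.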
	appendix            = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
	appendix->version   = 0;

	if (!notifyMsg) {
		notifyMsg = IOMallocType(mach_msg_header_t);
		if (!notifyMsg) {
			return false;
		}
	}
	bzero(notifyMsg, sizeof(mach_msg_header_t));

	setNotificationPort(MACH_PORT_NULL);

	return true;
}

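// free() must tolerate partially constructed objects: the init failure paths
// above return false and rely on the destructor to release whatever was
// already allocated.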
void
IOSharedDataQueue::free()
{
	if (dataQueue) {
		kmem_free(kernel_map, (vm_offset_t)dataQueue, round_page(getQueueSize() +
		    DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
		dataQueue = NULL;
		if (notifyMsg) {
			IOFreeType(notifyMsg, mach_msg_header_t);
			notifyMsg = NULL;
		}
	}

	if (_reserved) {
		IOFreeType(_reserved, ExpansionData);
		_reserved = NULL;
	}

	super::free();
}

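// A user client typically hands this descriptor to user space (for example
// from IOUserClient::clientMemoryForType()), which then maps the queue with
// IOConnectMapMemory(); that wiring lives outside this file.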
OSSharedPtr<IOMemoryDescriptor>
IOSharedDataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;

	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}

IODataQueueEntry *
IOSharedDataQueue::peek()
{
	IODataQueueEntry *entry      = NULL;
	UInt32            headOffset;
	UInt32            tailOffset;

	if (!dataQueue) {
		return NULL;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32              headSize     = 0;
		UInt32              queueSize    = getQueueSize();

		if (headOffset > queueSize) {
			return NULL;
		}

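		// Capture the entry pointer and its advertised size once; the queue
		// memory is shared with user space, so a second read could observe
		// a different value.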
		head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize     = head->size;

		// Check if there's enough room before the end of the queue for a header.
		// If there is room, check if there's enough room to hold the header and
		// the data.

		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// No room for the header or the data, wrap to the beginning of the queue.
			// Note: wrapping even with the UINT32_MAX checks, as we have to support
			// queueSize of UINT32_MAX
			entry = dataQueue->queue;
		} else {
			entry = (IODataQueueEntry *)head;
		}
	}

	return entry;
}

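// The shared queue is lock-free and is intended for a single producer and a
// single consumer; the atomic loads/stores and fences below order those two
// sides, nothing more.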
Boolean
IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	IODataQueueEntry * entry;

	// Defensive check, mirroring peek() and dequeue(): fail rather than
	// dereference a queue that was never successfully initialized.
	if (!dataQueue) {
		return false;
	}

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}
	// Check for underflow of (getQueueSize() - tail)
	if (getQueueSize() < tail || getQueueSize() < head) {
		return false;
	}

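	// When tail >= head, the free space lies in [tail, queueSize) and in
	// [0, head); try the region at the end first, then wrap.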
	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= getQueueSize())) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bounds when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}

Boolean
IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
	Boolean             retVal          = TRUE;
	volatile IODataQueueEntry *  entry  = NULL;
	UInt32              entrySize       = 0;
	UInt32              headOffset      = 0;
	UInt32              tailOffset      = 0;
	UInt32              newHeadOffset   = 0;

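	// A NULL data pointer is allowed and simply discards the next entry;
	// dataSize is mandatory only when a copy-out buffer is supplied.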
	if (!dataQueue || (data && !dataSize)) {
		return false;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32              headSize     = 0;
		UInt32              queueSize    = getQueueSize();

		if (headOffset > queueSize) {
			return false;
		}

		head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize     = head->size;

		// Decide whether the current entry wrapped to the beginning of the
		// queue: that happens when, at the recorded head offset, there was
		// not even room for the entry header...
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // ...or there was room for the header, but not for the data.
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// Note: we have to wrap to the beginning even with the UINT32_MAX checks
			// because we have to support a queueSize of UINT32_MAX.
			entry           = dataQueue->queue;
			entrySize       = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return false;
			}
			newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		} else {
			// Otherwise the entry sits at the recorded head offset.
			entry           = head;
			entrySize       = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return false;
			}
			newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// empty queue
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// not enough space
			return false;
		}
		__nochk_memcpy(data, (void *)entry->data, entrySize);
		*dataSize = entrySize;
	}

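	// Publish the new head; pairs with the acquire load of head in ::enqueue.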
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}

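// The queue size is cached in the kernel-private ExpansionData and is never
// re-read from the shared header, which user space could tamper with.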
UInt32
IOSharedDataQueue::getQueueSize()
{
	if (!_reserved) {
		return 0;
	}
	return _reserved->queueSize;
}

Boolean
IOSharedDataQueue::setQueueSize(UInt32 size)
{
	if (!_reserved) {
		return false;
	}
	_reserved->queueSize = size;
	return true;
}

OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);