/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#include <IOKit/IOSharedDataQueue.h>
#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/c++/OSSharedPtr.h>

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super IODataQueue

OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue)

OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			return nullptr;
		}
	}

	return dataQueue;
}

OSSharedPtr<IOSharedDataQueue>
IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IOSharedDataQueue> dataQueue = OSMakeShared<IOSharedDataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			return nullptr;
		}
	}

	return dataQueue;
}
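
/*
 * Illustrative usage sketch (an assumption, not part of this file): a driver
 * that produces fixed-size events might create a queue with one of the
 * factories above and publish entries from its work loop. The MyEvent type
 * and the entry count are hypothetical.
 *
 *     OSSharedPtr<IOSharedDataQueue> queue =
 *         IOSharedDataQueue::withEntries(64, sizeof(MyEvent)); // MyEvent is hypothetical
 *     if (queue) {
 *         MyEvent event = {};
 *         // fill in event fields, then publish it to the shared queue
 *         queue->enqueue(&event, sizeof(event));
 *     }
 */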

Boolean
IOSharedDataQueue::initWithCapacity(UInt32 size)
{
	IODataQueueAppendix *   appendix;
	vm_size_t               allocSize;
	kern_return_t           kr;

	if (!super::init()) {
		return false;
	}

	_reserved = IOMallocType(ExpansionData);
	if (!_reserved) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) {
		return false;
	}

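	/*
	 * Layout of the shared allocation (one page-rounded region), as implied
	 * by the offsets used below:
	 *
	 *     [ IODataQueueMemory header | queue data: `size` bytes | IODataQueueAppendix ]
	 *
	 * The appendix sits immediately past the data region, at offset
	 * size + DATA_QUEUE_MEMORY_HEADER_SIZE, and carries per-queue metadata
	 * such as the version field initialized below.
	 */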
	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE);

	if (allocSize < size) {
		return false;
	}

	kr = kmem_alloc(kernel_map, (vm_offset_t *)&dataQueue, allocSize,
	    (kma_flags_t)(KMA_DATA | KMA_ZERO), IOMemoryTag(kernel_map));
	if (kr != KERN_SUCCESS) {
		return false;
	}

	dataQueue->queueSize    = size;
//  dataQueue->head         = 0;
//  dataQueue->tail         = 0;

	if (!setQueueSize(size)) {
		return false;
	}

	appendix            = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
	appendix->version   = 0;

	if (!notifyMsg) {
		notifyMsg = IOMallocType(mach_msg_header_t);
		if (!notifyMsg) {
			return false;
		}
	}
	bzero(notifyMsg, sizeof(mach_msg_header_t));

	setNotificationPort(MACH_PORT_NULL);

	return true;
}

void
IOSharedDataQueue::free()
{
	if (dataQueue) {
		kmem_free(kernel_map, (vm_offset_t)dataQueue, round_page(getQueueSize() +
		    DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
		dataQueue = NULL;
		if (notifyMsg) {
			IOFreeType(notifyMsg, mach_msg_header_t);
			notifyMsg = NULL;
		}
	}

	if (_reserved) {
		IOFreeType(_reserved, ExpansionData);
		_reserved = NULL;
	}

	super::free();
}

OSSharedPtr<IOMemoryDescriptor>
IOSharedDataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;

	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}
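
/*
 * Illustrative sketch (an assumption, not part of this file): a user client
 * typically hands this descriptor to user space from its clientMemoryForType
 * override, so the client library can map the queue and consume entries.
 * MyUserClient and fQueue are hypothetical names.
 *
 *     IOReturn
 *     MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits *options,
 *         IOMemoryDescriptor **memory)
 *     {
 *         OSSharedPtr<IOMemoryDescriptor> desc = fQueue->getMemoryDescriptor();
 *         if (!desc) {
 *             return kIOReturnNoMemory;
 *         }
 *         *memory = desc.detach(); // reference consumed by the caller
 *         return kIOReturnSuccess;
 *     }
 */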


IODataQueueEntry *
IOSharedDataQueue::peek()
{
	IODataQueueEntry *entry      = NULL;
	UInt32            headOffset;
	UInt32            tailOffset;

	if (!dataQueue) {
		return NULL;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32              headSize     = 0;
		UInt32              queueSize    = getQueueSize();

		if (headOffset > queueSize) {
			return NULL;
		}

		head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize     = head->size;

		// Check if there's enough room before the end of the queue for a header.
		// If there is room, check if there's enough room to hold the header and
		// the data.

		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// No room for the header or the data, wrap to the beginning of the queue.
			// Note: wrapping even with the UINT32_MAX checks, as we have to support
			// queueSize of UINT32_MAX
			entry = dataQueue->queue;
		} else {
			entry = (IODataQueueEntry *)head;
		}
	}

	return entry;
}
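
/*
 * Illustrative sketch (an assumption, not part of this file): a kernel-side
 * consumer can use peek() to see whether an entry is pending before dequeuing
 * it into a caller-provided buffer. `kEventBufferSize` is hypothetical.
 *
 *     IODataQueueEntry *next = queue->peek();
 *     if (next != NULL) {
 *         UInt8  buffer[kEventBufferSize];
 *         UInt32 length = sizeof(buffer);
 *         if (queue->dequeue(buffer, &length)) {
 *             // `length` now holds the size of the dequeued entry
 *         }
 *     }
 */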

Boolean
IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	IODataQueueEntry * entry;

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}
	// Check for underflow of (getQueueSize() - tail)
	if (getQueueSize() < tail || getQueueSize() < head) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= getQueueSize())) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}
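
/*
 * Illustrative sketch (an assumption, not part of this file): the usual
 * consumer of these entries lives in user space, maps the queue via the
 * owning user client, and drains it with the IODataQueueClient routines
 * from <IOKit/IODataQueueClient.h>. `queueMemory`, the notification port
 * registration, and the buffer size are hypothetical.
 *
 *     IODataQueueMemory *queueMemory = ...; // mapped via the user client
 *     mach_port_t notifyPort = IODataQueueAllocateNotificationPort();
 *     // register notifyPort with the user client, then:
 *     while (IODataQueueWaitForAvailableData(queueMemory, notifyPort) == kIOReturnSuccess) {
 *         uint8_t  buffer[256];
 *         uint32_t length = sizeof(buffer);
 *         while (IODataQueueDequeue(queueMemory, buffer, &length) == kIOReturnSuccess) {
 *             // process `length` bytes, then reset length for the next entry
 *             length = sizeof(buffer);
 *         }
 *     }
 */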

Boolean
IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize)
{
	Boolean             retVal          = TRUE;
	volatile IODataQueueEntry *  entry  = NULL;
	UInt32              entrySize       = 0;
	UInt32              headOffset      = 0;
	UInt32              tailOffset      = 0;
	UInt32              newHeadOffset   = 0;

	if (!dataQueue || (data && !dataSize)) {
		return false;
	}

	// Read head and tail with acquire barrier
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);

	if (headOffset != tailOffset) {
		volatile IODataQueueEntry * head = NULL;
		UInt32              headSize     = 0;
		UInt32              queueSize    = getQueueSize();

		if (headOffset > queueSize) {
			return false;
		}

		head         = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset);
		headSize     = head->size;

		// The producer wrapped around to the beginning, so read from there:
		// either there was not even room for the header at the head offset,
		if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
		    // or there was room for the header, but not for the data
		    (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
		    (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
			// Note: we have to wrap to the beginning even with the UINT32_MAX checks
			// because we have to support a queueSize of UINT32_MAX.
			entry           = dataQueue->queue;
			entrySize       = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
				return false;
			}
			newHeadOffset   = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
			// else it is at the end
		} else {
			entry           = head;
			entrySize       = entry->size;
			if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
			    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
				return false;
			}
			newHeadOffset   = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE;
		}
	} else {
		// empty queue
		return false;
	}

	if (data) {
		if (entrySize > *dataSize) {
			// not enough space
			return false;
		}
		__nochk_memcpy(data, (void *)entry->data, entrySize);
		*dataSize = entrySize;
	}

	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

	if (newHeadOffset == tailOffset) {
		//
		// If we are making the queue empty, then we need to make sure
		// that either the enqueuer notices, or we notice the enqueue
		// that raced with our making of the queue empty.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	return retVal;
}
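
/*
 * Illustrative sketch (an assumption, not part of this file): when data flows
 * in the other direction, a user-space producer fills the mapped queue with
 * IODataQueueEnqueue() from <IOKit/IODataQueueClient.h> and the driver drains
 * it with the dequeue() method above. `queueMemory`, `sharedQueue`, and
 * MyCommand are hypothetical.
 *
 *     // user space
 *     MyCommand cmd = { ... };
 *     IODataQueueEnqueue(queueMemory, &cmd, sizeof(cmd));
 *
 *     // kernel, e.g. triggered by an external method call
 *     MyCommand cmd;
 *     UInt32    length = sizeof(cmd);
 *     if (sharedQueue->dequeue(&cmd, &length) && length == sizeof(cmd)) {
 *         // handle the command
 *     }
 */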

UInt32
IOSharedDataQueue::getQueueSize()
{
	if (!_reserved) {
		return 0;
	}
	return _reserved->queueSize;
}

Boolean
IOSharedDataQueue::setQueueSize(UInt32 size)
{
	if (!_reserved) {
		return false;
	}
	_reserved->queueSize = size;
	return true;
}

OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 1);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 2);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 3);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 4);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 5);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 6);
OSMetaClassDefineReservedUnused(IOSharedDataQueue, 7);