/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#define DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueue.h>

#undef DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/OSAtomic.h>
#include <libkern/c++/OSSharedPtr.h>

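// Kernel-private state backing an IODataQueue: a preallocated Mach message
// header used by sendDataAvailableNotification(), plus the client-requested
// queue size (the shared IODataQueueMemory buffer itself is page-aligned and
// rounded up to whole pages at allocation time).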
struct IODataQueueInternal {
	mach_msg_header_t msg;
	UInt32            queueSize;
};

#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super OSObject

OSDefineMetaClassAndStructors(IODataQueue, OSObject)

OSSharedPtr<IODataQueue>
IODataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IODataQueue> dataQueue = OSMakeShared<IODataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			return nullptr;
		}
	}

	return dataQueue;
}

OSSharedPtr<IODataQueue>
IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IODataQueue> dataQueue = OSMakeShared<IODataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			return nullptr;
		}
	}

	return dataQueue;
}
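/*
 * Illustrative usage sketch (not part of this file): how a driver might create
 * a queue with the factories above and publish events into it. `kMyEventSize`,
 * `event`, and `portFromUserClient` are hypothetical placeholders.
 *
 *   OSSharedPtr<IODataQueue> queue = IODataQueue::withEntries(16, kMyEventSize);
 *   if (!queue) {
 *       return kIOReturnNoMemory;
 *   }
 *   queue->setNotificationPort(portFromUserClient); // wired up via the user client
 *   queue->enqueue(&event, sizeof(event));          // notifies the port on empty -> non-empty
 */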

Boolean
IODataQueue::initWithCapacity(UInt32 size)
{
	vm_size_t allocSize = 0;

	if (!super::init()) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);

	if (allocSize < size) {
		return false;
	}

	assert(!notifyMsg);
	notifyMsg = IONew(IODataQueueInternal, 1);
	if (!notifyMsg) {
		return false;
	}
	bzero(notifyMsg, sizeof(IODataQueueInternal));
	((IODataQueueInternal *)notifyMsg)->queueSize = size;

	dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
	if (dataQueue == NULL) {
		return false;
	}
	bzero(dataQueue, allocSize);

	dataQueue->queueSize    = size;
//  dataQueue->head         = 0;
//  dataQueue->tail         = 0;

	return true;
}
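/*
 * Worked example (illustrative, assuming 4 KiB pages and the usual 12-byte
 * IODataQueueMemory header): initWithCapacity(8192) computes
 * round_page(8192 + DATA_QUEUE_MEMORY_HEADER_SIZE) = round_page(8204) = 12288,
 * so three pages are allocated, while dataQueue->queueSize keeps the
 * caller-requested 8192 that enqueue() bounds-checks against.
 */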

Boolean
IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
	// Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
	//  check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
	    //  check (numEntries + 1)
	    (numEntries > UINT32_MAX - 1) ||
	    //  check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX / (numEntries + 1))) {
		return false;
	}

	return initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize));
}
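/*
 * Worked example (illustrative): initWithEntries(16, 64) asks for
 * (16 + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + 64) bytes of capacity. The extra
 * slot is there because enqueue() never lets the tail catch up to the head, so
 * without it a queue sized for exactly numEntries records could only ever hold
 * numEntries - 1 of them.
 */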

void
IODataQueue::free()
{
	if (notifyMsg) {
		if (dataQueue) {
			IOFreeAligned(dataQueue, round_page(((IODataQueueInternal *)notifyMsg)->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE));
			dataQueue = NULL;
		}

		IODelete(notifyMsg, IODataQueueInternal, 1);
		notifyMsg = NULL;
	}

	super::free();

	return;
}

Boolean
IODataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	UInt32             queueSize;
	IODataQueueEntry * entry;

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for underflow of (dataQueue->queueSize - tail)
	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if ((queueSize < tail) || (queueSize < head)) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= queueSize)) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}
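/*
 * Simplified sketch (illustrative, not the actual user-space implementation) of
 * the dequeue side that the wrap-around comments above assume: head == tail
 * means empty, and an entry that cannot fit between head and queueSize was
 * wrapped by enqueue() to offset 0.
 *
 *   UInt32 head = __c11_atomic_load((_Atomic UInt32 *)&q->head, __ATOMIC_RELAXED);
 *   UInt32 tail = __c11_atomic_load((_Atomic UInt32 *)&q->tail, __ATOMIC_ACQUIRE);
 *   if (head == tail) {
 *       return false;                            // nothing to dequeue
 *   }
 *   IODataQueueEntry *entry = (IODataQueueEntry *)((UInt8 *)q->queue + head);
 *   if ((head + DATA_QUEUE_ENTRY_HEADER_SIZE > q->queueSize) ||
 *       (head + DATA_QUEUE_ENTRY_HEADER_SIZE + entry->size > q->queueSize)) {
 *       entry = q->queue;                        // wrapped entry lives at the start
 *   }
 *   // ... copy entry->size bytes out, then publish the new head with a release store.
 */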

void
IODataQueue::setNotificationPort(mach_port_t port)
{
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	bzero(msgh, sizeof(mach_msg_header_t));
	msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msgh->msgh_size = sizeof(mach_msg_header_t);
	msgh->msgh_remote_port = port;
}
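/*
 * Assumed wiring (illustrative, not defined in this file): a user client
 * typically forwards the Mach port it receives in
 * IOUserClient::registerNotificationPort() to setNotificationPort() above;
 * user space then blocks in mach_msg() on that port until the empty ->
 * non-empty transition in enqueue() triggers sendDataAvailableNotification().
 * `MyUserClient` and `fDataQueue` are hypothetical names.
 *
 *   IOReturn
 *   MyUserClient::registerNotificationPort(mach_port_t port, UInt32 type, UInt32 refCon)
 *   {
 *       fDataQueue->setNotificationPort(port);
 *       return kIOReturnSuccess;
 *   }
 */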

void
IODataQueue::sendDataAvailableNotification()
{
	kern_return_t       kr;
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	if (msgh->msgh_remote_port) {
		kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
		switch (kr) {
		case MACH_SEND_TIMED_OUT: // Notification already sent
		case MACH_MSG_SUCCESS:
		case MACH_SEND_NO_BUFFER:
			break;
		default:
			IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/ "IODataQueue", kr);
			break;
		}
	}
}

OSSharedPtr<IOMemoryDescriptor>
IODataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;
	UInt32              queueSize;

	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}
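/*
 * Assumed wiring (illustrative, not defined in this file): a user client would
 * typically hand this descriptor out through clientMemoryForType() so user
 * space can map the shared IODataQueueMemory region (for example with
 * IOConnectMapMemory64()) and consume entries from it. `MyUserClient` and
 * `fDataQueue` are hypothetical; the ownership convention shown (the caller
 * releases the returned descriptor) is an assumption.
 *
 *   IOReturn
 *   MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits *options,
 *       IOMemoryDescriptor **memory)
 *   {
 *       *memory = fDataQueue->getMemoryDescriptor().detach();
 *       return *memory ? kIOReturnSuccess : kIOReturnNoMemory;
 *   }
 */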