xref: /xnu-11215/iokit/Kernel/IOUserClient.cpp (revision aca3beaa)
1 /*
2  * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52 
53 #include <mach/sdt.h>
54 #include <os/hash.h>
55 
56 #include <libkern/amfi/amfi.h>
57 
58 #if CONFIG_MACF
59 
60 extern "C" {
61 #include <security/mac_framework.h>
62 };
63 #include <sys/kauth.h>
64 
65 #define IOMACF_LOG 0
66 
67 #endif /* CONFIG_MACF */
68 
69 #include <IOKit/assert.h>
70 
71 #include "IOServicePrivate.h"
72 #include "IOKitKernelInternal.h"
73 
74 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
75 #define SCALAR32(x) ((uint32_t )x)
76 #define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))
77 #define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
78 #define REF32(x)    ((int)(x))
79 
80 enum{
81 	kIOUCAsync0Flags          = 3ULL,
82 	kIOUCAsync64Flag          = 1ULL,
83 	kIOUCAsyncErrorLoggedFlag = 2ULL
84 };
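/*
 * These flag bits live in the low bits of asyncRef[kIOAsyncReservedIndex],
 * packed next to the wake port. setAsyncReference64() below preserves them:
 *
 *   asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
 *       | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
 *
 * Its task_t variant additionally sets kIOUCAsync64Flag when the calling task
 * has a 64-bit map; kIOUCAsyncErrorLoggedFlag marks that a failed async send
 * has already been logged, so the failure is reported only once.
 */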
85 
86 #if IOKITSTATS
87 
88 #define IOStatisticsRegisterCounter() \
89 do { \
90 	reserved->counter = IOStatistics::registerUserClient(this); \
91 } while (0)
92 
93 #define IOStatisticsUnregisterCounter() \
94 do { \
95 	if (reserved) \
96 	        IOStatistics::unregisterUserClient(reserved->counter); \
97 } while (0)
98 
99 #define IOStatisticsClientCall() \
100 do { \
101 	IOStatistics::countUserClientCall(client); \
102 } while (0)
103 
104 #else
105 
106 #define IOStatisticsRegisterCounter()
107 #define IOStatisticsUnregisterCounter()
108 #define IOStatisticsClientCall()
109 
110 #endif /* IOKITSTATS */
111 
112 #if DEVELOPMENT || DEBUG
113 
114 #define FAKE_STACK_FRAME(a)                                             \
115 	const void ** __frameptr;                                       \
116 	const void  * __retaddr;                                        \
117 	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0);   \
118 	__retaddr = __frameptr[1];                                      \
119 	__frameptr[1] = (a);
120 
121 #define FAKE_STACK_FRAME_END()                                          \
122 	__frameptr[1] = __retaddr;
123 
124 #else /* DEVELOPMENT || DEBUG */
125 
126 #define FAKE_STACK_FRAME(a)
127 #define FAKE_STACK_FRAME_END()
128 
129 #endif /* DEVELOPMENT || DEBUG */
130 
131 #define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
132 #define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
133 
134 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
135 
136 extern "C" {
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139 } /* extern "C" */
140 
141 struct IOMachPortHashList;
142 
143 static_assert(IKOT_MAX_TYPE <= 255);
144 
145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
146 
147 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
148 class IOMachPort : public OSObject
149 {
150 	OSDeclareDefaultStructors(IOMachPort);
151 public:
152 	SLIST_ENTRY(IOMachPort) link;
153 	ipc_port_t  port;
154 	OSObject*   object;
155 	UInt32      mscount;
156 	UInt8       holdDestroy;
157 	UInt8       type;
158 
159 	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);
160 
161 	static IOMachPortHashList* bucketForObject(OSObject *obj,
162 	    ipc_kobject_type_t type);
163 
164 	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);
165 
166 	static bool noMoreSendersForObject( OSObject * obj,
167 	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
168 	static void releasePortForObject( OSObject * obj,
169 	    ipc_kobject_type_t type );
170 	static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
171 
172 	static mach_port_name_t makeSendRightForTask( task_t task,
173 	    io_object_t obj, ipc_kobject_type_t type );
174 
175 	virtual void free() APPLE_KEXT_OVERRIDE;
176 };
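/*
 * IOMachPorts are kept in gIOMachPortHash (below), an array of SLIST buckets
 * hashed on the object pointer. The canonical lookup-or-create sequence, as
 * used by iokit_port_for_object() later in this file, is roughly:
 *
 *   IOMachPortHashList *bucket   = IOMachPort::bucketForObject(obj, type);
 *   lck_mtx_lock(gIOObjectPortLock);
 *   IOMachPort         *machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
 *   if (!machPort) {
 *       machPort = IOMachPort::withObjectAndType(obj, type);   // takes a tagged retain on obj
 *       SLIST_INSERT_HEAD(bucket, machPort, link);
 *   } else {
 *       machPort->mscount++;                                   // one more make-send
 *   }
 *   lck_mtx_unlock(gIOObjectPortLock);
 *
 * All insertions and removals happen under gIOObjectPortLock.
 */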
177 
178 #define super OSObject
179 OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)
180 
181 static IOLock *         gIOObjectPortLock;
182 IOLock *                gIOUserServerLock;
183 
184 SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;
185 
186 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
187 
188 SLIST_HEAD(IOMachPortHashList, IOMachPort);
189 
190 #if defined(XNU_TARGET_OS_OSX)
191 #define PORT_HASH_SIZE 4096
192 #else /* !defined(XNU_TARGET_OS_OSX) */
193 #define PORT_HASH_SIZE 256
194 #endif /* defined(XNU_TARGET_OS_OSX) */
195 
196 IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
197 
198 void
199 IOMachPortInitialize(void)
200 {
201 	for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
202 		SLIST_INIT(&gIOMachPortHash[i]);
203 	}
204 }
205 
206 IOMachPortHashList*
207 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
208 {
209 	return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
210 }
211 
212 IOMachPort*
213 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
214 {
215 	IOMachPort *machPort;
216 
217 	SLIST_FOREACH(machPort, bucket, link) {
218 		if (machPort->object == obj && machPort->type == type) {
219 			return machPort;
220 		}
221 	}
222 	return NULL;
223 }
224 
225 IOMachPort*
226 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
227 {
228 	IOMachPort *machPort = NULL;
229 
230 	machPort = new IOMachPort;
231 	if (__improbable(machPort && !machPort->init())) {
232 		OSSafeReleaseNULL(machPort);
233 		return NULL;
234 	}
235 
236 	machPort->object = obj;
237 	machPort->type = (typeof(machPort->type))type;
238 	machPort->port = iokit_alloc_object_port(obj, type);
239 
240 	obj->taggedRetain(OSTypeID(OSCollection));
241 	machPort->mscount++;
242 
243 	return machPort;
244 }
245 
246 bool
247 IOMachPort::noMoreSendersForObject( OSObject * obj,
248     ipc_kobject_type_t type, mach_port_mscount_t * mscount )
249 {
250 	IOMachPort *machPort = NULL;
251 	IOUserClient *uc;
252 	OSAction *action;
253 	bool destroyed = true;
254 
255 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
256 
257 	obj->retain();
258 
259 	lck_mtx_lock(gIOObjectPortLock);
260 
261 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
262 
263 	if (machPort) {
264 		destroyed = (machPort->mscount <= *mscount);
265 		if (!destroyed) {
266 			*mscount = machPort->mscount;
267 			lck_mtx_unlock(gIOObjectPortLock);
268 		} else {
269 			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
270 				uc->noMoreSenders();
271 			}
272 			SLIST_REMOVE(bucket, machPort, IOMachPort, link);
273 
274 			lck_mtx_unlock(gIOObjectPortLock);
275 
276 			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
277 
278 			obj->taggedRelease(OSTypeID(OSCollection));
279 		}
280 	} else {
281 		lck_mtx_unlock(gIOObjectPortLock);
282 	}
283 
284 	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
285 		action->Aborted();
286 	}
287 
288 	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
289 		// Leak object
290 		obj->retain();
291 	}
292 
293 	obj->release();
294 
295 	return destroyed;
296 }
297 
298 void
299 IOMachPort::releasePortForObject( OSObject * obj,
300     ipc_kobject_type_t type )
301 {
302 	IOMachPort *machPort;
303 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
304 
305 	assert(IKOT_IOKIT_CONNECT != type);
306 
307 	lck_mtx_lock(gIOObjectPortLock);
308 
309 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
310 
311 	if (machPort && !machPort->holdDestroy) {
312 		obj->retain();
313 		SLIST_REMOVE(bucket, machPort, IOMachPort, link);
314 
315 		lck_mtx_unlock(gIOObjectPortLock);
316 
317 		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
318 
319 		obj->taggedRelease(OSTypeID(OSCollection));
320 		obj->release();
321 	} else {
322 		lck_mtx_unlock(gIOObjectPortLock);
323 	}
324 }
325 
326 void
327 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
328 {
329 	IOMachPort *        machPort;
330 
331 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
332 	lck_mtx_lock(gIOObjectPortLock);
333 
334 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
335 
336 	if (machPort) {
337 		machPort->holdDestroy = true;
338 	}
339 
340 	lck_mtx_unlock(gIOObjectPortLock);
341 }
342 
343 void
344 IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
345 {
346 	IOMachPort::releasePortForObject(obj, type);
347 }
348 
349 void
350 IOUserClient::destroyUserReferences( OSObject * obj )
351 {
352 	IOMachPort *machPort;
353 
354 	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
355 
356 	// panther, 3160200
357 	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
358 
359 	obj->retain();
360 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
361 	IOMachPortHashList *mappingBucket = NULL;
362 
363 	lck_mtx_lock(gIOObjectPortLock);
364 
365 	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
366 	if (uc && uc->mappings) {
367 		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
368 	}
369 
370 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);
371 
372 	if (machPort == NULL) {
373 		lck_mtx_unlock(gIOObjectPortLock);
374 		goto end;
375 	}
376 
377 	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
378 	obj->taggedRelease(OSTypeID(OSCollection));
379 
380 	if (uc) {
381 		uc->noMoreSenders();
382 		if (uc->mappings) {
383 			uc->mappings->taggedRetain(OSTypeID(OSCollection));
384 			machPort->object = uc->mappings;
385 			SLIST_INSERT_HEAD(mappingBucket, machPort, link);
386 			iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);
387 
388 			lck_mtx_unlock(gIOObjectPortLock);
389 
390 			OSSafeReleaseNULL(uc->mappings);
391 		} else {
392 			lck_mtx_unlock(gIOObjectPortLock);
393 			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
394 		}
395 	} else {
396 		lck_mtx_unlock(gIOObjectPortLock);
397 		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
398 	}
399 
400 
401 end:
402 	OSSafeReleaseNULL(obj);
403 }
404 
405 mach_port_name_t
406 IOMachPort::makeSendRightForTask( task_t task,
407     io_object_t obj, ipc_kobject_type_t type )
408 {
409 	return iokit_make_send_right( task, obj, type );
410 }
411 
412 void
413 IOMachPort::free( void )
414 {
415 	if (port) {
416 		iokit_destroy_object_port( port, type );
417 	}
418 	super::free();
419 }
420 
421 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
422 
423 static bool
424 IOTaskRegistryCompatibility(task_t task)
425 {
426 	return false;
427 }
428 
429 static void
430 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
431 {
432 	matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
433 	if (!IOTaskRegistryCompatibility(task)) {
434 		return;
435 	}
436 	matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
437 }
438 
439 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
440 
441 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
442 
443 IOUserIterator *
444 IOUserIterator::withIterator(OSIterator * iter)
445 {
446 	IOUserIterator * me;
447 
448 	if (!iter) {
449 		return NULL;
450 	}
451 
452 	me = new IOUserIterator;
453 	if (me && !me->init()) {
454 		me->release();
455 		me = NULL;
456 	}
457 	if (!me) {
458 		iter->release();
459 		return me;
460 	}
461 	me->userIteratorObject = iter;
462 
463 	return me;
464 }
465 
466 bool
467 IOUserIterator::init( void )
468 {
469 	if (!OSObject::init()) {
470 		return false;
471 	}
472 
473 	IOLockInlineInit(&lock);
474 	return true;
475 }
476 
477 void
478 IOUserIterator::free()
479 {
480 	if (userIteratorObject) {
481 		userIteratorObject->release();
482 	}
483 	IOLockInlineDestroy(&lock);
484 	OSObject::free();
485 }
486 
487 void
488 IOUserIterator::reset()
489 {
490 	IOLockLock(&lock);
491 	assert(OSDynamicCast(OSIterator, userIteratorObject));
492 	((OSIterator *)userIteratorObject)->reset();
493 	IOLockUnlock(&lock);
494 }
495 
496 bool
497 IOUserIterator::isValid()
498 {
499 	bool ret;
500 
501 	IOLockLock(&lock);
502 	assert(OSDynamicCast(OSIterator, userIteratorObject));
503 	ret = ((OSIterator *)userIteratorObject)->isValid();
504 	IOLockUnlock(&lock);
505 
506 	return ret;
507 }
508 
509 OSObject *
510 IOUserIterator::getNextObject()
511 {
512 	assert(false);
513 	return NULL;
514 }
515 
516 OSObject *
517 IOUserIterator::copyNextObject()
518 {
519 	OSObject * ret = NULL;
520 
521 	IOLockLock(&lock);
522 	if (userIteratorObject) {
523 		ret = ((OSIterator *)userIteratorObject)->getNextObject();
524 		if (ret) {
525 			ret->retain();
526 		}
527 	}
528 	IOLockUnlock(&lock);
529 
530 	return ret;
531 }
532 
533 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
534 extern "C" {
535 // functions called from osfmk/device/iokit_rpc.c
536 
537 void
538 iokit_port_object_description(io_object_t obj, kobject_description_t desc)
539 {
540 	IORegistryEntry    * regEntry;
541 	IOUserNotification * __unused noti;
542 	_IOServiceNotifier * __unused serviceNoti;
543 	OSSerialize        * __unused s;
544 	OSDictionary       * __unused matching = NULL;
545 
546 	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
547 		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
548 #if DEVELOPMENT || DEBUG
549 	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
550 		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
551 		IOLockLock(gIOObjectPortLock);
552 		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
553 		if (serviceNoti && (matching = serviceNoti->matching)) {
554 			matching->retain();
555 		}
556 		IOLockUnlock(gIOObjectPortLock);
557 
558 		if (matching) {
559 			s = OSSerialize::withCapacity((unsigned int) page_size);
560 			if (s && matching->serialize(s)) {
561 				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
562 			}
563 			OSSafeReleaseNULL(s);
564 			OSSafeReleaseNULL(matching);
565 		}
566 #endif /* DEVELOPMENT || DEBUG */
567 	} else {
568 		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
569 	}
570 }
571 
572 // FIXME: Implementations of these functions are hidden from the static analyzer.
573 // For now, the analyzer doesn't consistently support wrapper functions
574 // for retain and release.
575 #ifndef __clang_analyzer__
576 void
577 iokit_add_reference( io_object_t obj, natural_t type )
578 {
579 	IOUserClient * uc;
580 
581 	if (!obj) {
582 		return;
583 	}
584 
585 	if ((IKOT_IOKIT_CONNECT == type)
586 	    && (uc = OSDynamicCast(IOUserClient, obj))) {
587 		OSIncrementAtomic(&uc->__ipc);
588 	}
589 
590 	obj->retain();
591 }
592 
593 void
594 iokit_remove_reference( io_object_t obj )
595 {
596 	if (obj) {
597 		obj->release();
598 	}
599 }
600 #endif // __clang_analyzer__
601 
602 void
603 iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
604 {
605 	IOUserClient * uc;
606 	bool           finalize = false;
607 
608 	if (!obj) {
609 		return;
610 	}
611 
612 	if ((uc = OSDynamicCast(IOUserClient, obj))) {
613 		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
614 			IOLockLock(gIOObjectPortLock);
615 			if ((finalize = uc->__ipcFinal)) {
616 				uc->__ipcFinal = false;
617 			}
618 			IOLockUnlock(gIOObjectPortLock);
619 		}
620 		if (finalize) {
621 			uc->scheduleFinalize(true);
622 		}
623 	}
624 
625 	obj->release();
626 }
627 
628 bool
629 IOUserClient::finalizeUserReferences(OSObject * obj)
630 {
631 	IOUserClient * uc;
632 	bool           ok = true;
633 
634 	if ((uc = OSDynamicCast(IOUserClient, obj))) {
635 		IOLockLock(gIOObjectPortLock);
636 		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
637 			ok = false;
638 		}
639 		IOLockUnlock(gIOObjectPortLock);
640 	}
641 	return ok;
642 }
643 
644 ipc_port_t
645 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
646 {
647 	IOMachPort *machPort = NULL;
648 	ipc_port_t   port = NULL;
649 
650 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
651 
652 	lck_mtx_lock(gIOObjectPortLock);
653 
654 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
655 
656 	if (__improbable(machPort == NULL)) {
657 		machPort = IOMachPort::withObjectAndType(obj, type);
658 		if (__improbable(machPort == NULL)) {
659 			goto end;
660 		}
661 		SLIST_INSERT_HEAD(bucket, machPort, link);
662 	} else {
663 		machPort->mscount++;
664 	}
665 
666 	iokit_retain_port(machPort->port);
667 	port = machPort->port;
668 
669 end:
670 	lck_mtx_unlock(gIOObjectPortLock);
671 
672 	return port;
673 }
674 
675 kern_return_t
676 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
677     ipc_kobject_type_t type, mach_port_mscount_t * mscount )
678 {
679 	IOUserClient *      client;
680 	IOMemoryMap *       map;
681 	IOUserNotification * notify;
682 	IOUserServerCheckInToken * token;
683 
684 	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
685 		return kIOReturnNotReady;
686 	}
687 
688 	switch (type) {
689 	case IKOT_IOKIT_CONNECT:
690 		if ((client = OSDynamicCast( IOUserClient, obj ))) {
691 			IOStatisticsClientCall();
692 			IORWLockWrite(&client->lock);
693 			client->clientDied();
694 			IORWLockUnlock(&client->lock);
695 		}
696 		break;
697 	case IKOT_IOKIT_OBJECT:
698 		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
699 			map->taskDied();
700 		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
701 			notify->setNotification( NULL );
702 		}
703 		break;
704 	case IKOT_IOKIT_IDENT:
705 		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
706 			token->cancel();
707 		}
708 		break;
709 	}
710 
711 	return kIOReturnSuccess;
712 }
713 };      /* extern "C" */
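/*
 * Lifecycle note: when user space deallocates its last send right to one of
 * these ports, the resulting no-senders notification is delivered above via
 * iokit_client_died(). That first runs IOMachPort::noMoreSendersForObject()
 * to unhash the port and drop its tagged retain on the object, and then, for
 * IKOT_IOKIT_CONNECT ports, calls clientDied() on the IOUserClient while
 * holding its rw lock for writing.
 */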
714 
715 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
716 
717 class IOServiceUserNotification : public IOUserNotification
718 {
719 	OSDeclareDefaultStructors(IOServiceUserNotification);
720 
721 	struct PingMsgKdata {
722 		mach_msg_header_t               msgHdr;
723 	};
724 	struct PingMsgUdata {
725 		OSNotificationHeader64          notifyHeader;
726 	};
727 
728 	enum { kMaxOutstanding = 1024 };
729 
730 	ipc_port_t          remotePort;
731 	void                *msgReference;
732 	mach_msg_size_t     msgReferenceSize;
733 	natural_t           msgType;
734 	OSArray     *       newSet;
735 	bool                armed;
736 	bool                ipcLogged;
737 
738 public:
739 
740 	virtual bool init( mach_port_t port, natural_t type,
741 	    void * reference, vm_size_t referenceSize,
742 	    bool clientIs64 );
743 	virtual void free() APPLE_KEXT_OVERRIDE;
744 	void invalidatePort(void);
745 
746 	static bool _handler( void * target,
747 	    void * ref, IOService * newService, IONotifier * notifier );
748 	virtual bool handler( void * ref, IOService * newService );
749 
750 	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
751 	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
752 };
753 
754 class IOServiceMessageUserNotification : public IOUserNotification
755 {
756 	OSDeclareDefaultStructors(IOServiceMessageUserNotification);
757 
758 	struct PingMsgKdata {
759 		mach_msg_header_t               msgHdr;
760 		mach_msg_body_t                 msgBody;
761 		mach_msg_port_descriptor_t      ports[1];
762 	};
763 	struct PingMsgUdata {
764 		OSNotificationHeader64          notifyHeader __attribute__ ((packed));
765 	};
766 
767 	ipc_port_t          remotePort;
768 	void                *msgReference;
769 	mach_msg_size_t     msgReferenceSize;
770 	mach_msg_size_t     msgExtraSize;
771 	natural_t           msgType;
772 	uint8_t             clientIs64;
773 	int                 owningPID;
774 	bool                ipcLogged;
775 
776 public:
777 
778 	virtual bool init( mach_port_t port, natural_t type,
779 	    void * reference, vm_size_t referenceSize,
780 	    bool clientIs64 );
781 
782 	virtual void free() APPLE_KEXT_OVERRIDE;
783 	void invalidatePort(void);
784 
785 	static IOReturn _handler( void * target, void * ref,
786 	    UInt32 messageType, IOService * provider,
787 	    void * messageArgument, vm_size_t argSize );
788 	virtual IOReturn handler( void * ref,
789 	    UInt32 messageType, IOService * provider,
790 	    void * messageArgument, vm_size_t argSize );
791 
792 	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
793 	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
794 };
795 
796 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
797 
798 #undef super
799 #define super IOUserIterator
800 OSDefineMetaClass( IOUserNotification, IOUserIterator );
801 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
802 
803 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
804 
805 void
806 IOUserNotification::free( void )
807 {
808 #if DEVELOPMENT || DEBUG
809 	IOLockLock( gIOObjectPortLock);
810 
811 	assert(userIteratorObject == NULL);
812 
813 	IOLockUnlock( gIOObjectPortLock);
814 #endif /* DEVELOPMENT || DEBUG */
815 
816 	super::free();
817 }
818 
819 
820 void
821 IOUserNotification::setNotification( IONotifier * notify )
822 {
823 	OSObject * previousNotify;
824 
825 	/*
826 	 * We must retain this object here before proceeding.
827 	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
828 	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
829 	 * before the first thread calls retain(). Without the retain here, this thread interleaving
830 	 * would cause the object to get released and freed before it is retained by the first thread,
831 	 * which is a UaF.
832 	 */
833 	retain();
834 
835 	IOLockLock( gIOObjectPortLock);
836 
837 	previousNotify = userIteratorObject;
838 	userIteratorObject = notify;
839 
840 	IOLockUnlock( gIOObjectPortLock);
841 
842 	if (previousNotify) {
843 		assert(OSDynamicCast(IONotifier, previousNotify));
844 		((IONotifier *)previousNotify)->remove();
845 
846 		if (notify == NULL) {
847 			release();
848 		}
849 	} else if (notify) {
850 		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
851 		retain();
852 	}
853 
854 	release(); // paired with retain() at beginning of this method
855 }
856 
857 void
858 IOUserNotification::reset()
859 {
860 	// ?
861 }
862 
863 bool
864 IOUserNotification::isValid()
865 {
866 	return true;
867 }
868 
869 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
870 
871 #undef super
872 #define super IOUserNotification
873 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
874 
875 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
876 
877 bool
878 IOServiceUserNotification::init( mach_port_t port, natural_t type,
879     void * reference, vm_size_t referenceSize,
880     bool clientIs64 )
881 {
882 	if (!super::init()) {
883 		return false;
884 	}
885 
886 	newSet = OSArray::withCapacity( 1 );
887 	if (!newSet) {
888 		return false;
889 	}
890 
891 	if (referenceSize > sizeof(OSAsyncReference64)) {
892 		return false;
893 	}
894 
895 	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
896 	msgReference = IOMallocZeroData(msgReferenceSize);
897 	if (!msgReference) {
898 		return false;
899 	}
900 
901 	remotePort = port;
902 	msgType = type;
903 	bcopy( reference, msgReference, referenceSize );
904 
905 	return true;
906 }
907 
908 void
909 IOServiceUserNotification::invalidatePort(void)
910 {
911 	remotePort = MACH_PORT_NULL;
912 }
913 
914 void
915 IOServiceUserNotification::free( void )
916 {
917 	if (remotePort) {
918 		iokit_release_port_send(remotePort);
919 	}
920 	IOFreeData(msgReference, msgReferenceSize);
921 	OSSafeReleaseNULL(newSet);
922 
923 	super::free();
924 }
925 
926 bool
927 IOServiceUserNotification::_handler( void * target,
928     void * ref, IOService * newService, IONotifier * notifier )
929 {
930 	IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
931 	bool ret;
932 
933 	targetObj->retain();
934 	ret = targetObj->handler( ref, newService );
935 	targetObj->release();
936 	return ret;
937 }
938 
939 bool
940 IOServiceUserNotification::handler( void * ref,
941     IOService * newService )
942 {
943 	unsigned int        count;
944 	kern_return_t       kr;
945 	ipc_port_t          port = NULL;
946 	bool                sendPing = false;
947 	mach_msg_size_t     msgSize, payloadSize;
948 
949 	IOTakeLock( &lock );
950 
951 	count = newSet->getCount();
952 	if (count < kMaxOutstanding) {
953 		newSet->setObject( newService );
954 		if ((sendPing = (armed && (0 == count)))) {
955 			armed = false;
956 		}
957 	}
958 
959 	IOUnlock( &lock );
960 
961 	if (kIOServiceTerminatedNotificationType == msgType) {
962 		IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
963 	}
964 
965 	if (sendPing) {
966 		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
967 
968 		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
969 		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);
970 
971 		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
972 		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
973 		    MACH_MSG_TIMEOUT_NONE, NULL,
974 		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
975 			PingMsgUdata *udata = (PingMsgUdata *)payload;
976 
977 			hdr->msgh_remote_port    = remotePort;
978 			hdr->msgh_local_port     = port;
979 			hdr->msgh_bits           = MACH_MSGH_BITS(
980 				MACH_MSG_TYPE_COPY_SEND /*remote*/,
981 				MACH_MSG_TYPE_MAKE_SEND /*local*/);
982 			hdr->msgh_size           = msgSize;
983 			hdr->msgh_id             = kOSNotificationMessageID;
984 
985 			assert(descs == NULL);
986 			/* End of kernel processed data */
987 
988 			udata->notifyHeader.size          = 0;
989 			udata->notifyHeader.type          = msgType;
990 
991 			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
992 			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
993 		});
994 
995 		if (port) {
996 			iokit_release_port( port );
997 		}
998 
999 		if ((KERN_SUCCESS != kr) && !ipcLogged) {
1000 			ipcLogged = true;
1001 			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
1002 		}
1003 	}
1004 
1005 	return true;
1006 }
1007 OSObject *
1008 IOServiceUserNotification::getNextObject()
1009 {
1010 	assert(false);
1011 	return NULL;
1012 }
1013 
1014 OSObject *
1015 IOServiceUserNotification::copyNextObject()
1016 {
1017 	unsigned int        count;
1018 	OSObject *          result;
1019 
1020 	IOLockLock(&lock);
1021 
1022 	count = newSet->getCount();
1023 	if (count) {
1024 		result = newSet->getObject( count - 1 );
1025 		result->retain();
1026 		newSet->removeObject( count - 1);
1027 	} else {
1028 		result = NULL;
1029 		armed = true;
1030 	}
1031 
1032 	IOLockUnlock(&lock);
1033 
1034 	return result;
1035 }
1036 
1037 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1038 
1039 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1040 
1041 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1042 
1043 bool
1044 IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
1045     void * reference, vm_size_t referenceSize, bool client64 )
1046 {
1047 	if (!super::init()) {
1048 		return false;
1049 	}
1050 
1051 	if (referenceSize > sizeof(OSAsyncReference64)) {
1052 		return false;
1053 	}
1054 
1055 	clientIs64 = client64;
1056 
1057 	owningPID = proc_selfpid();
1058 
1059 	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
1060 	msgReference = IOMallocZeroData(msgReferenceSize);
1061 	if (!msgReference) {
1062 		return false;
1063 	}
1064 
1065 	remotePort = port;
1066 	msgType = type;
1067 	bcopy( reference, msgReference, referenceSize );
1068 
1069 	return true;
1070 }
1071 
1072 void
1073 IOServiceMessageUserNotification::invalidatePort(void)
1074 {
1075 	remotePort = MACH_PORT_NULL;
1076 }
1077 
1078 void
1079 IOServiceMessageUserNotification::free( void )
1080 {
1081 	if (remotePort) {
1082 		iokit_release_port_send(remotePort);
1083 	}
1084 	IOFreeData(msgReference, msgReferenceSize);
1085 
1086 	super::free();
1087 }
1088 
1089 IOReturn
1090 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1091     UInt32 messageType, IOService * provider,
1092     void * argument, vm_size_t argSize )
1093 {
1094 	IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1095 	IOReturn ret;
1096 
1097 	targetObj->retain();
1098 	ret = targetObj->handler(
1099 		ref, messageType, provider, argument, argSize);
1100 	targetObj->release();
1101 	return ret;
1102 }
1103 
1104 IOReturn
1105 IOServiceMessageUserNotification::handler( void * ref,
1106     UInt32 messageType, IOService * provider,
1107     void * messageArgument, vm_size_t callerArgSize )
1108 {
1109 	kern_return_t                kr;
1110 	vm_size_t                    argSize;
1111 	mach_msg_size_t              thisMsgSize;
1112 	ipc_port_t                   thisPort, providerPort;
1113 
1114 	if (kIOMessageCopyClientID == messageType) {
1115 		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
1116 		return kIOReturnSuccess;
1117 	}
1118 
1119 	if (callerArgSize == 0) {
1120 		if (clientIs64) {
1121 			argSize = sizeof(io_user_reference_t);
1122 		} else {
1123 			argSize = sizeof(uint32_t);
1124 		}
1125 	} else {
1126 		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
1127 			callerArgSize = kIOUserNotifyMaxMessageSize;
1128 		}
1129 		argSize = callerArgSize;
1130 	}
1131 
1132 	// adjust message size for ipc restrictions
1133 	natural_t type = msgType;
1134 	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1135 	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1136 	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1137 
1138 	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
1139 	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
1140 	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);
1141 
1142 	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
1143 		return kIOReturnBadArgument;
1144 	}
1145 	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);
1146 
1147 	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1148 	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1149 
1150 	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
1151 	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1152 	    MACH_MSG_TIMEOUT_NONE, NULL,
1153 	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
1154 		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
1155 		PingMsgUdata *udata = (PingMsgUdata *)payload;
1156 		IOServiceInterestContent64 * data;
1157 		mach_msg_size_t dataOffset;
1158 
1159 		hdr->msgh_remote_port    = remotePort;
1160 		hdr->msgh_local_port     = thisPort;
1161 		hdr->msgh_bits           = MACH_MSGH_BITS_COMPLEX
1162 		|  MACH_MSGH_BITS(
1163 			MACH_MSG_TYPE_COPY_SEND /*remote*/,
1164 			MACH_MSG_TYPE_MAKE_SEND /*local*/);
1165 		hdr->msgh_size           = thisMsgSize;
1166 		hdr->msgh_id             = kOSNotificationMessageID;
1167 
1168 		/* body.msgh_descriptor_count is set automatically after the closure */
1169 
1170 		port_desc[0].name              = providerPort;
1171 		port_desc[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
1172 		port_desc[0].type              = MACH_MSG_PORT_DESCRIPTOR;
1173 		/* End of kernel processed data */
1174 
1175 		udata->notifyHeader.size          = extraSize;
1176 		udata->notifyHeader.type          = type;
1177 		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
1178 
1179 		/* data is after msgReference */
1180 		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
1181 		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
1182 		data->messageType = messageType;
1183 
1184 		if (callerArgSize == 0) {
1185 		        assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
1186 		        data->messageArgument[0] = (io_user_reference_t) messageArgument;
1187 		        if (!clientIs64) {
1188 		                data->messageArgument[0] |= (data->messageArgument[0] << 32);
1189 			}
1190 		} else {
1191 		        assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
1192 		        bcopy(messageArgument, data->messageArgument, callerArgSize);
1193 		}
1194 	});
1195 
1196 	if (thisPort) {
1197 		iokit_release_port( thisPort );
1198 	}
1199 	if (providerPort) {
1200 		iokit_release_port( providerPort );
1201 	}
1202 
1203 	if (kr == MACH_SEND_NO_BUFFER) {
1204 		return kIOReturnNoMemory;
1205 	}
1206 
1207 	if ((KERN_SUCCESS != kr) && !ipcLogged) {
1208 		ipcLogged = true;
1209 		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
1210 	}
1211 
1212 	return kIOReturnSuccess;
1213 }
1214 
1215 OSObject *
1216 IOServiceMessageUserNotification::getNextObject()
1217 {
1218 	return NULL;
1219 }
1220 
1221 OSObject *
1222 IOServiceMessageUserNotification::copyNextObject()
1223 {
1224 	return NULL;
1225 }
1226 
1227 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1228 
1229 #undef super
1230 #define super IOService
1231 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1232 
1233 IOLock       * gIOUserClientOwnersLock;
1234 
1235 static_assert(offsetof(IOUserClient, __opaque_end) -
1236     offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
1237     "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1238 
1239 void
1240 IOUserClient::initialize( void )
1241 {
1242 	gIOObjectPortLock       = IOLockAlloc();
1243 	gIOUserClientOwnersLock = IOLockAlloc();
1244 	gIOUserServerLock       = IOLockAlloc();
1245 	assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1246 
1247 #if IOTRACKING
1248 	IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1249 	IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1250 	IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1251 	IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1252 	IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1253 #endif /* IOTRACKING */
1254 }
1255 
1256 void
1257 #if __LP64__
1258 __attribute__((__noreturn__))
1259 #endif
1260 IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1261     mach_port_t wakePort,
1262     void *callback, void *refcon)
1263 {
1264 #if __LP64__
1265 	panic("setAsyncReference not valid for 64b");
1266 #else
1267 	asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
1268 	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1269 	asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
1270 	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1271 #endif
1272 }
1273 
1274 void
1275 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1276     mach_port_t wakePort,
1277     mach_vm_address_t callback, io_user_reference_t refcon)
1278 {
1279 	asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
1280 	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1281 	asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
1282 	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1283 }
1284 
1285 void
1286 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1287     mach_port_t wakePort,
1288     mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1289 {
1290 	setAsyncReference64(asyncRef, wakePort, callback, refcon);
1291 	if (vm_map_is_64bit(get_task_map(task))) {
1292 		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1293 	}
1294 }
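/*
 * A minimal sketch of the driver-side pattern these helpers support (the
 * variable names and result value are illustrative, not part of this file):
 * an async external method saves the OSAsyncReference64 found in its
 * IOExternalMethodArguments and completes the request later with
 * sendAsyncResult64().
 *
 *   // In the async external method ('arguments' is the IOExternalMethodArguments):
 *   OSAsyncReference64 savedRef;
 *   bcopy(arguments->asyncReference, savedRef, sizeof(OSAsyncReference64));
 *
 *   // Later, when the operation finishes:
 *   io_user_reference_t results[1] = { bytesTransferred };   // hypothetical value
 *   IOUserClient::sendAsyncResult64(savedRef, kIOReturnSuccess, results, 1);
 */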
1295 
1296 static OSDictionary *
1297 CopyConsoleUser(UInt32 uid)
1298 {
1299 	OSArray * array;
1300 	OSDictionary * user = NULL;
1301 
1302 	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1303 	if ((array = OSDynamicCast(OSArray, ioProperty))) {
1304 		for (unsigned int idx = 0;
1305 		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1306 		    idx++) {
1307 			OSNumber * num;
1308 
1309 			if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1310 			    && (uid == num->unsigned32BitValue())) {
1311 				user->retain();
1312 				break;
1313 			}
1314 		}
1315 	}
1316 	OSSafeReleaseNULL(ioProperty);
1317 	return user;
1318 }
1319 
1320 static OSDictionary *
1321 CopyUserOnConsole(void)
1322 {
1323 	OSArray * array;
1324 	OSDictionary * user = NULL;
1325 
1326 	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1327 	if ((array = OSDynamicCast(OSArray, ioProperty))) {
1328 		for (unsigned int idx = 0;
1329 		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1330 		    idx++) {
1331 			if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1332 				user->retain();
1333 				break;
1334 			}
1335 		}
1336 	}
1337 	OSSafeReleaseNULL(ioProperty);
1338 	return user;
1339 }
1340 
1341 IOReturn
1342 IOUserClient::clientHasAuthorization( task_t task,
1343     IOService * service )
1344 {
1345 	proc_t p;
1346 
1347 	p = (proc_t) get_bsdtask_info(task);
1348 	if (p) {
1349 		uint64_t authorizationID;
1350 
1351 		authorizationID = proc_uniqueid(p);
1352 		if (authorizationID) {
1353 			if (service->getAuthorizationID() == authorizationID) {
1354 				return kIOReturnSuccess;
1355 			}
1356 		}
1357 	}
1358 
1359 	return kIOReturnNotPermitted;
1360 }
1361 
1362 IOReturn
1363 IOUserClient::clientHasPrivilege( void * securityToken,
1364     const char * privilegeName )
1365 {
1366 	kern_return_t           kr;
1367 	security_token_t        token;
1368 	mach_msg_type_number_t  count;
1369 	task_t                  task;
1370 	OSDictionary *          user;
1371 	bool                    secureConsole;
1372 
1373 
1374 	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1375 	    sizeof(kIOClientPrivilegeForeground))) {
1376 		if (task_is_gpu_denied(current_task())) {
1377 			return kIOReturnNotPrivileged;
1378 		} else {
1379 			return kIOReturnSuccess;
1380 		}
1381 	}
1382 
1383 	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1384 	    sizeof(kIOClientPrivilegeConsoleSession))) {
1385 		kauth_cred_t cred;
1386 		proc_t       p;
1387 
1388 		task = (task_t) securityToken;
1389 		if (!task) {
1390 			task = current_task();
1391 		}
1392 		p = (proc_t) get_bsdtask_info(task);
1393 		kr = kIOReturnNotPrivileged;
1394 
1395 		if (p && (cred = kauth_cred_proc_ref(p))) {
1396 			user = CopyUserOnConsole();
1397 			if (user) {
1398 				OSNumber * num;
1399 				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1400 				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
1401 					kr = kIOReturnSuccess;
1402 				}
1403 				user->release();
1404 			}
1405 			kauth_cred_unref(&cred);
1406 		}
1407 		return kr;
1408 	}
1409 
1410 	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1411 	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
1412 		task = (task_t)((IOUCProcessToken *)securityToken)->token;
1413 	} else {
1414 		task = (task_t)securityToken;
1415 	}
1416 
1417 	count = TASK_SECURITY_TOKEN_COUNT;
1418 	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1419 
1420 	if (KERN_SUCCESS != kr) {
1421 	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1422 	    sizeof(kIOClientPrivilegeAdministrator))) {
1423 		if (0 != token.val[0]) {
1424 			kr = kIOReturnNotPrivileged;
1425 		}
1426 	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1427 	    sizeof(kIOClientPrivilegeLocalUser))) {
1428 		user = CopyConsoleUser(token.val[0]);
1429 		if (user) {
1430 			user->release();
1431 		} else {
1432 			kr = kIOReturnNotPrivileged;
1433 		}
1434 	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1435 	    sizeof(kIOClientPrivilegeConsoleUser))) {
1436 		user = CopyConsoleUser(token.val[0]);
1437 		if (user) {
1438 			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
1439 				kr = kIOReturnNotPrivileged;
1440 			} else if (secureConsole) {
1441 				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1442 				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
1443 					kr = kIOReturnNotPrivileged;
1444 				}
1445 			}
1446 			user->release();
1447 		} else {
1448 			kr = kIOReturnNotPrivileged;
1449 		}
1450 	} else {
1451 		kr = kIOReturnUnsupported;
1452 	}
1453 
1454 	return kr;
1455 }
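/*
 * A minimal sketch of typical usage (MyUserClient and the chosen privilege are
 * illustrative): subclasses commonly gate initWithTask() on one of the
 * privilege names checked above, passing the securityID they were handed as
 * the security token.
 *
 *   bool MyUserClient::initWithTask(task_t owningTask, void *securityID,
 *                                   UInt32 type, OSDictionary *properties)
 *   {
 *       // kIOClientPrivilegeAdministrator requires the task's security
 *       // token UID to be 0 (see the check above).
 *       if (kIOReturnSuccess != clientHasPrivilege(securityID,
 *           kIOClientPrivilegeAdministrator)) {
 *           return false;
 *       }
 *       return IOUserClient::initWithTask(owningTask, securityID, type, properties);
 *   }
 */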
1456 
1457 OSDictionary *
1458 IOUserClient::copyClientEntitlements(task_t task)
1459 {
1460 	proc_t p = NULL;
1461 	pid_t pid = 0;
1462 	OSDictionary *entitlements = NULL;
1463 
1464 	p = (proc_t)get_bsdtask_info(task);
1465 	if (p == NULL) {
1466 		return NULL;
1467 	}
1468 	pid = proc_pid(p);
1469 
1470 	if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1471 		if (entitlements) {
1472 			return entitlements;
1473 		}
1474 	}
1475 
1476 	// If the above fails, that's it
1477 	return NULL;
1478 }
1479 
1480 OSDictionary *
1481 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1482 {
1483 	OSDictionary *entitlements = NULL;
1484 
1485 	if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1486 		return NULL;
1487 	}
1488 	return entitlements;
1489 }
1490 
1491 OSObject *
1492 IOUserClient::copyClientEntitlement( task_t task,
1493     const char * entitlement )
1494 {
1495 	void *entitlement_object = NULL;
1496 
1497 	if (task == NULL) {
1498 		task = current_task();
1499 	}
1500 
1501 	/* Validate input arguments */
1502 	if (task == kernel_task || entitlement == NULL) {
1503 		return NULL;
1504 	}
1505 	proc_t proc = (proc_t)get_bsdtask_info(task);
1506 
1507 	kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
1508 		proc,
1509 		entitlement,
1510 		&entitlement_object);
1511 
1512 	if (ret != KERN_SUCCESS) {
1513 		return NULL;
1514 	}
1515 	assert(entitlement_object != NULL);
1516 
1517 	return (OSObject*)entitlement_object;
1518 }
1519 
1520 OSObject *
1521 IOUserClient::copyClientEntitlementVnode(
1522 	struct vnode *vnode,
1523 	off_t offset,
1524 	const char *entitlement)
1525 {
1526 	OSDictionary *entitlements;
1527 	OSObject *value;
1528 
1529 	entitlements = copyClientEntitlementsVnode(vnode, offset);
1530 	if (entitlements == NULL) {
1531 		return NULL;
1532 	}
1533 
1534 	/* Fetch the entitlement value from the dictionary. */
1535 	value = entitlements->getObject(entitlement);
1536 	if (value != NULL) {
1537 		value->retain();
1538 	}
1539 
1540 	entitlements->release();
1541 	return value;
1542 }
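/*
 * A minimal sketch of entitlement gating (the entitlement key is hypothetical):
 * a boolean entitlement on the owning task comes back as kOSBooleanTrue, so a
 * pointer comparison is sufficient.
 *
 *   OSObject * entitled = IOUserClient::copyClientEntitlement(owningTask,
 *       "com.example.driver.allow-user-client");     // hypothetical key
 *   bool allow = (entitled == kOSBooleanTrue);
 *   OSSafeReleaseNULL(entitled);
 *   if (!allow) {
 *       return false;    // e.g. reject the connection in initWithTask()
 *   }
 */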
1543 
1544 bool
1545 IOUserClient::init()
1546 {
1547 	if (getPropertyTable() || super::init()) {
1548 		return reserve();
1549 	}
1550 
1551 	return false;
1552 }
1553 
1554 bool
1555 IOUserClient::init(OSDictionary * dictionary)
1556 {
1557 	if (getPropertyTable() || super::init(dictionary)) {
1558 		return reserve();
1559 	}
1560 
1561 	return false;
1562 }
1563 
1564 bool
1565 IOUserClient::initWithTask(task_t owningTask,
1566     void * securityID,
1567     UInt32 type )
1568 {
1569 	if (getPropertyTable() || super::init()) {
1570 		return reserve();
1571 	}
1572 
1573 	return false;
1574 }
1575 
1576 bool
1577 IOUserClient::initWithTask(task_t owningTask,
1578     void * securityID,
1579     UInt32 type,
1580     OSDictionary * properties )
1581 {
1582 	bool ok;
1583 
1584 	ok = super::init( properties );
1585 	ok &= initWithTask( owningTask, securityID, type );
1586 
1587 	return ok;
1588 }
1589 
1590 bool
1591 IOUserClient::reserve()
1592 {
1593 	if (!reserved) {
1594 		reserved = IOMallocType(ExpansionData);
1595 	}
1596 	setTerminateDefer(NULL, true);
1597 	IOStatisticsRegisterCounter();
1598 	IORWLockInlineInit(&lock);
1599 	IOLockInlineInit(&filterLock);
1600 
1601 	return true;
1602 }
1603 
1604 struct IOUserClientOwner {
1605 	task_t         task;
1606 	queue_chain_t  taskLink;
1607 	IOUserClient * uc;
1608 	queue_chain_t  ucLink;
1609 };
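/*
 * Each IOUserClientOwner is linked onto two queues at once: the user client's
 * own `owners` list via ucLink (see registerOwner() below) and the owning
 * task's list returned by task_io_user_clients() via taskLink, so the
 * association can be torn down from either side, in noMoreSenders() or in
 * iokit_task_terminate().
 */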
1610 
1611 IOReturn
1612 IOUserClient::registerOwner(task_t task)
1613 {
1614 	IOUserClientOwner * owner;
1615 	IOReturn            ret;
1616 	bool                newOwner;
1617 
1618 	IOLockLock(gIOUserClientOwnersLock);
1619 
1620 	newOwner = true;
1621 	ret = kIOReturnSuccess;
1622 
1623 	if (!owners.next) {
1624 		queue_init(&owners);
1625 	} else {
1626 		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1627 		{
1628 			if (task != owner->task) {
1629 				continue;
1630 			}
1631 			newOwner = false;
1632 			break;
1633 		}
1634 	}
1635 	if (newOwner) {
1636 		owner = IOMallocType(IOUserClientOwner);
1637 
1638 		owner->task = task;
1639 		owner->uc   = this;
1640 		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1641 		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1642 		if (messageAppSuspended) {
1643 			task_set_message_app_suspended(task, true);
1644 		}
1645 	}
1646 
1647 	IOLockUnlock(gIOUserClientOwnersLock);
1648 
1649 	return ret;
1650 }
1651 
1652 void
1653 IOUserClient::noMoreSenders(void)
1654 {
1655 	IOUserClientOwner * owner;
1656 	IOUserClientOwner * iter;
1657 	queue_head_t      * taskque;
1658 	bool                hasMessageAppSuspended;
1659 
1660 	IOLockLock(gIOUserClientOwnersLock);
1661 
1662 	if (owners.next) {
1663 		while (!queue_empty(&owners)) {
1664 			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1665 			taskque = task_io_user_clients(owner->task);
1666 			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1667 			hasMessageAppSuspended = false;
1668 			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1669 				hasMessageAppSuspended = iter->uc->messageAppSuspended;
1670 				if (hasMessageAppSuspended) {
1671 					break;
1672 				}
1673 			}
1674 			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
1675 			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1676 			IOFreeType(owner, IOUserClientOwner);
1677 		}
1678 		owners.next = owners.prev = NULL;
1679 	}
1680 
1681 	IOLockUnlock(gIOUserClientOwnersLock);
1682 }
1683 
1684 
1685 extern "C" void
1686 iokit_task_app_suspended_changed(task_t task)
1687 {
1688 	queue_head_t      * taskque;
1689 	IOUserClientOwner * owner;
1690 	OSSet             * set;
1691 
1692 	IOLockLock(gIOUserClientOwnersLock);
1693 
1694 	taskque = task_io_user_clients(task);
1695 	set = NULL;
1696 	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
1697 		if (!owner->uc->messageAppSuspended) {
1698 			continue;
1699 		}
1700 		if (!set) {
1701 			set = OSSet::withCapacity(4);
1702 			if (!set) {
1703 				break;
1704 			}
1705 		}
1706 		set->setObject(owner->uc);
1707 	}
1708 
1709 	IOLockUnlock(gIOUserClientOwnersLock);
1710 
1711 	if (set) {
1712 		set->iterateObjects(^bool (OSObject * obj) {
1713 			IOUserClient      * uc;
1714 
1715 			uc = (typeof(uc))obj;
1716 #if 0
1717 			{
1718 			        OSString          * str;
1719 			        str = IOCopyLogNameForPID(task_pid(task));
1720 			        IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
1721 			        uc->getName(), task_is_app_suspended(task));
1722 			        OSSafeReleaseNULL(str);
1723 			}
1724 #endif
1725 			uc->message(kIOMessageTaskAppSuspendedChange, NULL);
1726 
1727 			return false;
1728 		});
1729 		set->release();
1730 	}
1731 }
1732 
1733 extern "C" kern_return_t
1734 iokit_task_terminate(task_t task)
1735 {
1736 	IOUserClientOwner * owner;
1737 	IOUserClient      * dead;
1738 	IOUserClient      * uc;
1739 	queue_head_t      * taskque;
1740 
1741 	IOLockLock(gIOUserClientOwnersLock);
1742 
1743 	taskque = task_io_user_clients(task);
1744 	dead = NULL;
1745 	while (!queue_empty(taskque)) {
1746 		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1747 		uc = owner->uc;
1748 		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1749 		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1750 		if (queue_empty(&uc->owners)) {
1751 			uc->retain();
1752 			IOLog("destroying out of band connect for %s\n", uc->getName());
1753 			// now using the uc queue head as a singly linked queue,
1754 			// leaving .next as NULL to mark it empty
1755 			uc->owners.next = NULL;
1756 			uc->owners.prev = (queue_entry_t) dead;
1757 			dead = uc;
1758 		}
1759 		IOFreeType(owner, IOUserClientOwner);
1760 	}
1761 
1762 	IOLockUnlock(gIOUserClientOwnersLock);
1763 
1764 	while (dead) {
1765 		uc = dead;
1766 		dead = (IOUserClient *)(void *) dead->owners.prev;
1767 		uc->owners.prev = NULL;
1768 		if (uc->sharedInstance || !uc->closed) {
1769 			uc->clientDied();
1770 		}
1771 		uc->release();
1772 	}
1773 
1774 	return KERN_SUCCESS;
1775 }
1776 
1777 struct IOUCFilterPolicy {
1778 	task_t             task;
1779 	io_filter_policy_t filterPolicy;
1780 	IOUCFilterPolicy * next;
1781 };
1782 
1783 io_filter_policy_t
1784 IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
1785 {
1786 	IOUCFilterPolicy * elem;
1787 	io_filter_policy_t filterPolicy;
1788 
1789 	filterPolicy = 0;
1790 	IOLockLock(&filterLock);
1791 
1792 	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
1793 	}
1794 
1795 	if (elem) {
1796 		if (addFilterPolicy) {
1797 			assert(addFilterPolicy == elem->filterPolicy);
1798 		}
1799 		filterPolicy = elem->filterPolicy;
1800 	} else if (addFilterPolicy) {
1801 		elem = IOMallocType(IOUCFilterPolicy);
1802 		elem->task               = task;
1803 		elem->filterPolicy       = addFilterPolicy;
1804 		elem->next               = reserved->filterPolicies;
1805 		reserved->filterPolicies = elem;
1806 		filterPolicy = addFilterPolicy;
1807 	}
1808 
1809 	IOLockUnlock(&filterLock);
1810 	return filterPolicy;
1811 }
1812 
1813 void
1814 IOUserClient::free()
1815 {
1816 	if (mappings) {
1817 		mappings->release();
1818 	}
1819 
1820 	IOStatisticsUnregisterCounter();
1821 
1822 	assert(!owners.next);
1823 	assert(!owners.prev);
1824 
1825 	if (reserved) {
1826 		IOUCFilterPolicy * elem;
1827 		IOUCFilterPolicy * nextElem;
1828 		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
1829 			nextElem = elem->next;
1830 			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
1831 				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
1832 			}
1833 			IOFreeType(elem, IOUCFilterPolicy);
1834 		}
1835 		IOFreeType(reserved, ExpansionData);
1836 		IORWLockInlineDestroy(&lock);
1837 		IOLockInlineDestroy(&filterLock);
1838 	}
1839 
1840 	super::free();
1841 }
1842 
1843 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1844 
1845 OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1846 
1847 
1848 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1849 
1850 IOReturn
1851 IOUserClient::clientDied( void )
1852 {
1853 	IOReturn ret = kIOReturnNotReady;
1854 
1855 	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1856 		ret = clientClose();
1857 	}
1858 
1859 	return ret;
1860 }
1861 
1862 IOReturn
1863 IOUserClient::clientClose( void )
1864 {
1865 	return kIOReturnUnsupported;
1866 }
1867 
1868 IOService *
1869 IOUserClient::getService( void )
1870 {
1871 	return NULL;
1872 }
1873 
1874 IOReturn
1875 IOUserClient::registerNotificationPort(
1876 	mach_port_t     /* port */,
1877 	UInt32          /* type */,
1878 	UInt32          /* refCon */)
1879 {
1880 	return kIOReturnUnsupported;
1881 }
1882 
1883 IOReturn
1884 IOUserClient::registerNotificationPort(
1885 	mach_port_t port,
1886 	UInt32          type,
1887 	io_user_reference_t refCon)
1888 {
1889 	return registerNotificationPort(port, type, (UInt32) refCon);
1890 }
1891 
1892 IOReturn
1893 IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1894     semaphore_t * semaphore )
1895 {
1896 	return kIOReturnUnsupported;
1897 }
1898 
1899 IOReturn
1900 IOUserClient::connectClient( IOUserClient * /* client */ )
1901 {
1902 	return kIOReturnUnsupported;
1903 }
1904 
1905 IOReturn
1906 IOUserClient::clientMemoryForType( UInt32 type,
1907     IOOptionBits * options,
1908     IOMemoryDescriptor ** memory )
1909 {
1910 	return kIOReturnUnsupported;
1911 }
1912 
1913 IOReturn
1914 IOUserClient::clientMemoryForType( UInt32 type,
1915     IOOptionBits * options,
1916     OSSharedPtr<IOMemoryDescriptor>& memory )
1917 {
1918 	IOMemoryDescriptor* memoryRaw = nullptr;
1919 	IOReturn result = clientMemoryForType(type, options, &memoryRaw);
1920 	memory.reset(memoryRaw, OSNoRetain);
1921 	return result;
1922 }
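/*
 * A minimal sketch of a subclass override (MyUserClient and the buffer size are
 * illustrative): clientMemoryForType() returns a retained descriptor, which
 * mapClientMemory64() below releases once the user mapping has been created.
 *
 *   IOReturn MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits *options,
 *                                              IOMemoryDescriptor **memory)
 *   {
 *       IOBufferMemoryDescriptor *buf =
 *           IOBufferMemoryDescriptor::withCapacity(PAGE_SIZE, kIODirectionOutIn);
 *       if (!buf) {
 *           return kIOReturnNoMemory;
 *       }
 *       *options = 0;
 *       *memory  = buf;    // returned with a reference; consumed by the caller
 *       return kIOReturnSuccess;
 *   }
 */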
1923 
1924 #if !__LP64__
1925 IOMemoryMap *
1926 IOUserClient::mapClientMemory(
1927 	IOOptionBits            type,
1928 	task_t                  task,
1929 	IOOptionBits            mapFlags,
1930 	IOVirtualAddress        atAddress )
1931 {
1932 	return NULL;
1933 }
1934 #endif
1935 
1936 IOMemoryMap *
1937 IOUserClient::mapClientMemory64(
1938 	IOOptionBits            type,
1939 	task_t                  task,
1940 	IOOptionBits            mapFlags,
1941 	mach_vm_address_t       atAddress )
1942 {
1943 	IOReturn            err;
1944 	IOOptionBits        options = 0;
1945 	IOMemoryDescriptor * memory = NULL;
1946 	IOMemoryMap *       map = NULL;
1947 
1948 	err = clientMemoryForType((UInt32) type, &options, &memory );
1949 
1950 	if (memory && (kIOReturnSuccess == err)) {
1951 		FAKE_STACK_FRAME(getMetaClass());
1952 
1953 		options = (options & ~kIOMapUserOptionsMask)
1954 		    | (mapFlags & kIOMapUserOptionsMask);
1955 		map = memory->createMappingInTask( task, atAddress, options );
1956 		memory->release();
1957 
1958 		FAKE_STACK_FRAME_END();
1959 	}
1960 
1961 	return map;
1962 }
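
// mapClientMemory64() pulls the descriptor from the subclass via clientMemoryForType()
// and then lets the caller's mapFlags override only the bits inside kIOMapUserOptionsMask
// before mapping into the target task. A minimal sketch of a hypothetical subclass
// override (MyUserClient, kMyBufferType and fBuffer are illustrative, not from this file):
#if 0
IOReturn
MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits * options,
    IOMemoryDescriptor ** memory)
{
	if ((type != kMyBufferType) || (fBuffer == NULL)) {
		return kIOReturnBadArgument;
	}
	fBuffer->retain();      // mapClientMemory64() releases the descriptor after mapping
	*memory = fBuffer;
	return kIOReturnSuccess;
}
#endif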
1963 
1964 IOReturn
1965 IOUserClient::exportObjectToClient(task_t task,
1966     OSObject *obj, io_object_t *clientObj)
1967 {
1968 	mach_port_name_t    name;
1969 
1970 	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1971 
1972 	*clientObj = (io_object_t)(uintptr_t) name;
1973 
1974 	if (obj) {
1975 		obj->release();
1976 	}
1977 
1978 	return kIOReturnSuccess;
1979 }
1980 
1981 IOReturn
1982 IOUserClient::copyPortNameForObjectInTask(task_t task,
1983     OSObject *obj, mach_port_name_t * port_name)
1984 {
1985 	mach_port_name_t    name;
1986 
1987 	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1988 
1989 	*(mach_port_name_t *) port_name = name;
1990 
1991 	return kIOReturnSuccess;
1992 }
1993 
1994 IOReturn
1995 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
1996     OSObject **obj)
1997 {
1998 	OSObject * object;
1999 
2000 	object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2001 
2002 	*obj = object;
2003 
2004 	return object ? kIOReturnSuccess : kIOReturnIPCError;
2005 }
2006 
2007 IOReturn
2008 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2009     OSSharedPtr<OSObject>& obj)
2010 {
2011 	OSObject* objRaw = NULL;
2012 	IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2013 	obj.reset(objRaw, OSNoRetain);
2014 	return result;
2015 }
2016 
2017 IOReturn
2018 IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
2019 {
2020 	return iokit_mod_send_right(task, port_name, delta);
2021 }
2022 
2023 IOExternalMethod *
2024 IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
2025 {
2026 	return NULL;
2027 }
2028 
2029 IOExternalAsyncMethod *
2030 IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
2031 {
2032 	return NULL;
2033 }
2034 
2035 IOExternalTrap *
2036 IOUserClient::
2037 getExternalTrapForIndex(UInt32 index)
2038 {
2039 	return NULL;
2040 }
2041 
2042 #pragma clang diagnostic push
2043 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2044 
2045 // Suppress the deprecated-declarations warning: avoiding these deprecated
2046 // functions would break clients of kexts that implement getExternalMethodForIndex().
2047 IOExternalMethod *
2048 IOUserClient::
2049 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2050 {
2051 	IOExternalMethod *method = getExternalMethodForIndex(index);
2052 
2053 	if (method) {
2054 		*targetP = (IOService *) method->object;
2055 	}
2056 
2057 	return method;
2058 }
2059 
2060 IOExternalMethod *
2061 IOUserClient::
2062 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2063 {
2064 	IOService* targetPRaw = NULL;
2065 	IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2066 	targetP.reset(targetPRaw, OSRetain);
2067 	return result;
2068 }
2069 
2070 IOExternalAsyncMethod *
2071 IOUserClient::
2072 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2073 {
2074 	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2075 
2076 	if (method) {
2077 		*targetP = (IOService *) method->object;
2078 	}
2079 
2080 	return method;
2081 }
2082 
2083 IOExternalAsyncMethod *
2084 IOUserClient::
2085 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2086 {
2087 	IOService* targetPRaw = NULL;
2088 	IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2089 	targetP.reset(targetPRaw, OSRetain);
2090 	return result;
2091 }
2092 
2093 IOExternalTrap *
2094 IOUserClient::
2095 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2096 {
2097 	IOExternalTrap *trap = getExternalTrapForIndex(index);
2098 
2099 	if (trap) {
2100 		*targetP = trap->object;
2101 	}
2102 
2103 	return trap;
2104 }
2105 #pragma clang diagnostic pop
2106 
2107 IOReturn
2108 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2109 {
2110 	mach_port_t port;
2111 	port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2112 
2113 	if (MACH_PORT_NULL != port) {
2114 		iokit_release_port_send(port);
2115 	}
2116 
2117 	return kIOReturnSuccess;
2118 }
2119 
2120 IOReturn
2121 IOUserClient::releaseNotificationPort(mach_port_t port)
2122 {
2123 	if (MACH_PORT_NULL != port) {
2124 		iokit_release_port_send(port);
2125 	}
2126 
2127 	return kIOReturnSuccess;
2128 }
2129 
2130 IOReturn
2131 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2132     IOReturn result, void *args[], UInt32 numArgs)
2133 {
2134 	OSAsyncReference64  reference64;
2135 	OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
2136 	unsigned int        idx;
2137 
2138 	if (numArgs > kMaxAsyncArgs) {
2139 		return kIOReturnMessageTooLarge;
2140 	}
2141 
2142 	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2143 		reference64[idx] = REF64(reference[idx]);
2144 	}
2145 
2146 	for (idx = 0; idx < numArgs; idx++) {
2147 		args64[idx] = REF64(args[idx]);
2148 	}
2149 
2150 	return sendAsyncResult64(reference64, result, args64.data(), numArgs);
2151 }
2152 
2153 IOReturn
2154 IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
2155     IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2156 {
2157 	return _sendAsyncResult64(reference, result, args, numArgs, options);
2158 }
2159 
2160 IOReturn
2161 IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
2162     IOReturn result, io_user_reference_t args[], UInt32 numArgs)
2163 {
2164 	return _sendAsyncResult64(reference, result, args, numArgs, 0);
2165 }
2166 
2167 IOReturn
2168 IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
2169     IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2170 {
2171 	struct ReplyMsg {
2172 		mach_msg_header_t msgHdr;
2173 		union{
2174 			struct{
2175 				OSNotificationHeader     notifyHdr;
2176 				IOAsyncCompletionContent asyncContent;
2177 				uint32_t                 args[kMaxAsyncArgs];
2178 			} msg32;
2179 			struct{
2180 				OSNotificationHeader64   notifyHdr;
2181 				IOAsyncCompletionContent asyncContent;
2182 				io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
2183 			} msg64;
2184 		} m;
2185 	};
2186 	ReplyMsg      replyMsg;
2187 	mach_port_t   replyPort;
2188 	kern_return_t kr;
2189 
2190 	// If no reply port, do nothing.
2191 	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2192 	if (replyPort == MACH_PORT_NULL) {
2193 		return kIOReturnSuccess;
2194 	}
2195 
2196 	if (numArgs > kMaxAsyncArgs) {
2197 		return kIOReturnMessageTooLarge;
2198 	}
2199 
2200 	bzero(&replyMsg, sizeof(replyMsg));
2201 	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
2202 	    0 /*local*/);
2203 	replyMsg.msgHdr.msgh_remote_port = replyPort;
2204 	replyMsg.msgHdr.msgh_local_port  = NULL;
2205 	replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
2206 	if (kIOUCAsync64Flag & reference[0]) {
2207 		replyMsg.msgHdr.msgh_size =
2208 		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
2209 		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
2210 		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2211 		    + numArgs * sizeof(io_user_reference_t);
2212 		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
2213 		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
2214 		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));
2215 
2216 		replyMsg.m.msg64.asyncContent.result = result;
2217 		if (numArgs) {
2218 			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
2219 		}
2220 	} else {
2221 		unsigned int idx;
2222 
2223 		replyMsg.msgHdr.msgh_size =
2224 		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
2225 		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
2226 
2227 		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2228 		    + numArgs * sizeof(uint32_t);
2229 		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
2230 
2231 		/* Skip reference[0] which is left as 0 from the earlier bzero */
2232 		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
2233 			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
2234 		}
2235 
2236 		replyMsg.m.msg32.asyncContent.result = result;
2237 
2238 		for (idx = 0; idx < numArgs; idx++) {
2239 			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
2240 		}
2241 	}
2242 
2243 	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
2244 		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
2245 		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
2246 	} else {
2247 		/* Fail on full queue. */
2248 		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
2249 		    replyMsg.msgHdr.msgh_size);
2250 	}
2251 	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
2252 		reference[0] |= kIOUCAsyncErrorLoggedFlag;
2253 		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
2254 	}
2255 	return kr;
2256 }
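
// _sendAsyncResult64() formats the completion as either the 32-bit or 64-bit
// notification layout, selected by the kIOUCAsync64Flag recorded in reference[0],
// and sends it to the reply port also stashed in reference[0]. A sketch of a
// driver-side completion using this path (MyUserClient, completeRead and the saved
// asyncRef are illustrative, not from this file):
#if 0
void
MyUserClient::completeRead(OSAsyncReference64 asyncRef, IOReturn status, uint32_t byteCount)
{
	io_user_reference_t args[1];

	args[0] = byteCount;
	// Use sendAsyncResult64WithOptions() with kIOUserNotifyOptionCanDrop if it is
	// acceptable to drop the message when the reply port's queue is full.
	sendAsyncResult64(asyncRef, status, args, 1);
}
#endif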
2257 
2258 
2259 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2260 
2261 extern "C" {
2262 #define CHECK(cls, obj, out)                      \
2263 	cls * out;                              \
2264 	if( !(out = OSDynamicCast( cls, obj)))  \
2265 	    return( kIOReturnBadArgument )
2266 
2267 #define CHECKLOCKED(cls, obj, out)                                        \
2268 	IOUserIterator * oIter;                                         \
2269 	cls * out;                                                      \
2270 	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))              \
2271 	    return (kIOReturnBadArgument);                              \
2272 	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))     \
2273 	    return (kIOReturnBadArgument)
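
/*
 * CHECK() declares a typed local and rejects the call with kIOReturnBadArgument when
 * the incoming object is not of the expected class; CHECKLOCKED() additionally unwraps
 * the IOUserIterator holder so the caller can take oIter->lock. For example,
 * CHECK(IOService, _service, service) expands to roughly:
 *
 *	IOService * service;
 *	if (!(service = OSDynamicCast(IOService, _service))) {
 *		return kIOReturnBadArgument;
 *	}
 */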
2274 
2275 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2276 
2277 // Create a vm_map_copy_t or kalloc'ed data for memory
2278 // to be copied out. The IPC layer will free it after the copyout.
2279 
2280 static kern_return_t
2281 copyoutkdata( const void * data, vm_size_t len,
2282     io_buf_ptr_t * buf )
2283 {
2284 	kern_return_t       err;
2285 	vm_map_copy_t       copy;
2286 
2287 	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2288 	    false /* src_destroy */, &copy);
2289 
2290 	assert( err == KERN_SUCCESS );
2291 	if (err == KERN_SUCCESS) {
2292 		*buf = (char *) copy;
2293 	}
2294 
2295 	return err;
2296 }
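
// The pattern used by the reply-buffer producers below is: serialize, report the
// length, and let copyoutkdata() create the vm_map_copy_t that MIG hands to the
// caller and the IPC layer frees after the copyout. Condensed sketch of that
// pattern (variable names follow the property getters later in this file):
#if 0
	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (s && obj->serialize(s)) {
		*propertiesCnt = s->getLength();
		err = copyoutkdata(s->text(), s->getLength(), properties);
	}
	OSSafeReleaseNULL(s);
#endif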
2297 
2298 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2299 
2300 /* Routine io_server_version */
2301 kern_return_t
2302 is_io_server_version(
2303 	mach_port_t main_port,
2304 	uint64_t *version)
2305 {
2306 	*version = IOKIT_SERVER_VERSION;
2307 	return kIOReturnSuccess;
2308 }
2309 
2310 /* Routine io_object_get_class */
2311 kern_return_t
2312 is_io_object_get_class(
2313 	io_object_t object,
2314 	io_name_t className )
2315 {
2316 	const OSMetaClass* my_obj = NULL;
2317 
2318 	if (!object) {
2319 		return kIOReturnBadArgument;
2320 	}
2321 
2322 	my_obj = object->getMetaClass();
2323 	if (!my_obj) {
2324 		return kIOReturnNotFound;
2325 	}
2326 
2327 	strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2328 
2329 	return kIOReturnSuccess;
2330 }
2331 
2332 /* Routine io_object_get_superclass */
2333 kern_return_t
2334 is_io_object_get_superclass(
2335 	mach_port_t main_port,
2336 	io_name_t obj_name,
2337 	io_name_t class_name)
2338 {
2339 	IOReturn            ret;
2340 	const OSMetaClass * meta;
2341 	const OSMetaClass * super;
2342 	const OSSymbol    * name;
2343 	const char        * cstr;
2344 
2345 	if (!obj_name || !class_name) {
2346 		return kIOReturnBadArgument;
2347 	}
2348 	if (main_port != main_device_port) {
2349 		return kIOReturnNotPrivileged;
2350 	}
2351 
2352 	ret = kIOReturnNotFound;
2353 	meta = NULL;
2354 	do{
2355 		name = OSSymbol::withCString(obj_name);
2356 		if (!name) {
2357 			break;
2358 		}
2359 		meta = OSMetaClass::copyMetaClassWithName(name);
2360 		if (!meta) {
2361 			break;
2362 		}
2363 		super = meta->getSuperClass();
2364 		if (!super) {
2365 			break;
2366 		}
2367 		cstr = super->getClassName();
2368 		if (!cstr) {
2369 			break;
2370 		}
2371 		strlcpy(class_name, cstr, sizeof(io_name_t));
2372 		ret = kIOReturnSuccess;
2373 	}while (false);
2374 
2375 	OSSafeReleaseNULL(name);
2376 	if (meta) {
2377 		meta->releaseMetaClass();
2378 	}
2379 
2380 	return ret;
2381 }
2382 
2383 /* Routine io_object_get_bundle_identifier */
2384 kern_return_t
2385 is_io_object_get_bundle_identifier(
2386 	mach_port_t main_port,
2387 	io_name_t obj_name,
2388 	io_name_t bundle_name)
2389 {
2390 	IOReturn            ret;
2391 	const OSMetaClass * meta;
2392 	const OSSymbol    * name;
2393 	const OSSymbol    * identifier;
2394 	const char        * cstr;
2395 
2396 	if (!obj_name || !bundle_name) {
2397 		return kIOReturnBadArgument;
2398 	}
2399 	if (main_port != main_device_port) {
2400 		return kIOReturnNotPrivileged;
2401 	}
2402 
2403 	ret = kIOReturnNotFound;
2404 	meta = NULL;
2405 	do{
2406 		name = OSSymbol::withCString(obj_name);
2407 		if (!name) {
2408 			break;
2409 		}
2410 		meta = OSMetaClass::copyMetaClassWithName(name);
2411 		if (!meta) {
2412 			break;
2413 		}
2414 		identifier = meta->getKmodName();
2415 		if (!identifier) {
2416 			break;
2417 		}
2418 		cstr = identifier->getCStringNoCopy();
2419 		if (!cstr) {
2420 			break;
2421 		}
2422 		strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2423 		ret = kIOReturnSuccess;
2424 	}while (false);
2425 
2426 	OSSafeReleaseNULL(name);
2427 	if (meta) {
2428 		meta->releaseMetaClass();
2429 	}
2430 
2431 	return ret;
2432 }
2433 
2434 /* Routine io_object_conforms_to */
2435 kern_return_t
2436 is_io_object_conforms_to(
2437 	io_object_t object,
2438 	io_name_t className,
2439 	boolean_t *conforms )
2440 {
2441 	if (!object) {
2442 		return kIOReturnBadArgument;
2443 	}
2444 
2445 	*conforms = (NULL != object->metaCast( className ));
2446 
2447 	return kIOReturnSuccess;
2448 }
2449 
2450 /* Routine io_object_get_retain_count */
2451 kern_return_t
2452 is_io_object_get_retain_count(
2453 	io_object_t object,
2454 	uint32_t *retainCount )
2455 {
2456 	if (!object) {
2457 		return kIOReturnBadArgument;
2458 	}
2459 
2460 	*retainCount = object->getRetainCount();
2461 	return kIOReturnSuccess;
2462 }
2463 
2464 /* Routine io_iterator_next */
2465 kern_return_t
2466 is_io_iterator_next(
2467 	io_object_t iterator,
2468 	io_object_t *object )
2469 {
2470 	IOReturn    ret;
2471 	OSObject *  obj;
2472 	OSIterator * iter;
2473 	IOUserIterator * uiter;
2474 
2475 	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2476 		obj = uiter->copyNextObject();
2477 	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2478 		obj = iter->getNextObject();
2479 		if (obj) {
2480 			obj->retain();
2481 		}
2482 	} else {
2483 		return kIOReturnBadArgument;
2484 	}
2485 
2486 	if (obj) {
2487 		*object = obj;
2488 		ret = kIOReturnSuccess;
2489 	} else {
2490 		ret = kIOReturnNoDevice;
2491 	}
2492 
2493 	return ret;
2494 }
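
// Both branches return a retained object: IOUserIterator::copyNextObject() already
// hands back a reference, while a bare OSIterator's getNextObject() is borrowed and
// must be retained before it escapes to the port machinery. Exhaustion is reported
// as kIOReturnNoDevice, which the user-space wrapper surfaces as a NULL object.
// Sketch of the user-space loop this serves, assuming the standard IOKitLib calls:
#if 0
	io_object_t next;
	while ((next = IOIteratorNext(iter)) != IO_OBJECT_NULL) {
		/* ... use next ... */
		IOObjectRelease(next);   // each returned object carries one send right
	}
#endif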
2495 
2496 /* Routine io_iterator_reset */
2497 kern_return_t
2498 is_io_iterator_reset(
2499 	io_object_t iterator )
2500 {
2501 	CHECK( OSIterator, iterator, iter );
2502 
2503 	iter->reset();
2504 
2505 	return kIOReturnSuccess;
2506 }
2507 
2508 /* Routine io_iterator_is_valid */
2509 kern_return_t
2510 is_io_iterator_is_valid(
2511 	io_object_t iterator,
2512 	boolean_t *is_valid )
2513 {
2514 	CHECK( OSIterator, iterator, iter );
2515 
2516 	*is_valid = iter->isValid();
2517 
2518 	return kIOReturnSuccess;
2519 }
2520 
2521 static kern_return_t
2522 internal_io_service_match_property_table(
2523 	io_service_t _service,
2524 	const char * matching,
2525 	mach_msg_type_number_t matching_size,
2526 	boolean_t *matches)
2527 {
2528 	CHECK( IOService, _service, service );
2529 
2530 	kern_return_t       kr;
2531 	OSObject *          obj;
2532 	OSDictionary *      dict;
2533 
2534 	assert(matching_size);
2535 
2536 
2537 	obj = OSUnserializeXML(matching, matching_size);
2538 
2539 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2540 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2541 		*matches = service->passiveMatch( dict );
2542 		kr = kIOReturnSuccess;
2543 	} else {
2544 		kr = kIOReturnBadArgument;
2545 	}
2546 
2547 	if (obj) {
2548 		obj->release();
2549 	}
2550 
2551 	return kr;
2552 }
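
/*
 * The inband and out-of-line entry points below both funnel into this helper, which
 * unserializes the caller's XML matching dictionary and runs passive matching against
 * a single service. For illustration, a minimal matching plist that user space might
 * send here (a sketch; this particular dictionary is not from this file):
 *
 *	<dict>
 *		<key>IOProviderClass</key>
 *		<string>IOMedia</string>
 *	</dict>
 *
 * OSUnserializeXML() turns that text into the OSDictionary handed to passiveMatch().
 */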
2553 
2554 /* Routine io_service_match_property_table */
2555 kern_return_t
2556 is_io_service_match_property_table(
2557 	io_service_t service,
2558 	io_string_t matching,
2559 	boolean_t *matches )
2560 {
2561 	return kIOReturnUnsupported;
2562 }
2563 
2564 
2565 /* Routine io_service_match_property_table_ool */
2566 kern_return_t
2567 is_io_service_match_property_table_ool(
2568 	io_object_t service,
2569 	io_buf_ptr_t matching,
2570 	mach_msg_type_number_t matchingCnt,
2571 	kern_return_t *result,
2572 	boolean_t *matches )
2573 {
2574 	kern_return_t         kr;
2575 	vm_offset_t           data;
2576 	vm_map_offset_t       map_data;
2577 
2578 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2579 	data = CAST_DOWN(vm_offset_t, map_data);
2580 
2581 	if (KERN_SUCCESS == kr) {
2582 		// must return success after vm_map_copyout() succeeds
2583 		*result = internal_io_service_match_property_table(service,
2584 		    (const char *)data, matchingCnt, matches );
2585 		vm_deallocate( kernel_map, data, matchingCnt );
2586 	}
2587 
2588 	return kr;
2589 }
2590 
2591 /* Routine io_service_match_property_table_bin */
2592 kern_return_t
2593 is_io_service_match_property_table_bin(
2594 	io_object_t service,
2595 	io_struct_inband_t matching,
2596 	mach_msg_type_number_t matchingCnt,
2597 	boolean_t *matches)
2598 {
2599 	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
2600 }
2601 
2602 static kern_return_t
2603 internal_io_service_get_matching_services(
2604 	mach_port_t main_port,
2605 	const char * matching,
2606 	mach_msg_type_number_t matching_size,
2607 	io_iterator_t *existing )
2608 {
2609 	kern_return_t       kr;
2610 	OSObject *          obj;
2611 	OSDictionary *      dict;
2612 
2613 	if (main_port != main_device_port) {
2614 		return kIOReturnNotPrivileged;
2615 	}
2616 
2617 	assert(matching_size);
2618 	obj = OSUnserializeXML(matching, matching_size);
2619 
2620 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2621 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2622 		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2623 		kr = kIOReturnSuccess;
2624 	} else {
2625 		kr = kIOReturnBadArgument;
2626 	}
2627 
2628 	if (obj) {
2629 		obj->release();
2630 	}
2631 
2632 	return kr;
2633 }
2634 
2635 /* Routine io_service_get_matching_services */
2636 kern_return_t
2637 is_io_service_get_matching_services(
2638 	mach_port_t main_port,
2639 	io_string_t matching,
2640 	io_iterator_t *existing )
2641 {
2642 	return kIOReturnUnsupported;
2643 }
2644 
2645 /* Routine io_service_get_matching_services_ool */
2646 kern_return_t
2647 is_io_service_get_matching_services_ool(
2648 	mach_port_t main_port,
2649 	io_buf_ptr_t matching,
2650 	mach_msg_type_number_t matchingCnt,
2651 	kern_return_t *result,
2652 	io_object_t *existing )
2653 {
2654 	kern_return_t       kr;
2655 	vm_offset_t         data;
2656 	vm_map_offset_t     map_data;
2657 
2658 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2659 	data = CAST_DOWN(vm_offset_t, map_data);
2660 
2661 	if (KERN_SUCCESS == kr) {
2662 		// must return success after vm_map_copyout() succeeds
2663 		// and mig will copy out objects on success
2664 		*existing = NULL;
2665 		*result = internal_io_service_get_matching_services(main_port,
2666 		    (const char *) data, matchingCnt, existing);
2667 		vm_deallocate( kernel_map, data, matchingCnt );
2668 	}
2669 
2670 	return kr;
2671 }
2672 
2673 /* Routine io_service_get_matching_services_bin */
2674 kern_return_t
2675 is_io_service_get_matching_services_bin(
2676 	mach_port_t main_port,
2677 	io_struct_inband_t matching,
2678 	mach_msg_type_number_t matchingCnt,
2679 	io_object_t *existing)
2680 {
2681 	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
2682 }
2683 
2684 
2685 static kern_return_t
2686 internal_io_service_get_matching_service(
2687 	mach_port_t main_port,
2688 	const char * matching,
2689 	mach_msg_type_number_t matching_size,
2690 	io_service_t *service )
2691 {
2692 	kern_return_t       kr;
2693 	OSObject *          obj;
2694 	OSDictionary *      dict;
2695 
2696 	if (main_port != main_device_port) {
2697 		return kIOReturnNotPrivileged;
2698 	}
2699 
2700 	assert(matching_size);
2701 	obj = OSUnserializeXML(matching, matching_size);
2702 
2703 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2704 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2705 		*service = IOService::copyMatchingService( dict );
2706 		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2707 	} else {
2708 		kr = kIOReturnBadArgument;
2709 	}
2710 
2711 	if (obj) {
2712 		obj->release();
2713 	}
2714 
2715 	return kr;
2716 }
2717 
2718 /* Routine io_service_get_matching_service */
2719 kern_return_t
2720 is_io_service_get_matching_service(
2721 	mach_port_t main_port,
2722 	io_string_t matching,
2723 	io_service_t *service )
2724 {
2725 	return kIOReturnUnsupported;
2726 }
2727 
2728 /* Routine io_service_get_matching_service_ool */
2729 kern_return_t
2730 is_io_service_get_matching_service_ool(
2731 	mach_port_t main_port,
2732 	io_buf_ptr_t matching,
2733 	mach_msg_type_number_t matchingCnt,
2734 	kern_return_t *result,
2735 	io_object_t *service )
2736 {
2737 	kern_return_t       kr;
2738 	vm_offset_t         data;
2739 	vm_map_offset_t     map_data;
2740 
2741 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2742 	data = CAST_DOWN(vm_offset_t, map_data);
2743 
2744 	if (KERN_SUCCESS == kr) {
2745 		// must return success after vm_map_copyout() succeeds
2746 		// and mig will copy out objects on success
2747 		*service = NULL;
2748 		*result = internal_io_service_get_matching_service(main_port,
2749 		    (const char *) data, matchingCnt, service );
2750 		vm_deallocate( kernel_map, data, matchingCnt );
2751 	}
2752 
2753 	return kr;
2754 }
2755 
2756 /* Routine io_service_get_matching_service_bin */
2757 kern_return_t
2758 is_io_service_get_matching_service_bin(
2759 	mach_port_t main_port,
2760 	io_struct_inband_t matching,
2761 	mach_msg_type_number_t matchingCnt,
2762 	io_object_t *service)
2763 {
2764 	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
2765 }
2766 
2767 static kern_return_t
2768 internal_io_service_add_notification(
2769 	mach_port_t main_port,
2770 	io_name_t notification_type,
2771 	const char * matching,
2772 	size_t matching_size,
2773 	mach_port_t port,
2774 	void * reference,
2775 	vm_size_t referenceSize,
2776 	bool client64,
2777 	io_object_t * notification )
2778 {
2779 	IOServiceUserNotification * userNotify = NULL;
2780 	IONotifier *                notify = NULL;
2781 	const OSSymbol *            sym;
2782 	OSObject *                  obj;
2783 	OSDictionary *              dict;
2784 	IOReturn                    err;
2785 	natural_t                   userMsgType;
2786 
2787 	if (main_port != main_device_port) {
2788 		return kIOReturnNotPrivileged;
2789 	}
2790 
2791 	do {
2792 		err = kIOReturnNoResources;
2793 
2794 		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2795 			return kIOReturnMessageTooLarge;
2796 		}
2797 
2798 		if (!(sym = OSSymbol::withCString( notification_type ))) {
2799 			err = kIOReturnNoResources;
2800 		}
2801 
2802 		assert(matching_size);
2803 		obj = OSUnserializeXML(matching, matching_size);
2804 		dict = OSDynamicCast(OSDictionary, obj);
2805 		if (!dict) {
2806 			err = kIOReturnBadArgument;
2807 			continue;
2808 		}
2809 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2810 
2811 		if ((sym == gIOPublishNotification)
2812 		    || (sym == gIOFirstPublishNotification)) {
2813 			userMsgType = kIOServicePublishNotificationType;
2814 		} else if ((sym == gIOMatchedNotification)
2815 		    || (sym == gIOFirstMatchNotification)) {
2816 			userMsgType = kIOServiceMatchedNotificationType;
2817 		} else if ((sym == gIOTerminatedNotification)
2818 		    || (sym == gIOWillTerminateNotification)) {
2819 			userMsgType = kIOServiceTerminatedNotificationType;
2820 		} else {
2821 			userMsgType = kLastIOKitNotificationType;
2822 		}
2823 
2824 		userNotify = new IOServiceUserNotification;
2825 
2826 		if (userNotify && !userNotify->init( port, userMsgType,
2827 		    reference, referenceSize, client64)) {
2828 			userNotify->release();
2829 			userNotify = NULL;
2830 		}
2831 		if (!userNotify) {
2832 			continue;
2833 		}
2834 
2835 		notify = IOService::addMatchingNotification( sym, dict,
2836 		    &userNotify->_handler, userNotify );
2837 		if (notify) {
2838 			*notification = userNotify;
2839 			userNotify->setNotification( notify );
2840 			err = kIOReturnSuccess;
2841 		} else {
2842 			err = kIOReturnUnsupported;
2843 		}
2844 	} while (false);
2845 
2846 	if ((kIOReturnSuccess != err) && userNotify) {
2847 		userNotify->setNotification(NULL);
2848 		userNotify->invalidatePort();
2849 		userNotify->release();
2850 		userNotify = NULL;
2851 	}
2852 
2853 	if (sym) {
2854 		sym->release();
2855 	}
2856 	if (obj) {
2857 		obj->release();
2858 	}
2859 
2860 	return err;
2861 }
2862 
2863 
2864 /* Routine io_service_add_notification */
2865 kern_return_t
2866 is_io_service_add_notification(
2867 	mach_port_t main_port,
2868 	io_name_t notification_type,
2869 	io_string_t matching,
2870 	mach_port_t port,
2871 	io_async_ref_t reference,
2872 	mach_msg_type_number_t referenceCnt,
2873 	io_object_t * notification )
2874 {
2875 	return kIOReturnUnsupported;
2876 }
2877 
2878 /* Routine io_service_add_notification_64 */
2879 kern_return_t
2880 is_io_service_add_notification_64(
2881 	mach_port_t main_port,
2882 	io_name_t notification_type,
2883 	io_string_t matching,
2884 	mach_port_t wake_port,
2885 	io_async_ref64_t reference,
2886 	mach_msg_type_number_t referenceCnt,
2887 	io_object_t *notification )
2888 {
2889 	return kIOReturnUnsupported;
2890 }
2891 
2892 /* Routine io_service_add_notification_bin */
2893 kern_return_t
2894 is_io_service_add_notification_bin
2895 (
2896 	mach_port_t main_port,
2897 	io_name_t notification_type,
2898 	io_struct_inband_t matching,
2899 	mach_msg_type_number_t matchingCnt,
2900 	mach_port_t wake_port,
2901 	io_async_ref_t reference,
2902 	mach_msg_type_number_t referenceCnt,
2903 	io_object_t *notification)
2904 {
2905 	io_async_ref_t zreference;
2906 
2907 	if (referenceCnt > ASYNC_REF_COUNT) {
2908 		return kIOReturnBadArgument;
2909 	}
2910 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2911 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2912 
2913 	return internal_io_service_add_notification(main_port, notification_type,
2914 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2915 	           false, notification);
2916 }
2917 
2918 /* Routine io_service_add_notification_bin_64 */
2919 kern_return_t
2920 is_io_service_add_notification_bin_64
2921 (
2922 	mach_port_t main_port,
2923 	io_name_t notification_type,
2924 	io_struct_inband_t matching,
2925 	mach_msg_type_number_t matchingCnt,
2926 	mach_port_t wake_port,
2927 	io_async_ref64_t reference,
2928 	mach_msg_type_number_t referenceCnt,
2929 	io_object_t *notification)
2930 {
2931 	io_async_ref64_t zreference;
2932 
2933 	if (referenceCnt > ASYNC_REF64_COUNT) {
2934 		return kIOReturnBadArgument;
2935 	}
2936 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2937 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2938 
2939 	return internal_io_service_add_notification(main_port, notification_type,
2940 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2941 	           true, notification);
2942 }
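
/*
 * Both _bin entry points re-pack the caller-supplied async reference: the first
 * referenceCnt slots are copied and the remainder zeroed, so
 * internal_io_service_add_notification() always sees a fully initialized, full-width
 * reference regardless of the length user space claimed. Worked example, assuming
 * referenceCnt == 2 and an 8-slot io_async_ref64_t (the slot count is an assumption):
 *
 *	reference  (from user):  { r0, r1, <6 uninitialized trailing slots> }
 *	zreference (passed on):  { r0, r1, 0, 0, 0, 0, 0, 0 }
 */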
2943 
2944 static kern_return_t
2945 internal_io_service_add_notification_ool(
2946 	mach_port_t main_port,
2947 	io_name_t notification_type,
2948 	io_buf_ptr_t matching,
2949 	mach_msg_type_number_t matchingCnt,
2950 	mach_port_t wake_port,
2951 	void * reference,
2952 	vm_size_t referenceSize,
2953 	bool client64,
2954 	kern_return_t *result,
2955 	io_object_t *notification )
2956 {
2957 	kern_return_t       kr;
2958 	vm_offset_t         data;
2959 	vm_map_offset_t     map_data;
2960 
2961 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2962 	data = CAST_DOWN(vm_offset_t, map_data);
2963 
2964 	if (KERN_SUCCESS == kr) {
2965 		// must return success after vm_map_copyout() succeeds
2966 		// and mig will copy out objects on success
2967 		*notification = NULL;
2968 		*result = internal_io_service_add_notification( main_port, notification_type,
2969 		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2970 		vm_deallocate( kernel_map, data, matchingCnt );
2971 	}
2972 
2973 	return kr;
2974 }
2975 
2976 /* Routine io_service_add_notification_ool */
2977 kern_return_t
2978 is_io_service_add_notification_ool(
2979 	mach_port_t main_port,
2980 	io_name_t notification_type,
2981 	io_buf_ptr_t matching,
2982 	mach_msg_type_number_t matchingCnt,
2983 	mach_port_t wake_port,
2984 	io_async_ref_t reference,
2985 	mach_msg_type_number_t referenceCnt,
2986 	kern_return_t *result,
2987 	io_object_t *notification )
2988 {
2989 	io_async_ref_t zreference;
2990 
2991 	if (referenceCnt > ASYNC_REF_COUNT) {
2992 		return kIOReturnBadArgument;
2993 	}
2994 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2995 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2996 
2997 	return internal_io_service_add_notification_ool(main_port, notification_type,
2998 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2999 	           false, result, notification);
3000 }
3001 
3002 /* Routine io_service_add_notification_ool_64 */
3003 kern_return_t
3004 is_io_service_add_notification_ool_64(
3005 	mach_port_t main_port,
3006 	io_name_t notification_type,
3007 	io_buf_ptr_t matching,
3008 	mach_msg_type_number_t matchingCnt,
3009 	mach_port_t wake_port,
3010 	io_async_ref64_t reference,
3011 	mach_msg_type_number_t referenceCnt,
3012 	kern_return_t *result,
3013 	io_object_t *notification )
3014 {
3015 	io_async_ref64_t zreference;
3016 
3017 	if (referenceCnt > ASYNC_REF64_COUNT) {
3018 		return kIOReturnBadArgument;
3019 	}
3020 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3021 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3022 
3023 	return internal_io_service_add_notification_ool(main_port, notification_type,
3024 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3025 	           true, result, notification);
3026 }
3027 
3028 /* Routine io_service_add_notification_old */
3029 kern_return_t
3030 is_io_service_add_notification_old(
3031 	mach_port_t main_port,
3032 	io_name_t notification_type,
3033 	io_string_t matching,
3034 	mach_port_t port,
3035 	// for binary compatibility reasons, this must be natural_t for ILP32
3036 	natural_t ref,
3037 	io_object_t * notification )
3038 {
3039 	return is_io_service_add_notification( main_port, notification_type,
3040 	           matching, port, &ref, 1, notification );
3041 }
3042 
3043 
3044 static kern_return_t
3045 internal_io_service_add_interest_notification(
3046 	io_object_t _service,
3047 	io_name_t type_of_interest,
3048 	mach_port_t port,
3049 	void * reference,
3050 	vm_size_t referenceSize,
3051 	bool client64,
3052 	io_object_t * notification )
3053 {
3054 	IOServiceMessageUserNotification *  userNotify = NULL;
3055 	IONotifier *                        notify = NULL;
3056 	const OSSymbol *                    sym;
3057 	IOReturn                            err;
3058 
3059 	CHECK( IOService, _service, service );
3060 
3061 	err = kIOReturnNoResources;
3062 	if ((sym = OSSymbol::withCString( type_of_interest ))) {
3063 		do {
3064 			userNotify = new IOServiceMessageUserNotification;
3065 
3066 			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
3067 			    reference, referenceSize, client64 )) {
3068 				userNotify->release();
3069 				userNotify = NULL;
3070 			}
3071 			if (!userNotify) {
3072 				continue;
3073 			}
3074 
3075 			notify = service->registerInterest( sym,
3076 			    &userNotify->_handler, userNotify );
3077 			if (notify) {
3078 				*notification = userNotify;
3079 				userNotify->setNotification( notify );
3080 				err = kIOReturnSuccess;
3081 			} else {
3082 				err = kIOReturnUnsupported;
3083 			}
3084 		} while (false);
3085 
3086 		sym->release();
3087 	}
3088 
3089 	if ((kIOReturnSuccess != err) && userNotify) {
3090 		userNotify->setNotification(NULL);
3091 		userNotify->invalidatePort();
3092 		userNotify->release();
3093 		userNotify = NULL;
3094 	}
3095 
3096 	return err;
3097 }
3098 
3099 /* Routine io_service_add_interest_notification */
3100 kern_return_t
3101 is_io_service_add_interest_notification(
3102 	io_object_t service,
3103 	io_name_t type_of_interest,
3104 	mach_port_t port,
3105 	io_async_ref_t reference,
3106 	mach_msg_type_number_t referenceCnt,
3107 	io_object_t * notification )
3108 {
3109 	io_async_ref_t zreference;
3110 
3111 	if (referenceCnt > ASYNC_REF_COUNT) {
3112 		return kIOReturnBadArgument;
3113 	}
3114 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3115 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3116 
3117 	return internal_io_service_add_interest_notification(service, type_of_interest,
3118 	           port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3119 }
3120 
3121 /* Routine io_service_add_interest_notification_64 */
3122 kern_return_t
3123 is_io_service_add_interest_notification_64(
3124 	io_object_t service,
3125 	io_name_t type_of_interest,
3126 	mach_port_t wake_port,
3127 	io_async_ref64_t reference,
3128 	mach_msg_type_number_t referenceCnt,
3129 	io_object_t *notification )
3130 {
3131 	io_async_ref64_t zreference;
3132 
3133 	if (referenceCnt > ASYNC_REF64_COUNT) {
3134 		return kIOReturnBadArgument;
3135 	}
3136 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3137 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3138 
3139 	return internal_io_service_add_interest_notification(service, type_of_interest,
3140 	           wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3141 }
3142 
3143 
3144 /* Routine io_service_acknowledge_notification */
3145 kern_return_t
3146 is_io_service_acknowledge_notification(
3147 	io_object_t _service,
3148 	natural_t notify_ref,
3149 	natural_t response )
3150 {
3151 	CHECK( IOService, _service, service );
3152 
3153 	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3154 	           (IOOptionBits) response );
3155 }
3156 
3157 /* Routine io_connect_get_notification_semaphore */
3158 kern_return_t
3159 is_io_connect_get_notification_semaphore(
3160 	io_connect_t connection,
3161 	natural_t notification_type,
3162 	semaphore_t *semaphore )
3163 {
3164 	IOReturn ret;
3165 	CHECK( IOUserClient, connection, client );
3166 
3167 	IOStatisticsClientCall();
3168 	IORWLockWrite(&client->lock);
3169 	ret = client->getNotificationSemaphore((UInt32) notification_type,
3170 	    semaphore );
3171 	IORWLockUnlock(&client->lock);
3172 
3173 	return ret;
3174 }
3175 
3176 /* Routine io_registry_get_root_entry */
3177 kern_return_t
3178 is_io_registry_get_root_entry(
3179 	mach_port_t main_port,
3180 	io_object_t *root )
3181 {
3182 	IORegistryEntry *   entry;
3183 
3184 	if (main_port != main_device_port) {
3185 		return kIOReturnNotPrivileged;
3186 	}
3187 
3188 	entry = IORegistryEntry::getRegistryRoot();
3189 	if (entry) {
3190 		entry->retain();
3191 	}
3192 	*root = entry;
3193 
3194 	return kIOReturnSuccess;
3195 }
3196 
3197 /* Routine io_registry_create_iterator */
3198 kern_return_t
3199 is_io_registry_create_iterator(
3200 	mach_port_t main_port,
3201 	io_name_t plane,
3202 	uint32_t options,
3203 	io_object_t *iterator )
3204 {
3205 	if (main_port != main_device_port) {
3206 		return kIOReturnNotPrivileged;
3207 	}
3208 
3209 	*iterator = IOUserIterator::withIterator(
3210 		IORegistryIterator::iterateOver(
3211 			IORegistryEntry::getPlane( plane ), options ));
3212 
3213 	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3214 }
3215 
3216 /* Routine io_registry_entry_create_iterator */
3217 kern_return_t
3218 is_io_registry_entry_create_iterator(
3219 	io_object_t registry_entry,
3220 	io_name_t plane,
3221 	uint32_t options,
3222 	io_object_t *iterator )
3223 {
3224 	CHECK( IORegistryEntry, registry_entry, entry );
3225 
3226 	*iterator = IOUserIterator::withIterator(
3227 		IORegistryIterator::iterateOver( entry,
3228 		IORegistryEntry::getPlane( plane ), options ));
3229 
3230 	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3231 }
3232 
3233 /* Routine io_registry_iterator_enter_entry */
3234 kern_return_t
3235 is_io_registry_iterator_enter_entry(
3236 	io_object_t iterator )
3237 {
3238 	CHECKLOCKED( IORegistryIterator, iterator, iter );
3239 
3240 	IOLockLock(&oIter->lock);
3241 	iter->enterEntry();
3242 	IOLockUnlock(&oIter->lock);
3243 
3244 	return kIOReturnSuccess;
3245 }
3246 
3247 /* Routine io_registry_iterator_exit_entry */
3248 kern_return_t
3249 is_io_registry_iterator_exit_entry(
3250 	io_object_t iterator )
3251 {
3252 	bool        didIt;
3253 
3254 	CHECKLOCKED( IORegistryIterator, iterator, iter );
3255 
3256 	IOLockLock(&oIter->lock);
3257 	didIt = iter->exitEntry();
3258 	IOLockUnlock(&oIter->lock);
3259 
3260 	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
3261 }
3262 
3263 /* Routine io_registry_entry_from_path */
3264 kern_return_t
3265 is_io_registry_entry_from_path(
3266 	mach_port_t main_port,
3267 	io_string_t path,
3268 	io_object_t *registry_entry )
3269 {
3270 	IORegistryEntry *   entry;
3271 
3272 	if (main_port != main_device_port) {
3273 		return kIOReturnNotPrivileged;
3274 	}
3275 
3276 	entry = IORegistryEntry::fromPath( path );
3277 
3278 	if (!entry && IOTaskRegistryCompatibility(current_task())) {
3279 		OSDictionary * matching;
3280 		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
3281 		const OSSymbol * keys[2]    = { gIOCompatibilityMatchKey, gIOPathMatchKey };
3282 
3283 		objects[1] = OSString::withCStringNoCopy(path);
3284 		matching = OSDictionary::withObjects(objects, keys, 2, 2);
3285 		if (matching) {
3286 			entry = IOService::copyMatchingService(matching);
3287 		}
3288 		OSSafeReleaseNULL(matching);
3289 		OSSafeReleaseNULL(objects[1]);
3290 	}
3291 
3292 	*registry_entry = entry;
3293 
3294 	return kIOReturnSuccess;
3295 }
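
// When the literal path does not resolve and the calling task is marked for registry
// compatibility, the lookup above retries with a matching dictionary of
// { gIOCompatibilityMatchKey = true, gIOPathMatchKey = path }. The user-space entry
// point for this routine is IORegistryEntryFromPath(); a sketch, assuming the
// standard IOKitLib call and an illustrative path:
#if 0
	io_registry_entry_t entry =
	    IORegistryEntryFromPath(kIOMainPortDefault, "IOService:/IOResources");
#endif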
3296 
3297 
3298 /* Routine io_registry_entry_from_path_ool */
3299 kern_return_t
3300 is_io_registry_entry_from_path_ool(
3301 	mach_port_t main_port,
3302 	io_string_inband_t path,
3303 	io_buf_ptr_t path_ool,
3304 	mach_msg_type_number_t path_oolCnt,
3305 	kern_return_t *result,
3306 	io_object_t *registry_entry)
3307 {
3308 	IORegistryEntry *   entry;
3309 	vm_map_offset_t     map_data;
3310 	const char *        cpath;
3311 	IOReturn            res;
3312 	kern_return_t       err;
3313 
3314 	if (main_port != main_device_port) {
3315 		return kIOReturnNotPrivileged;
3316 	}
3317 
3318 	map_data = 0;
3319 	entry    = NULL;
3320 	res = err = KERN_SUCCESS;
3321 	if (path[0]) {
3322 		cpath = path;
3323 	} else {
3324 		if (!path_oolCnt) {
3325 			return kIOReturnBadArgument;
3326 		}
3327 		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
3328 			return kIOReturnMessageTooLarge;
3329 		}
3330 
3331 		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
3332 		if (KERN_SUCCESS == err) {
3333 			// must return success to mig after vm_map_copyout() succeeds; the actual status is returned in *result
3334 			cpath = CAST_DOWN(const char *, map_data);
3335 			if (cpath[path_oolCnt - 1]) {
3336 				res = kIOReturnBadArgument;
3337 			}
3338 		}
3339 	}
3340 
3341 	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
3342 		entry = IORegistryEntry::fromPath(cpath);
3343 		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
3344 	}
3345 
3346 	if (map_data) {
3347 		vm_deallocate(kernel_map, map_data, path_oolCnt);
3348 	}
3349 
3350 	if (KERN_SUCCESS != err) {
3351 		res = err;
3352 	}
3353 	*registry_entry = entry;
3354 	*result = res;
3355 
3356 	return err;
3357 }
3358 
3359 
3360 /* Routine io_registry_entry_in_plane */
3361 kern_return_t
3362 is_io_registry_entry_in_plane(
3363 	io_object_t registry_entry,
3364 	io_name_t plane,
3365 	boolean_t *inPlane )
3366 {
3367 	CHECK( IORegistryEntry, registry_entry, entry );
3368 
3369 	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3370 
3371 	return kIOReturnSuccess;
3372 }
3373 
3374 
3375 /* Routine io_registry_entry_get_path */
3376 kern_return_t
3377 is_io_registry_entry_get_path(
3378 	io_object_t registry_entry,
3379 	io_name_t plane,
3380 	io_string_t path )
3381 {
3382 	int         length;
3383 	CHECK( IORegistryEntry, registry_entry, entry );
3384 
3385 	length = sizeof(io_string_t);
3386 	if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3387 		return kIOReturnSuccess;
3388 	} else {
3389 		return kIOReturnBadArgument;
3390 	}
3391 }
3392 
3393 /* Routine io_registry_entry_get_path_ool */
3394 kern_return_t
3395 is_io_registry_entry_get_path_ool(
3396 	io_object_t registry_entry,
3397 	io_name_t plane,
3398 	io_string_inband_t path,
3399 	io_buf_ptr_t *path_ool,
3400 	mach_msg_type_number_t *path_oolCnt)
3401 {
3402 	enum   { kMaxPath = 16384 };
3403 	IOReturn err;
3404 	int      length;
3405 	char   * buf;
3406 
3407 	CHECK( IORegistryEntry, registry_entry, entry );
3408 
3409 	*path_ool    = NULL;
3410 	*path_oolCnt = 0;
3411 	length = sizeof(io_string_inband_t);
3412 	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
3413 		err = kIOReturnSuccess;
3414 	} else {
3415 		length = kMaxPath;
3416 		buf = IONewData(char, length);
3417 		if (!buf) {
3418 			err = kIOReturnNoMemory;
3419 		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
3420 			err = kIOReturnError;
3421 		} else {
3422 			*path_oolCnt = length;
3423 			err = copyoutkdata(buf, length, path_ool);
3424 		}
3425 		if (buf) {
3426 			IODeleteData(buf, char, kMaxPath);
3427 		}
3428 	}
3429 
3430 	return err;
3431 }
3432 
3433 
3434 /* Routine io_registry_entry_get_name */
3435 kern_return_t
3436 is_io_registry_entry_get_name(
3437 	io_object_t registry_entry,
3438 	io_name_t name )
3439 {
3440 	CHECK( IORegistryEntry, registry_entry, entry );
3441 
3442 	strncpy( name, entry->getName(), sizeof(io_name_t));
3443 
3444 	return kIOReturnSuccess;
3445 }
3446 
3447 /* Routine io_registry_entry_get_name_in_plane */
3448 kern_return_t
3449 is_io_registry_entry_get_name_in_plane(
3450 	io_object_t registry_entry,
3451 	io_name_t planeName,
3452 	io_name_t name )
3453 {
3454 	const IORegistryPlane * plane;
3455 	CHECK( IORegistryEntry, registry_entry, entry );
3456 
3457 	if (planeName[0]) {
3458 		plane = IORegistryEntry::getPlane( planeName );
3459 	} else {
3460 		plane = NULL;
3461 	}
3462 
3463 	strncpy( name, entry->getName( plane), sizeof(io_name_t));
3464 
3465 	return kIOReturnSuccess;
3466 }
3467 
3468 /* Routine io_registry_entry_get_location_in_plane */
3469 kern_return_t
3470 is_io_registry_entry_get_location_in_plane(
3471 	io_object_t registry_entry,
3472 	io_name_t planeName,
3473 	io_name_t location )
3474 {
3475 	const IORegistryPlane * plane;
3476 	CHECK( IORegistryEntry, registry_entry, entry );
3477 
3478 	if (planeName[0]) {
3479 		plane = IORegistryEntry::getPlane( planeName );
3480 	} else {
3481 		plane = NULL;
3482 	}
3483 
3484 	const char * cstr = entry->getLocation( plane );
3485 
3486 	if (cstr) {
3487 		strncpy( location, cstr, sizeof(io_name_t));
3488 		return kIOReturnSuccess;
3489 	} else {
3490 		return kIOReturnNotFound;
3491 	}
3492 }
3493 
3494 /* Routine io_registry_entry_get_registry_entry_id */
3495 kern_return_t
3496 is_io_registry_entry_get_registry_entry_id(
3497 	io_object_t registry_entry,
3498 	uint64_t *entry_id )
3499 {
3500 	CHECK( IORegistryEntry, registry_entry, entry );
3501 
3502 	*entry_id = entry->getRegistryEntryID();
3503 
3504 	return kIOReturnSuccess;
3505 }
3506 
3507 
3508 static OSObject *
3509 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3510 {
3511 	OSObject     * obj;
3512 	OSObject     * compatProperties;
3513 	OSDictionary * props;
3514 
3515 	obj = regEntry->copyProperty(name);
3516 	if (obj) {
3517 		return obj;
3518 	}
3519 
3520 	compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3521 	if (!compatProperties
3522 	    && IOTaskRegistryCompatibility(current_task())) {
3523 		compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3524 	}
3525 	if (compatProperties) {
3526 		props = OSDynamicCast(OSDictionary, compatProperties);
3527 		if (props) {
3528 			obj = props->getObject(name);
3529 			if (obj) {
3530 				obj->retain();
3531 			}
3532 		}
3533 		compatProperties->release();
3534 	}
3535 
3536 	return obj;
3537 }
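
// Lookup order: the entry's own property wins; otherwise the gIOUserServicePropertiesKey
// dictionary is consulted and, when that is absent and the task is registry-compatible,
// the gIOCompatibilityPropertiesKey dictionary. The result always comes back retained.
// Minimal usage sketch (mirrors the property getters below; "IOClass" is illustrative):
#if 0
	OSObject * value = IOCopyPropertyCompatible(entry, "IOClass");
	if (value) {
		/* serialize or inspect the value */
		value->release();
	}
#endif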
3538 
3539 /* Routine io_registry_entry_get_property_bytes */
3540 kern_return_t
3541 is_io_registry_entry_get_property_bytes(
3542 	io_object_t registry_entry,
3543 	io_name_t property_name,
3544 	io_struct_inband_t buf,
3545 	mach_msg_type_number_t *dataCnt )
3546 {
3547 	OSObject    *       obj;
3548 	OSData      *       data;
3549 	OSString    *       str;
3550 	OSBoolean   *       boo;
3551 	OSNumber    *       off;
3552 	UInt64              offsetBytes;
3553 	unsigned int        len = 0;
3554 	const void *        bytes = NULL;
3555 	IOReturn            ret = kIOReturnSuccess;
3556 
3557 	CHECK( IORegistryEntry, registry_entry, entry );
3558 
3559 #if CONFIG_MACF
3560 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3561 		return kIOReturnNotPermitted;
3562 	}
3563 #endif
3564 
3565 	obj = IOCopyPropertyCompatible(entry, property_name);
3566 	if (!obj) {
3567 		return kIOReturnNoResources;
3568 	}
3569 
3570 	// One day OSData will be a common container base class
3571 	// until then...
3572 	if ((data = OSDynamicCast( OSData, obj ))) {
3573 		len = data->getLength();
3574 		bytes = data->getBytesNoCopy();
3575 		if (!data->isSerializable()) {
3576 			len = 0;
3577 		}
3578 	} else if ((str = OSDynamicCast( OSString, obj ))) {
3579 		len = str->getLength() + 1;
3580 		bytes = str->getCStringNoCopy();
3581 	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
3582 		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
3583 		bytes = boo->isTrue() ? "Yes" : "No";
3584 	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
3585 		offsetBytes = off->unsigned64BitValue();
3586 		len = off->numberOfBytes();
3587 		if (len > sizeof(offsetBytes)) {
3588 			len = sizeof(offsetBytes);
3589 		}
3590 		bytes = &offsetBytes;
3591 #ifdef __BIG_ENDIAN__
3592 		bytes = (const void *)
3593 		    (((UInt32) bytes) + (sizeof(UInt64) - len));
3594 #endif
3595 	} else {
3596 		ret = kIOReturnBadArgument;
3597 	}
3598 
3599 	if (bytes) {
3600 		if (*dataCnt < len) {
3601 			ret = kIOReturnIPCError;
3602 		} else {
3603 			*dataCnt = len;
3604 			bcopy( bytes, buf, len );
3605 		}
3606 	}
3607 	obj->release();
3608 
3609 	return ret;
3610 }
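
/*
 * Each property class is flattened differently: OSData copies its raw bytes (length 0
 * if it is not serializable), OSString includes its NUL terminator, OSBoolean becomes
 * the literal "Yes" or "No", and OSNumber is truncated to its declared byte width.
 * Worked example for the OSNumber case (values are illustrative):
 *
 *	OSNumber::withNumber(0x11223344, 32) reports numberOfBytes() == 4, so len = 4
 *	and the bytes copied out on a little-endian kernel are 44 33 22 11; the
 *	__BIG_ENDIAN__ branch advances the pointer so the same low-order 32 bits are copied.
 */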
3611 
3612 
3613 /* Routine io_registry_entry_get_property */
3614 kern_return_t
3615 is_io_registry_entry_get_property(
3616 	io_object_t registry_entry,
3617 	io_name_t property_name,
3618 	io_buf_ptr_t *properties,
3619 	mach_msg_type_number_t *propertiesCnt )
3620 {
3621 	kern_return_t       err;
3622 	unsigned int        len;
3623 	OSObject *          obj;
3624 
3625 	CHECK( IORegistryEntry, registry_entry, entry );
3626 
3627 #if CONFIG_MACF
3628 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3629 		return kIOReturnNotPermitted;
3630 	}
3631 #endif
3632 
3633 	obj = IOCopyPropertyCompatible(entry, property_name);
3634 	if (!obj) {
3635 		return kIOReturnNotFound;
3636 	}
3637 
3638 	OSSerialize * s = OSSerialize::withCapacity(4096);
3639 	if (!s) {
3640 		obj->release();
3641 		return kIOReturnNoMemory;
3642 	}
3643 
3644 	if (obj->serialize( s )) {
3645 		len = s->getLength();
3646 		*propertiesCnt = len;
3647 		err = copyoutkdata( s->text(), len, properties );
3648 	} else {
3649 		err = kIOReturnUnsupported;
3650 	}
3651 
3652 	s->release();
3653 	obj->release();
3654 
3655 	return err;
3656 }
3657 
3658 /* Routine io_registry_entry_get_property_recursively */
3659 kern_return_t
3660 is_io_registry_entry_get_property_recursively(
3661 	io_object_t registry_entry,
3662 	io_name_t plane,
3663 	io_name_t property_name,
3664 	uint32_t options,
3665 	io_buf_ptr_t *properties,
3666 	mach_msg_type_number_t *propertiesCnt )
3667 {
3668 	kern_return_t       err;
3669 	unsigned int        len;
3670 	OSObject *          obj;
3671 
3672 	CHECK( IORegistryEntry, registry_entry, entry );
3673 
3674 #if CONFIG_MACF
3675 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3676 		return kIOReturnNotPermitted;
3677 	}
3678 #endif
3679 
3680 	obj = entry->copyProperty( property_name,
3681 	    IORegistryEntry::getPlane( plane ), options );
3682 	if (!obj) {
3683 		return kIOReturnNotFound;
3684 	}
3685 
3686 	OSSerialize * s = OSSerialize::withCapacity(4096);
3687 	if (!s) {
3688 		obj->release();
3689 		return kIOReturnNoMemory;
3690 	}
3691 
3692 	if (obj->serialize( s )) {
3693 		len = s->getLength();
3694 		*propertiesCnt = len;
3695 		err = copyoutkdata( s->text(), len, properties );
3696 	} else {
3697 		err = kIOReturnUnsupported;
3698 	}
3699 
3700 	s->release();
3701 	obj->release();
3702 
3703 	return err;
3704 }
3705 
3706 /* Routine io_registry_entry_get_properties */
3707 kern_return_t
3708 is_io_registry_entry_get_properties(
3709 	io_object_t registry_entry,
3710 	io_buf_ptr_t *properties,
3711 	mach_msg_type_number_t *propertiesCnt )
3712 {
3713 	return kIOReturnUnsupported;
3714 }
3715 
3716 #if CONFIG_MACF
3717 
3718 struct GetPropertiesEditorRef {
3719 	kauth_cred_t      cred;
3720 	IORegistryEntry * entry;
3721 	OSCollection    * root;
3722 };
3723 
3724 static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
3725 GetPropertiesEditor(void                  * reference,
3726     OSSerialize           * s,
3727     OSCollection          * container,
3728     const OSSymbol        * name,
3729     const OSMetaClassBase * value)
3730 {
3731 	GetPropertiesEditorRef * ref = (typeof(ref))reference;
3732 
3733 	if (!ref->root) {
3734 		ref->root = container;
3735 	}
3736 	if (ref->root == container) {
3737 		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
3738 			value = NULL;
3739 		}
3740 	}
3741 	if (value) {
3742 		value->retain();
3743 	}
3744 	return value;
3745 }
3746 
3747 #endif /* CONFIG_MACF */
3748 
3749 /* Routine io_registry_entry_get_properties_bin_buf */
3750 kern_return_t
3751 is_io_registry_entry_get_properties_bin_buf(
3752 	io_object_t registry_entry,
3753 	mach_vm_address_t buf,
3754 	mach_vm_size_t *bufsize,
3755 	io_buf_ptr_t *properties,
3756 	mach_msg_type_number_t *propertiesCnt)
3757 {
3758 	kern_return_t          err = kIOReturnSuccess;
3759 	unsigned int           len;
3760 	OSObject             * compatProperties;
3761 	OSSerialize          * s;
3762 	OSSerialize::Editor    editor = NULL;
3763 	void                 * editRef = NULL;
3764 
3765 	CHECK(IORegistryEntry, registry_entry, entry);
3766 
3767 #if CONFIG_MACF
3768 	GetPropertiesEditorRef ref;
3769 	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
3770 		editor    = &GetPropertiesEditor;
3771 		editRef   = &ref;
3772 		ref.cred  = kauth_cred_get();
3773 		ref.entry = entry;
3774 		ref.root  = NULL;
3775 	}
3776 #endif
3777 
3778 	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3779 	if (!s) {
3780 		return kIOReturnNoMemory;
3781 	}
3782 
3783 
3784 	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
3785 	if (!compatProperties
3786 	    && IOTaskRegistryCompatibility(current_task())) {
3787 		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
3788 	}
3789 
3790 	if (compatProperties) {
3791 		OSDictionary * dict;
3792 
3793 		dict = entry->dictionaryWithProperties();
3794 		if (!dict) {
3795 			err = kIOReturnNoMemory;
3796 		} else {
3797 			dict->removeObject(gIOUserServicePropertiesKey);
3798 			dict->removeObject(gIOCompatibilityPropertiesKey);
3799 			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
3800 			if (!dict->serialize(s)) {
3801 				err = kIOReturnUnsupported;
3802 			}
3803 			dict->release();
3804 		}
3805 		compatProperties->release();
3806 	} else if (!entry->serializeProperties(s)) {
3807 		err = kIOReturnUnsupported;
3808 	}
3809 
3810 	if (kIOReturnSuccess == err) {
3811 		len = s->getLength();
3812 		if (buf && bufsize && len <= *bufsize) {
3813 			*bufsize = len;
3814 			*propertiesCnt = 0;
3815 			*properties = nullptr;
3816 			if (copyout(s->text(), buf, len)) {
3817 				err = kIOReturnVMError;
3818 			} else {
3819 				err = kIOReturnSuccess;
3820 			}
3821 		} else {
3822 			if (bufsize) {
3823 				*bufsize = 0;
3824 			}
3825 			*propertiesCnt = len;
3826 			err = copyoutkdata( s->text(), len, properties );
3827 		}
3828 	}
3829 	s->release();
3830 
3831 	return err;
3832 }
3833 
3834 /* Routine io_registry_entry_get_properties_bin */
3835 kern_return_t
3836 is_io_registry_entry_get_properties_bin(
3837 	io_object_t registry_entry,
3838 	io_buf_ptr_t *properties,
3839 	mach_msg_type_number_t *propertiesCnt)
3840 {
3841 	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
3842 	           0, NULL, properties, propertiesCnt);
3843 }
3844 
3845 /* Routine io_registry_entry_get_property_bin_buf */
3846 kern_return_t
3847 is_io_registry_entry_get_property_bin_buf(
3848 	io_object_t registry_entry,
3849 	io_name_t plane,
3850 	io_name_t property_name,
3851 	uint32_t options,
3852 	mach_vm_address_t buf,
3853 	mach_vm_size_t *bufsize,
3854 	io_buf_ptr_t *properties,
3855 	mach_msg_type_number_t *propertiesCnt )
3856 {
3857 	kern_return_t       err;
3858 	unsigned int        len;
3859 	OSObject *          obj;
3860 	const OSSymbol *    sym;
3861 
3862 	CHECK( IORegistryEntry, registry_entry, entry );
3863 
3864 #if CONFIG_MACF
3865 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3866 		return kIOReturnNotPermitted;
3867 	}
3868 #endif
3869 
3870 	sym = OSSymbol::withCString(property_name);
3871 	if (!sym) {
3872 		return kIOReturnNoMemory;
3873 	}
3874 
3875 	err = kIOReturnNotFound;
3876 	if (gIORegistryEntryPropertyKeysKey == sym) {
3877 		obj = entry->copyPropertyKeys();
3878 	} else {
3879 		if ((kIORegistryIterateRecursively & options) && plane[0]) {
3880 			obj = IOCopyPropertyCompatible(entry, property_name);
3881 			if (obj == NULL) {
3882 				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
3883 				if (iter) {
3884 					while ((NULL == obj) && (entry = iter->getNextObject())) {
3885 						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
3886 #if CONFIG_MACF
3887 						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3888 							// Record that MAC hook blocked this entry and property, and continue to next entry
3889 							err = kIOReturnNotPermitted;
3890 							OSSafeReleaseNULL(currentObj);
3891 							continue;
3892 						}
3893 #endif
3894 						obj = currentObj;
3895 					}
3896 					iter->release();
3897 				}
3898 			}
3899 		} else {
3900 			obj = IOCopyPropertyCompatible(entry, property_name);
3901 		}
3902 		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
3903 			entry->removeProperty(sym);
3904 		}
3905 	}
3906 
3907 	sym->release();
3908 	if (!obj) {
3909 		return err;
3910 	}
3911 
3912 	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3913 	if (!s) {
3914 		obj->release();
3915 		return kIOReturnNoMemory;
3916 	}
3917 
3918 	if (obj->serialize( s )) {
3919 		len = s->getLength();
3920 		if (buf && bufsize && len <= *bufsize) {
3921 			*bufsize = len;
3922 			*propertiesCnt = 0;
3923 			*properties = nullptr;
3924 			if (copyout(s->text(), buf, len)) {
3925 				err = kIOReturnVMError;
3926 			} else {
3927 				err = kIOReturnSuccess;
3928 			}
3929 		} else {
3930 			if (bufsize) {
3931 				*bufsize = 0;
3932 			}
3933 			*propertiesCnt = len;
3934 			err = copyoutkdata( s->text(), len, properties );
3935 		}
3936 	} else {
3937 		err = kIOReturnUnsupported;
3938 	}
3939 
3940 	s->release();
3941 	obj->release();
3942 
3943 	return err;
3944 }
3945 
3946 /* Routine io_registry_entry_get_property_bin */
3947 kern_return_t
3948 is_io_registry_entry_get_property_bin(
3949 	io_object_t registry_entry,
3950 	io_name_t plane,
3951 	io_name_t property_name,
3952 	uint32_t options,
3953 	io_buf_ptr_t *properties,
3954 	mach_msg_type_number_t *propertiesCnt )
3955 {
3956 	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
3957 	           property_name, options, 0, NULL, properties, propertiesCnt);
3958 }
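
/*
 * User-space sketch (illustrative): a single property is normally fetched with
 * IORegistryEntryCreateCFProperty(); passing kIORegistryIterateRecursively and a
 * plane to IORegistryEntrySearchCFProperty() exercises the recursive lookup path
 * handled above. The "model" key is just an example; the caller releases the result.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static CFTypeRef
 *   CopyModelProperty(io_registry_entry_t entry)
 *   {
 *       // Searches the entry and everything below it in the service plane.
 *       return IORegistryEntrySearchCFProperty(entry, kIOServicePlane,
 *           CFSTR("model"), kCFAllocatorDefault, kIORegistryIterateRecursively);
 *   }
 */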
3959 
3960 
3961 /* Routine io_registry_entry_set_properties */
3962 kern_return_t
3963 is_io_registry_entry_set_properties
3964 (
3965 	io_object_t registry_entry,
3966 	io_buf_ptr_t properties,
3967 	mach_msg_type_number_t propertiesCnt,
3968 	kern_return_t * result)
3969 {
3970 	OSObject *          obj;
3971 	kern_return_t       err;
3972 	IOReturn            res;
3973 	vm_offset_t         data;
3974 	vm_map_offset_t     map_data;
3975 
3976 	CHECK( IORegistryEntry, registry_entry, entry );
3977 
3978 	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
3979 		return kIOReturnMessageTooLarge;
3980 	}
3981 
3982 	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3983 	data = CAST_DOWN(vm_offset_t, map_data);
3984 
3985 	if (KERN_SUCCESS == err) {
3986 		FAKE_STACK_FRAME(entry->getMetaClass());
3987 
3988 		// must return KERN_SUCCESS once vm_map_copyout() has consumed the copy; the operation status is reported separately via *result
3989 		obj = OSUnserializeXML((const char *) data, propertiesCnt );
3990 		vm_deallocate( kernel_map, data, propertiesCnt );
3991 
3992 		if (!obj) {
3993 			res = kIOReturnBadArgument;
3994 		}
3995 #if CONFIG_MACF
3996 		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3997 		    registry_entry, obj)) {
3998 			res = kIOReturnNotPermitted;
3999 		}
4000 #endif
4001 		else {
4002 			IOService    * service = OSDynamicCast(IOService, entry);
4003 			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
4004 			OSObject     * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
4005 			OSArray      * allowableArray;
4006 
4007 			if (!allowable) {
4008 				res = kIOReturnSuccess;
4009 			} else {
4010 				if (!props) {
4011 					res = kIOReturnNotPermitted;
4012 				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
4013 					res = kIOReturnNotPermitted;
4014 				} else {
4015 					bool allFound __block, found __block;
4016 
4017 					allFound = true;
4018 					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
4019 							found = false;
4020 							for (unsigned int idx = 0; !found; idx++) {
4021 							        OSObject * next = allowableArray->getObject(idx);
4022 							        if (!next) {
4023 							                break;
4024 								}
4025 							        found = next->isEqualTo(key);
4026 							}
4027 							allFound &= found;
4028 							if (!found) {
4029 							        IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
4030 							        entry->getName(), key->getCStringNoCopy());
4031 							}
4032 							return !allFound;
4033 						});
4034 					res = allFound ? kIOReturnSuccess : kIOReturnBadArgument;
4035 				}
4036 			}
4037 			if (kIOReturnSuccess == res) {
4038 				IOUserClient *
4039 				    client = OSDynamicCast(IOUserClient, entry);
4040 
4041 				if (client && client->defaultLockingSetProperties) {
4042 					IORWLockWrite(&client->lock);
4043 				}
4044 
4045 				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
4046 					res = entry->runPropertyActionBlock(^IOReturn (void) {
4047 							return entry->setProperties( obj );
4048 						});
4049 				} else {
4050 					res = entry->setProperties( obj );
4051 				}
4052 
4053 				if (client && client->defaultLockingSetProperties) {
4054 					IORWLockUnlock(&client->lock);
4055 				}
4056 				if (service && props && service->hasUserServer()) {
4057 					res = service->UserSetProperties(props);
4058 				}
4059 			}
4060 			OSSafeReleaseNULL(allowable);
4061 		}
4062 		if (obj) {
4063 			obj->release();
4064 		}
4065 
4066 		FAKE_STACK_FRAME_END();
4067 	} else {
4068 		res = err;
4069 	}
4070 
4071 	*result = res;
4072 	return err;
4073 }
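
/*
 * User-space sketch (illustrative): IORegistryEntrySetCFProperties() feeds the
 * XML-unserialize path above. When the target entry publishes
 * gIORegistryEntryAllowableSetPropertiesKey, every key in the dictionary must be
 * listed there or the call fails with kIOReturnBadArgument. "ExampleKey" is a
 * made-up property name.
 *
 *   #include <CoreFoundation/CoreFoundation.h>
 *   #include <IOKit/IOKitLib.h>
 *
 *   static kern_return_t
 *   SetOneProperty(io_registry_entry_t entry)
 *   {
 *       CFMutableDictionaryRef dict = CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
 *           &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
 *       CFDictionarySetValue(dict, CFSTR("ExampleKey"), kCFBooleanTrue);
 *       kern_return_t kr = IORegistryEntrySetCFProperties(entry, dict);
 *       CFRelease(dict);
 *       return kr;
 *   }
 */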
4074 
4075 /* Routine io_registry_entry_get_child_iterator */
4076 kern_return_t
4077 is_io_registry_entry_get_child_iterator(
4078 	io_object_t registry_entry,
4079 	io_name_t plane,
4080 	io_object_t *iterator )
4081 {
4082 	CHECK( IORegistryEntry, registry_entry, entry );
4083 
4084 	*iterator = IOUserIterator::withIterator(entry->getChildIterator(
4085 		    IORegistryEntry::getPlane( plane )));
4086 
4087 	return kIOReturnSuccess;
4088 }
4089 
4090 /* Routine io_registry_entry_get_parent_iterator */
4091 kern_return_t
4092 is_io_registry_entry_get_parent_iterator(
4093 	io_object_t registry_entry,
4094 	io_name_t plane,
4095 	io_object_t *iterator)
4096 {
4097 	CHECK( IORegistryEntry, registry_entry, entry );
4098 
4099 	*iterator = IOUserIterator::withIterator(entry->getParentIterator(
4100 		    IORegistryEntry::getPlane( plane )));
4101 
4102 	return kIOReturnSuccess;
4103 }
4104 
4105 /* Routine io_service_get_busy_state */
4106 kern_return_t
4107 is_io_service_get_busy_state(
4108 	io_object_t _service,
4109 	uint32_t *busyState )
4110 {
4111 	CHECK( IOService, _service, service );
4112 
4113 	*busyState = service->getBusyState();
4114 
4115 	return kIOReturnSuccess;
4116 }
4117 
4118 /* Routine io_service_get_state */
4119 kern_return_t
4120 is_io_service_get_state(
4121 	io_object_t _service,
4122 	uint64_t *state,
4123 	uint32_t *busy_state,
4124 	uint64_t *accumulated_busy_time )
4125 {
4126 	CHECK( IOService, _service, service );
4127 
4128 	*state                 = service->getState();
4129 	*busy_state            = service->getBusyState();
4130 	*accumulated_busy_time = service->getAccumulatedBusyTime();
4131 
4132 	return kIOReturnSuccess;
4133 }
4134 
4135 /* Routine io_service_wait_quiet */
4136 kern_return_t
4137 is_io_service_wait_quiet(
4138 	io_object_t _service,
4139 	mach_timespec_t wait_time )
4140 {
4141 	uint64_t    timeoutNS;
4142 
4143 	CHECK( IOService, _service, service );
4144 
4145 	timeoutNS = wait_time.tv_sec;
4146 	timeoutNS *= kSecondScale;
4147 	timeoutNS += wait_time.tv_nsec;
4148 
4149 	return service->waitQuiet(timeoutNS);
4150 }
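
/*
 * User-space sketch (illustrative): IOKitLib's IOServiceWaitQuiet() supplies the
 * mach_timespec_t that is folded into the nanosecond timeout above. The
 * _with_options variant below additionally requires the kIOWaitQuietPanicsEntitlement
 * entitlement before honoring kIOWaitQuietPanicOnFailure.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static kern_return_t
 *   WaitForServiceQuiet(io_service_t service)
 *   {
 *       mach_timespec_t timeout = { .tv_sec = 10, .tv_nsec = 0 };   // 10 seconds
 *       return IOServiceWaitQuiet(service, &timeout);
 *   }
 */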
4151 
4152 /* Routine io_service_wait_quiet_with_options */
4153 kern_return_t
4154 is_io_service_wait_quiet_with_options(
4155 	io_object_t _service,
4156 	mach_timespec_t wait_time,
4157 	uint32_t options )
4158 {
4159 	uint64_t    timeoutNS;
4160 
4161 	CHECK( IOService, _service, service );
4162 
4163 	timeoutNS = wait_time.tv_sec;
4164 	timeoutNS *= kSecondScale;
4165 	timeoutNS += wait_time.tv_nsec;
4166 
4167 	if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4168 		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4169 		IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4170 		OSSafeReleaseNULL(taskName);
4171 
4172 		/* strip this option from the options before calling waitQuietWithOptions */
4173 		options &= ~kIOWaitQuietPanicOnFailure;
4174 	}
4175 
4176 	return service->waitQuietWithOptions(timeoutNS, options);
4177 }
4178 
4179 
4180 /* Routine io_service_request_probe */
4181 kern_return_t
4182 is_io_service_request_probe(
4183 	io_object_t _service,
4184 	uint32_t options )
4185 {
4186 	CHECK( IOService, _service, service );
4187 
4188 	return service->requestProbe( options );
4189 }
4190 
4191 /* Routine io_service_get_authorization_id */
4192 kern_return_t
4193 is_io_service_get_authorization_id(
4194 	io_object_t _service,
4195 	uint64_t *authorization_id )
4196 {
4197 	kern_return_t          kr;
4198 
4199 	CHECK( IOService, _service, service );
4200 
4201 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4202 	    kIOClientPrivilegeAdministrator );
4203 	if (kIOReturnSuccess != kr) {
4204 		return kr;
4205 	}
4206 
4207 	*authorization_id = service->getAuthorizationID();
4208 
4209 	return kr;
4210 }
4211 
4212 /* Routine io_service_set_authorization_id */
4213 kern_return_t
4214 is_io_service_set_authorization_id(
4215 	io_object_t _service,
4216 	uint64_t authorization_id )
4217 {
4218 	CHECK( IOService, _service, service );
4219 
4220 	return service->setAuthorizationID( authorization_id );
4221 }
4222 
4223 /* Routine io_service_open_extended */
4224 kern_return_t
4225 is_io_service_open_extended(
4226 	io_object_t _service,
4227 	task_t owningTask,
4228 	uint32_t connect_type,
4229 	NDR_record_t ndr,
4230 	io_buf_ptr_t properties,
4231 	mach_msg_type_number_t propertiesCnt,
4232 	kern_return_t * result,
4233 	io_object_t *connection )
4234 {
4235 	IOUserClient * client = NULL;
4236 	kern_return_t  err = KERN_SUCCESS;
4237 	IOReturn       res = kIOReturnSuccess;
4238 	OSDictionary * propertiesDict = NULL;
4239 	bool           disallowAccess = false;
4240 
4241 	CHECK( IOService, _service, service );
4242 
4243 	if (!owningTask) {
4244 		return kIOReturnBadArgument;
4245 	}
4246 	assert(owningTask == current_task());
4247 	if (owningTask != current_task()) {
4248 		return kIOReturnBadArgument;
4249 	}
4250 
4251 #if CONFIG_MACF
4252 	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
4253 		return kIOReturnNotPermitted;
4254 	}
4255 #endif
4256 	do{
4257 		if (properties) {
4258 			return kIOReturnUnsupported;
4259 		}
4260 #if 0
4261 		{
4262 			OSObject *      obj;
4263 			vm_offset_t     data;
4264 			vm_map_offset_t map_data;
4265 
4266 			if (propertiesCnt > sizeof(io_struct_inband_t)) {
4267 				return kIOReturnMessageTooLarge;
4268 			}
4269 
4270 			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
4271 			res = err;
4272 			data = CAST_DOWN(vm_offset_t, map_data);
4273 			if (KERN_SUCCESS == err) {
4274 				// must return success after vm_map_copyout() succeeds
4275 				obj = OSUnserializeXML((const char *) data, propertiesCnt );
4276 				vm_deallocate( kernel_map, data, propertiesCnt );
4277 				propertiesDict = OSDynamicCast(OSDictionary, obj);
4278 				if (!propertiesDict) {
4279 					res = kIOReturnBadArgument;
4280 					if (obj) {
4281 						obj->release();
4282 					}
4283 				}
4284 			}
4285 			if (kIOReturnSuccess != res) {
4286 				break;
4287 			}
4288 		}
4289 #endif
4290 		res = service->newUserClient( owningTask, (void *) owningTask,
4291 		    connect_type, propertiesDict, &client );
4292 
4293 		if (propertiesDict) {
4294 			propertiesDict->release();
4295 		}
4296 
4297 		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
4298 			// client should always be an IOUserClient
4299 			res = kIOReturnError;
4300 		}
4301 
4302 		if (res == kIOReturnSuccess) {
4303 			if (!client->reserved) {
4304 				if (!client->reserve()) {
4305 					client->clientClose();
4306 					OSSafeReleaseNULL(client);
4307 					res = kIOReturnNoMemory;
4308 				}
4309 			}
4310 		}
4311 
4312 		if (res == kIOReturnSuccess) {
4313 			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
4314 			if (creatorName) {
4315 				client->setProperty(kIOUserClientCreatorKey, creatorName);
4316 			}
4317 			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
4318 			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
4319 			if (client->sharedInstance) {
4320 				IOLockLock(gIOUserClientOwnersLock);
4321 			}
4322 			if (!client->opened) {
4323 				client->opened = true;
4324 
4325 				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
4326 				{
4327 					OSObject * obj;
4328 					extern const OSSymbol * gIOSurfaceIdentifier;
4329 					obj = client->getProperty(kIOUserClientDefaultLockingKey);
4330 					bool hasProps = false;
4331 
4332 					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
4333 					if (obj) {
4334 						hasProps = true;
4335 						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
4336 					} else if (client->uc2022) {
4337 						res = kIOReturnError;
4338 					}
4339 					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
4340 					if (obj) {
4341 						hasProps = true;
4342 						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
4343 					} else if (client->uc2022) {
4344 						res = kIOReturnError;
4345 					}
4346 					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
4347 					if (obj) {
4348 						hasProps = true;
4349 						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
4350 					} else if (client->uc2022) {
4351 						res = kIOReturnError;
4352 					}
4353 					if (kIOReturnSuccess != res) {
4354 						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
4355 						    client->getMetaClass()->getClassName());
4356 					}
4357 					if (!hasProps) {
4358 						const OSMetaClass * meta;
4359 						OSKext            * kext;
4360 						meta = client->getMetaClass();
4361 						kext = meta->getKext();
4362 						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
4363 							client->defaultLocking = true;
4364 							client->defaultLockingSetProperties = false;
4365 							client->defaultLockingSingleThreadExternalMethod = false;
4366 							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
4367 						}
4368 					}
4369 				}
4370 			}
4371 			if (client->sharedInstance) {
4372 				IOLockUnlock(gIOUserClientOwnersLock);
4373 			}
4374 
4375 			OSObject     * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
4376 			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
4377 			// If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
4378 			// If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString.
4379 			// If the value is kOSBooleanFalse, we allow access.
4380 			// If the value is an OSString, we allow access if the task has the named entitlement.
4381 			if (client->uc2022) {
4382 				if (!requiredEntitlement) {
4383 					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
4384 					    client->getMetaClass()->getClassName());
4385 					disallowAccess = true;
4386 				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
4387 					IOLog("IOUC %s had " kIOUserClientEntitlementsKey " with value not boolean false or string\n", client->getMetaClass()->getClassName());
4388 					disallowAccess = true;
4389 				}
4390 			}
4391 
4392 			if (requiredEntitlement && disallowAccess == false) {
4393 				if (kOSBooleanFalse == requiredEntitlement) {
4394 					// allow
4395 					disallowAccess = false;
4396 				} else {
4397 					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
4398 					if (disallowAccess) {
4399 						IOLog("IOUC %s missing entitlement in process %s\n",
4400 						    client->getMetaClass()->getClassName(), creatorNameCStr);
4401 					}
4402 				}
4403 			}
4404 
4405 			OSSafeReleaseNULL(requiredEntitlement);
4406 
4407 			if (disallowAccess) {
4408 				res = kIOReturnNotPrivileged;
4409 			}
4410 #if CONFIG_MACF
4411 			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
4412 				IOLog("IOUC %s failed MACF in process %s\n",
4413 				    client->getMetaClass()->getClassName(), creatorNameCStr);
4414 				res = kIOReturnNotPermitted;
4415 			}
4416 #endif
4417 
4418 			if ((kIOReturnSuccess == res)
4419 			    && gIOUCFilterCallbacks
4420 			    && gIOUCFilterCallbacks->io_filter_resolver) {
4421 				io_filter_policy_t filterPolicy;
4422 				filterPolicy = client->filterForTask(owningTask, 0);
4423 				if (!filterPolicy) {
4424 					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
4425 					if (kIOReturnUnsupported == res) {
4426 						res = kIOReturnSuccess;
4427 					} else if (kIOReturnSuccess == res) {
4428 						client->filterForTask(owningTask, filterPolicy);
4429 					} else {
4430 						IOLog("IOUC %s failed sandbox in process %s\n",
4431 						    client->getMetaClass()->getClassName(), creatorNameCStr);
4432 					}
4433 				}
4434 			}
4435 
4436 			if (kIOReturnSuccess == res) {
4437 				res = client->registerOwner(owningTask);
4438 			}
4439 			OSSafeReleaseNULL(creatorName);
4440 
4441 			if (kIOReturnSuccess != res) {
4442 				IOStatisticsClientCall();
4443 				client->clientClose();
4444 				client->setTerminateDefer(service, false);
4445 				client->release();
4446 				client = NULL;
4447 				break;
4448 			}
4449 			client->setTerminateDefer(service, false);
4450 		}
4451 	}while (false);
4452 
4453 	*connection = client;
4454 	*result = res;
4455 
4456 	return err;
4457 }
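
/*
 * User-space sketch (illustrative): IOServiceOpen() is the IOKitLib wrapper that
 * generally reaches the routine above; the returned io_connect_t is balanced with
 * IOServiceClose(). The connect type constant here is hypothetical.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   enum { kExampleUserClientType = 0 };   // hypothetical connect type
 *
 *   static kern_return_t
 *   OpenExampleClient(io_service_t service, io_connect_t *connect)
 *   {
 *       return IOServiceOpen(service, mach_task_self(), kExampleUserClientType, connect);
 *   }
 */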
4458 
4459 /* Routine io_service_close */
4460 kern_return_t
4461 is_io_service_close(
4462 	io_object_t connection )
4463 {
4464 	OSSet * mappings;
4465 	if ((mappings = OSDynamicCast(OSSet, connection))) {
4466 		return kIOReturnSuccess;
4467 	}
4468 
4469 	CHECK( IOUserClient, connection, client );
4470 
4471 	IOStatisticsClientCall();
4472 
4473 	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
4474 		IORWLockWrite(&client->lock);
4475 		client->clientClose();
4476 		IORWLockUnlock(&client->lock);
4477 	} else {
4478 		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
4479 		    client->getRegistryEntryID(), client->getName());
4480 	}
4481 
4482 	return kIOReturnSuccess;
4483 }
4484 
4485 /* Routine io_connect_get_service */
4486 kern_return_t
4487 is_io_connect_get_service(
4488 	io_object_t connection,
4489 	io_object_t *service )
4490 {
4491 	IOService * theService;
4492 
4493 	CHECK( IOUserClient, connection, client );
4494 
4495 	theService = client->getService();
4496 	if (theService) {
4497 		theService->retain();
4498 	}
4499 
4500 	*service = theService;
4501 
4502 	return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4503 }
4504 
4505 /* Routine io_connect_set_notification_port */
4506 kern_return_t
4507 is_io_connect_set_notification_port(
4508 	io_object_t connection,
4509 	uint32_t notification_type,
4510 	mach_port_t port,
4511 	uint32_t reference)
4512 {
4513 	kern_return_t ret;
4514 	CHECK( IOUserClient, connection, client );
4515 
4516 	IOStatisticsClientCall();
4517 	IORWLockWrite(&client->lock);
4518 	ret = client->registerNotificationPort( port, notification_type,
4519 	    (io_user_reference_t) reference );
4520 	IORWLockUnlock(&client->lock);
4521 	return ret;
4522 }
4523 
4524 /* Routine io_connect_set_notification_port */
4525 kern_return_t
4526 is_io_connect_set_notification_port_64(
4527 	io_object_t connection,
4528 	uint32_t notification_type,
4529 	mach_port_t port,
4530 	io_user_reference_t reference)
4531 {
4532 	kern_return_t ret;
4533 	CHECK( IOUserClient, connection, client );
4534 
4535 	IOStatisticsClientCall();
4536 	IORWLockWrite(&client->lock);
4537 	ret = client->registerNotificationPort( port, notification_type,
4538 	    reference );
4539 	IORWLockUnlock(&client->lock);
4540 	return ret;
4541 }
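
/*
 * User-space sketch (illustrative): the IOKitLib wrapper IOConnectSetNotificationPort()
 * generally lands in one of the two registration routines above; the port usually comes
 * from an IONotificationPortRef or a raw receive right, and the reference is an opaque
 * value echoed back in notifications.
 *
 *   #include <mach/mach.h>
 *   #include <IOKit/IOKitLib.h>
 *
 *   static kern_return_t
 *   InstallNotificationPort(io_connect_t connect, uint32_t notificationType)
 *   {
 *       mach_port_t port = MACH_PORT_NULL;
 *       kern_return_t kr = mach_port_allocate(mach_task_self(),
 *           MACH_PORT_RIGHT_RECEIVE, &port);
 *       if (kr != KERN_SUCCESS) {
 *           return kr;
 *       }
 *       return IOConnectSetNotificationPort(connect, notificationType, port, 0);
 *   }
 */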
4542 
4543 /* Routine io_connect_map_memory_into_task */
4544 kern_return_t
4545 is_io_connect_map_memory_into_task
4546 (
4547 	io_connect_t connection,
4548 	uint32_t memory_type,
4549 	task_t into_task,
4550 	mach_vm_address_t *address,
4551 	mach_vm_size_t *size,
4552 	uint32_t flags
4553 )
4554 {
4555 	IOReturn            err;
4556 	IOMemoryMap *       map;
4557 
4558 	CHECK( IOUserClient, connection, client );
4559 
4560 	if (!into_task) {
4561 		return kIOReturnBadArgument;
4562 	}
4563 
4564 	IOStatisticsClientCall();
4565 	if (client->defaultLocking) {
4566 		IORWLockWrite(&client->lock);
4567 	}
4568 	map = client->mapClientMemory64( memory_type, into_task, flags, *address );
4569 	if (client->defaultLocking) {
4570 		IORWLockUnlock(&client->lock);
4571 	}
4572 
4573 	if (map) {
4574 		*address = map->getAddress();
4575 		if (size) {
4576 			*size = map->getSize();
4577 		}
4578 
4579 		if (client->sharedInstance
4580 		    || (into_task != current_task())) {
4581 			// push a name out to the task owning the map,
4582 			// so we can clean up maps
4583 			mach_port_name_t name __unused =
4584 			    IOMachPort::makeSendRightForTask(
4585 				into_task, map, IKOT_IOKIT_OBJECT );
4586 			map->release();
4587 		} else {
4588 			// keep it with the user client
4589 			IOLockLock( gIOObjectPortLock);
4590 			if (NULL == client->mappings) {
4591 				client->mappings = OSSet::withCapacity(2);
4592 			}
4593 			if (client->mappings) {
4594 				client->mappings->setObject( map);
4595 			}
4596 			IOLockUnlock( gIOObjectPortLock);
4597 			map->release();
4598 		}
4599 		err = kIOReturnSuccess;
4600 	} else {
4601 		err = kIOReturnBadArgument;
4602 	}
4603 
4604 	return err;
4605 }
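
/*
 * User-space sketch (illustrative): IOConnectMapMemory64() is the usual caller of the
 * routine above. With kIOMapAnywhere the kernel chooses the address and returns the
 * mapped range; the memory type is defined by the individual user client.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static kern_return_t
 *   MapSharedMemory(io_connect_t connect, uint32_t memoryType)
 *   {
 *       mach_vm_address_t addr = 0;
 *       mach_vm_size_t    size = 0;
 *       kern_return_t kr = IOConnectMapMemory64(connect, memoryType,
 *           mach_task_self(), &addr, &size, kIOMapAnywhere);
 *       if (kr == KERN_SUCCESS) {
 *           // addr/size now describe the mapping in this task.
 *       }
 *       return kr;
 *   }
 */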
4606 
4607 /* Routine io_connect_map_memory */
4608 kern_return_t
4609 is_io_connect_map_memory(
4610 	io_object_t     connect,
4611 	uint32_t        type,
4612 	task_t          task,
4613 	uint32_t  *     mapAddr,
4614 	uint32_t  *     mapSize,
4615 	uint32_t        flags )
4616 {
4617 	IOReturn          err;
4618 	mach_vm_address_t address;
4619 	mach_vm_size_t    size;
4620 
4621 	address = SCALAR64(*mapAddr);
4622 	size    = SCALAR64(*mapSize);
4623 
4624 	err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4625 
4626 	*mapAddr = SCALAR32(address);
4627 	*mapSize = SCALAR32(size);
4628 
4629 	return err;
4630 }
4631 } /* extern "C" */
4632 
4633 IOMemoryMap *
4634 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4635 {
4636 	OSIterator *  iter;
4637 	IOMemoryMap * map = NULL;
4638 
4639 	IOLockLock(gIOObjectPortLock);
4640 
4641 	iter = OSCollectionIterator::withCollection(mappings);
4642 	if (iter) {
4643 		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4644 			if (mem == map->getMemoryDescriptor()) {
4645 				map->retain();
4646 				mappings->removeObject(map);
4647 				break;
4648 			}
4649 		}
4650 		iter->release();
4651 	}
4652 
4653 	IOLockUnlock(gIOObjectPortLock);
4654 
4655 	return map;
4656 }
4657 
4658 extern "C" {
4659 /* Routine io_connect_unmap_memory_from_task */
4660 kern_return_t
4661 is_io_connect_unmap_memory_from_task
4662 (
4663 	io_connect_t connection,
4664 	uint32_t memory_type,
4665 	task_t from_task,
4666 	mach_vm_address_t address)
4667 {
4668 	IOReturn            err;
4669 	IOOptionBits        options = 0;
4670 	IOMemoryDescriptor * memory = NULL;
4671 	IOMemoryMap *       map;
4672 
4673 	CHECK( IOUserClient, connection, client );
4674 
4675 	if (!from_task) {
4676 		return kIOReturnBadArgument;
4677 	}
4678 
4679 	IOStatisticsClientCall();
4680 	if (client->defaultLocking) {
4681 		IORWLockWrite(&client->lock);
4682 	}
4683 	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
4684 	if (client->defaultLocking) {
4685 		IORWLockUnlock(&client->lock);
4686 	}
4687 
4688 	if (memory && (kIOReturnSuccess == err)) {
4689 		options = (options & ~kIOMapUserOptionsMask)
4690 		    | kIOMapAnywhere | kIOMapReference;
4691 
4692 		map = memory->createMappingInTask( from_task, address, options );
4693 		memory->release();
4694 		if (map) {
4695 			IOLockLock( gIOObjectPortLock);
4696 			if (client->mappings) {
4697 				client->mappings->removeObject( map);
4698 			}
4699 			IOLockUnlock( gIOObjectPortLock);
4700 
4701 			mach_port_name_t name = 0;
4702 			bool is_shared_instance_or_not_from_current_task = from_task != current_task() || client->sharedInstance;
4703 			if (is_shared_instance_or_not_from_current_task) {
4704 				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
4705 				map->release();
4706 			}
4707 
4708 			if (name) {
4709 				map->userClientUnmap();
4710 				err = iokit_mod_send_right( from_task, name, -2 );
4711 				err = kIOReturnSuccess;
4712 			} else {
4713 				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
4714 			}
4715 			if (!is_shared_instance_or_not_from_current_task) {
4716 				map->release();
4717 			}
4718 		} else {
4719 			err = kIOReturnBadArgument;
4720 		}
4721 	}
4722 
4723 	return err;
4724 }
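
/*
 * User-space sketch (illustrative): the counterpart of the mapping call above is
 * IOConnectUnmapMemory64(), which hands back the address that the map call returned.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static kern_return_t
 *   UnmapSharedMemory(io_connect_t connect, uint32_t memoryType, mach_vm_address_t addr)
 *   {
 *       return IOConnectUnmapMemory64(connect, memoryType, mach_task_self(), addr);
 *   }
 */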
4725 
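/* Routine io_connect_unmap_memory */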
4726 kern_return_t
4727 is_io_connect_unmap_memory(
4728 	io_object_t     connect,
4729 	uint32_t        type,
4730 	task_t          task,
4731 	uint32_t        mapAddr )
4732 {
4733 	IOReturn            err;
4734 	mach_vm_address_t   address;
4735 
4736 	address = SCALAR64(mapAddr);
4737 
4738 	err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
4739 
4740 	return err;
4741 }
4742 
4743 
4744 /* Routine io_connect_add_client */
4745 kern_return_t
4746 is_io_connect_add_client(
4747 	io_object_t connection,
4748 	io_object_t connect_to)
4749 {
4750 	CHECK( IOUserClient, connection, client );
4751 	CHECK( IOUserClient, connect_to, to );
4752 
4753 	IOReturn ret;
4754 
4755 	IOStatisticsClientCall();
4756 	if (client->defaultLocking) {
4757 		IORWLockWrite(&client->lock);
4758 	}
4759 	ret = client->connectClient( to );
4760 	if (client->defaultLocking) {
4761 		IORWLockUnlock(&client->lock);
4762 	}
4763 	return ret;
4764 }
4765 
4766 
4767 /* Routine io_connect_set_properties */
4768 kern_return_t
4769 is_io_connect_set_properties(
4770 	io_object_t connection,
4771 	io_buf_ptr_t properties,
4772 	mach_msg_type_number_t propertiesCnt,
4773 	kern_return_t * result)
4774 {
4775 	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4776 }
4777 
4778 /* Routine io_connect_method_var_output */
4779 kern_return_t
4780 is_io_connect_method_var_output
4781 (
4782 	io_connect_t connection,
4783 	uint32_t selector,
4784 	io_scalar_inband64_t scalar_input,
4785 	mach_msg_type_number_t scalar_inputCnt,
4786 	io_struct_inband_t inband_input,
4787 	mach_msg_type_number_t inband_inputCnt,
4788 	mach_vm_address_t ool_input,
4789 	mach_vm_size_t ool_input_size,
4790 	io_struct_inband_t inband_output,
4791 	mach_msg_type_number_t *inband_outputCnt,
4792 	io_scalar_inband64_t scalar_output,
4793 	mach_msg_type_number_t *scalar_outputCnt,
4794 	io_buf_ptr_t *var_output,
4795 	mach_msg_type_number_t *var_outputCnt
4796 )
4797 {
4798 	CHECK( IOUserClient, connection, client );
4799 
4800 	IOExternalMethodArguments args;
4801 	IOReturn ret;
4802 	IOMemoryDescriptor * inputMD  = NULL;
4803 	OSObject *           structureVariableOutputData = NULL;
4804 
4805 	bzero(&args.__reserved[0], sizeof(args.__reserved));
4806 	args.__reservedA = 0;
4807 	args.version = kIOExternalMethodArgumentsCurrentVersion;
4808 
4809 	args.selector = selector;
4810 
4811 	args.asyncWakePort               = MACH_PORT_NULL;
4812 	args.asyncReference              = NULL;
4813 	args.asyncReferenceCount         = 0;
4814 	args.structureVariableOutputData = &structureVariableOutputData;
4815 
4816 	args.scalarInput = scalar_input;
4817 	args.scalarInputCount = scalar_inputCnt;
4818 	args.structureInput = inband_input;
4819 	args.structureInputSize = inband_inputCnt;
4820 
4821 	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4822 		return kIOReturnIPCError;
4823 	}
4824 
4825 	if (ool_input) {
4826 		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4827 		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
4828 		    current_task());
4829 	}
4830 
4831 	args.structureInputDescriptor = inputMD;
4832 
4833 	args.scalarOutput = scalar_output;
4834 	args.scalarOutputCount = *scalar_outputCnt;
4835 	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4836 	args.structureOutput = inband_output;
4837 	args.structureOutputSize = *inband_outputCnt;
4838 	args.structureOutputDescriptor = NULL;
4839 	args.structureOutputDescriptorSize = 0;
4840 
4841 	IOStatisticsClientCall();
4842 	ret = kIOReturnSuccess;
4843 
4844 	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
4845 	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
4846 		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
4847 	}
4848 
4849 	if (kIOReturnSuccess == ret) {
4850 		ret = client->callExternalMethod(selector, &args);
4851 	}
4852 
4853 	*scalar_outputCnt = args.scalarOutputCount;
4854 	*inband_outputCnt = args.structureOutputSize;
4855 
4856 	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
4857 		OSSerialize * serialize;
4858 		OSData      * data;
4859 		unsigned int  len;
4860 
4861 		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
4862 			len = serialize->getLength();
4863 			*var_outputCnt = len;
4864 			ret = copyoutkdata(serialize->text(), len, var_output);
4865 		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
4866 			data->clipForCopyout();
4867 			len = data->getLength();
4868 			*var_outputCnt = len;
4869 			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
4870 		} else {
4871 			ret = kIOReturnUnderrun;
4872 		}
4873 	}
4874 
4875 	if (inputMD) {
4876 		inputMD->release();
4877 	}
4878 	if (structureVariableOutputData) {
4879 		structureVariableOutputData->release();
4880 	}
4881 
4882 	return ret;
4883 }
4884 
4885 /* Routine io_connect_method */
4886 kern_return_t
4887 is_io_connect_method
4888 (
4889 	io_connect_t connection,
4890 	uint32_t selector,
4891 	io_scalar_inband64_t scalar_input,
4892 	mach_msg_type_number_t scalar_inputCnt,
4893 	io_struct_inband_t inband_input,
4894 	mach_msg_type_number_t inband_inputCnt,
4895 	mach_vm_address_t ool_input,
4896 	mach_vm_size_t ool_input_size,
4897 	io_struct_inband_t inband_output,
4898 	mach_msg_type_number_t *inband_outputCnt,
4899 	io_scalar_inband64_t scalar_output,
4900 	mach_msg_type_number_t *scalar_outputCnt,
4901 	mach_vm_address_t ool_output,
4902 	mach_vm_size_t *ool_output_size
4903 )
4904 {
4905 	CHECK( IOUserClient, connection, client );
4906 
4907 	IOExternalMethodArguments args;
4908 	IOReturn ret;
4909 	IOMemoryDescriptor * inputMD  = NULL;
4910 	IOMemoryDescriptor * outputMD = NULL;
4911 
4912 	bzero(&args.__reserved[0], sizeof(args.__reserved));
4913 	args.__reservedA = 0;
4914 	args.version = kIOExternalMethodArgumentsCurrentVersion;
4915 
4916 	args.selector = selector;
4917 
4918 	args.asyncWakePort               = MACH_PORT_NULL;
4919 	args.asyncReference              = NULL;
4920 	args.asyncReferenceCount         = 0;
4921 	args.structureVariableOutputData = NULL;
4922 
4923 	args.scalarInput = scalar_input;
4924 	args.scalarInputCount = scalar_inputCnt;
4925 	args.structureInput = inband_input;
4926 	args.structureInputSize = inband_inputCnt;
4927 
4928 	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4929 		return kIOReturnIPCError;
4930 	}
4931 	if (ool_output) {
4932 		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
4933 			return kIOReturnIPCError;
4934 		}
4935 		if (*ool_output_size > UINT_MAX) {
4936 			return kIOReturnIPCError;
4937 		}
4938 	}
4939 
4940 	if (ool_input) {
4941 		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4942 		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
4943 		    current_task());
4944 	}
4945 
4946 	args.structureInputDescriptor = inputMD;
4947 
4948 	args.scalarOutput = scalar_output;
4949 	args.scalarOutputCount = *scalar_outputCnt;
4950 	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4951 	args.structureOutput = inband_output;
4952 	args.structureOutputSize = *inband_outputCnt;
4953 
4954 	if (ool_output && ool_output_size) {
4955 		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4956 		    kIODirectionIn, current_task());
4957 	}
4958 
4959 	args.structureOutputDescriptor = outputMD;
4960 	args.structureOutputDescriptorSize = ool_output_size
4961 	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
4962 	    : 0;
4963 
4964 	IOStatisticsClientCall();
4965 	ret = kIOReturnSuccess;
4966 	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
4967 	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
4968 		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
4969 	}
4970 	if (kIOReturnSuccess == ret) {
4971 		ret = client->callExternalMethod( selector, &args );
4972 	}
4973 
4974 	*scalar_outputCnt = args.scalarOutputCount;
4975 	*inband_outputCnt = args.structureOutputSize;
4976 	*ool_output_size  = args.structureOutputDescriptorSize;
4977 
4978 	if (inputMD) {
4979 		inputMD->release();
4980 	}
4981 	if (outputMD) {
4982 		outputMD->release();
4983 	}
4984 
4985 	return ret;
4986 }
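
/*
 * User-space sketch (illustrative): IOConnectCallMethod() and the scalar/struct
 * convenience wrappers in IOKitLib.h marshal their arguments into the inband or
 * out-of-line forms consumed above. Selector 0 and the argument shapes here are
 * purely hypothetical.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static kern_return_t
 *   CallSelectorZero(io_connect_t connect)
 *   {
 *       uint64_t input[2]  = { 1, 2 };
 *       uint64_t output[1] = { 0 };
 *       uint32_t outputCnt = 1;
 *
 *       return IOConnectCallMethod(connect, 0,
 *           input, 2,              // scalar input
 *           NULL, 0,               // struct input
 *           output, &outputCnt,    // scalar output
 *           NULL, NULL);           // struct output
 *   }
 */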
4987 
4988 /* Routine io_connect_async_method */
4989 kern_return_t
4990 is_io_connect_async_method
4991 (
4992 	io_connect_t connection,
4993 	mach_port_t wake_port,
4994 	io_async_ref64_t reference,
4995 	mach_msg_type_number_t referenceCnt,
4996 	uint32_t selector,
4997 	io_scalar_inband64_t scalar_input,
4998 	mach_msg_type_number_t scalar_inputCnt,
4999 	io_struct_inband_t inband_input,
5000 	mach_msg_type_number_t inband_inputCnt,
5001 	mach_vm_address_t ool_input,
5002 	mach_vm_size_t ool_input_size,
5003 	io_struct_inband_t inband_output,
5004 	mach_msg_type_number_t *inband_outputCnt,
5005 	io_scalar_inband64_t scalar_output,
5006 	mach_msg_type_number_t *scalar_outputCnt,
5007 	mach_vm_address_t ool_output,
5008 	mach_vm_size_t * ool_output_size
5009 )
5010 {
5011 	CHECK( IOUserClient, connection, client );
5012 
5013 	IOExternalMethodArguments args;
5014 	IOReturn ret;
5015 	IOMemoryDescriptor * inputMD  = NULL;
5016 	IOMemoryDescriptor * outputMD = NULL;
5017 
5018 	if (referenceCnt < 1) {
5019 		return kIOReturnBadArgument;
5020 	}
5021 
5022 	bzero(&args.__reserved[0], sizeof(args.__reserved));
5023 	args.__reservedA = 0;
5024 	args.version = kIOExternalMethodArgumentsCurrentVersion;
5025 
5026 	reference[0]             = (io_user_reference_t) wake_port;
5027 	if (vm_map_is_64bit(get_task_map(current_task()))) {
5028 		reference[0]         |= kIOUCAsync64Flag;
5029 	}
5030 
5031 	args.selector = selector;
5032 
5033 	args.asyncWakePort       = wake_port;
5034 	args.asyncReference      = reference;
5035 	args.asyncReferenceCount = referenceCnt;
5036 
5037 	args.structureVariableOutputData = NULL;
5038 
5039 	args.scalarInput = scalar_input;
5040 	args.scalarInputCount = scalar_inputCnt;
5041 	args.structureInput = inband_input;
5042 	args.structureInputSize = inband_inputCnt;
5043 
5044 	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
5045 		return kIOReturnIPCError;
5046 	}
5047 	if (ool_output) {
5048 		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
5049 			return kIOReturnIPCError;
5050 		}
5051 		if (*ool_output_size > UINT_MAX) {
5052 			return kIOReturnIPCError;
5053 		}
5054 	}
5055 
5056 	if (ool_input) {
5057 		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
5058 		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
5059 		    current_task());
5060 	}
5061 
5062 	args.structureInputDescriptor = inputMD;
5063 
5064 	args.scalarOutput = scalar_output;
5065 	args.scalarOutputCount = *scalar_outputCnt;
5066 	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
5067 	args.structureOutput = inband_output;
5068 	args.structureOutputSize = *inband_outputCnt;
5069 
5070 	if (ool_output) {
5071 		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
5072 		    kIODirectionIn, current_task());
5073 	}
5074 
5075 	args.structureOutputDescriptor = outputMD;
5076 	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);
5077 
5078 	IOStatisticsClientCall();
5079 	ret = kIOReturnSuccess;
5080 	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
5081 	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
5082 		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
5083 	}
5084 	if (kIOReturnSuccess == ret) {
5085 		ret = client->callExternalMethod( selector, &args );
5086 	}
5087 
5088 	*scalar_outputCnt = args.scalarOutputCount;
5089 	*inband_outputCnt = args.structureOutputSize;
5090 	*ool_output_size  = args.structureOutputDescriptorSize;
5091 
5092 	if (inputMD) {
5093 		inputMD->release();
5094 	}
5095 	if (outputMD) {
5096 		outputMD->release();
5097 	}
5098 
5099 	return ret;
5100 }
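
/*
 * User-space sketch (illustrative): IOConnectCallAsyncMethod() supplies the wake port
 * and async reference that the routine above records in reference[0]; callers usually
 * stash a callback pointer and refcon in the reference and use the Mach port of an
 * IONotificationPortRef as the wake port. Selector and inputs are hypothetical.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static kern_return_t
 *   StartAsyncSelectorZero(io_connect_t connect, mach_port_t wakePort)
 *   {
 *       uint64_t asyncRef[kOSAsyncRef64Count] = { 0 };
 *       uint64_t input[1] = { 42 };
 *
 *       return IOConnectCallAsyncMethod(connect, 0, wakePort,
 *           asyncRef, kOSAsyncRef64Count,
 *           input, 1,
 *           NULL, 0,
 *           NULL, NULL,
 *           NULL, NULL);
 *   }
 */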
5101 
5102 /* Routine io_connect_method_scalarI_scalarO */
5103 kern_return_t
5104 is_io_connect_method_scalarI_scalarO(
5105 	io_object_t        connect,
5106 	uint32_t           index,
5107 	io_scalar_inband_t       input,
5108 	mach_msg_type_number_t   inputCount,
5109 	io_scalar_inband_t       output,
5110 	mach_msg_type_number_t * outputCount )
5111 {
5112 	IOReturn err;
5113 	uint32_t i;
5114 	io_scalar_inband64_t _input;
5115 	io_scalar_inband64_t _output;
5116 
5117 	mach_msg_type_number_t struct_outputCnt = 0;
5118 	mach_vm_size_t ool_output_size = 0;
5119 
5120 	bzero(&_output[0], sizeof(_output));
5121 	for (i = 0; i < inputCount; i++) {
5122 		_input[i] = SCALAR64(input[i]);
5123 	}
5124 
5125 	err = is_io_connect_method(connect, index,
5126 	    _input, inputCount,
5127 	    NULL, 0,
5128 	    0, 0,
5129 	    NULL, &struct_outputCnt,
5130 	    _output, outputCount,
5131 	    0, &ool_output_size);
5132 
5133 	for (i = 0; i < *outputCount; i++) {
5134 		output[i] = SCALAR32(_output[i]);
5135 	}
5136 
5137 	return err;
5138 }
5139 
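/*
 * Legacy IOExternalMethod dispatch: the pre-IOUserClient2022 method tables supply an
 * IOMethod taking up to six untyped arguments. The shims below therefore switch on the
 * declared input count and pack the caller's scalars, output slots, and struct pointers
 * into that fixed six-argument form before calling through the table entry.
 */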
5140 kern_return_t
5141 shim_io_connect_method_scalarI_scalarO(
5142 	IOExternalMethod *      method,
5143 	IOService *             object,
5144 	const io_user_scalar_t * input,
5145 	mach_msg_type_number_t   inputCount,
5146 	io_user_scalar_t * output,
5147 	mach_msg_type_number_t * outputCount )
5148 {
5149 	IOMethod            func;
5150 	io_scalar_inband_t  _output;
5151 	IOReturn            err;
5152 	err = kIOReturnBadArgument;
5153 
5154 	bzero(&_output[0], sizeof(_output));
5155 	do {
5156 		if (inputCount != method->count0) {
5157 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5158 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5159 			continue;
5160 		}
5161 		if (*outputCount != method->count1) {
5162 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
5163 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5164 			continue;
5165 		}
5166 
5167 		func = method->func;
5168 
5169 		switch (inputCount) {
5170 		case 6:
5171 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5172 			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
5173 			break;
5174 		case 5:
5175 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5176 			    ARG32(input[3]), ARG32(input[4]),
5177 			    &_output[0] );
5178 			break;
5179 		case 4:
5180 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5181 			    ARG32(input[3]),
5182 			    &_output[0], &_output[1] );
5183 			break;
5184 		case 3:
5185 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5186 			    &_output[0], &_output[1], &_output[2] );
5187 			break;
5188 		case 2:
5189 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
5190 			    &_output[0], &_output[1], &_output[2],
5191 			    &_output[3] );
5192 			break;
5193 		case 1:
5194 			err = (object->*func)(  ARG32(input[0]),
5195 			    &_output[0], &_output[1], &_output[2],
5196 			    &_output[3], &_output[4] );
5197 			break;
5198 		case 0:
5199 			err = (object->*func)(  &_output[0], &_output[1], &_output[2],
5200 			    &_output[3], &_output[4], &_output[5] );
5201 			break;
5202 
5203 		default:
5204 			IOLog("%s: Bad method table\n", object->getName());
5205 		}
5206 	}while (false);
5207 
5208 	uint32_t i;
5209 	for (i = 0; i < *outputCount; i++) {
5210 		output[i] = SCALAR32(_output[i]);
5211 	}
5212 
5213 	return err;
5214 }
5215 
5216 /* Routine io_async_method_scalarI_scalarO */
5217 kern_return_t
5218 is_io_async_method_scalarI_scalarO(
5219 	io_object_t        connect,
5220 	mach_port_t wake_port,
5221 	io_async_ref_t reference,
5222 	mach_msg_type_number_t referenceCnt,
5223 	uint32_t           index,
5224 	io_scalar_inband_t       input,
5225 	mach_msg_type_number_t   inputCount,
5226 	io_scalar_inband_t       output,
5227 	mach_msg_type_number_t * outputCount )
5228 {
5229 	IOReturn err;
5230 	uint32_t i;
5231 	io_scalar_inband64_t _input;
5232 	io_scalar_inband64_t _output;
5233 	io_async_ref64_t _reference;
5234 
5235 	if (referenceCnt > ASYNC_REF64_COUNT) {
5236 		return kIOReturnBadArgument;
5237 	}
5238 	bzero(&_output[0], sizeof(_output));
5239 	for (i = 0; i < referenceCnt; i++) {
5240 		_reference[i] = REF64(reference[i]);
5241 	}
5242 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5243 
5244 	mach_msg_type_number_t struct_outputCnt = 0;
5245 	mach_vm_size_t ool_output_size = 0;
5246 
5247 	for (i = 0; i < inputCount; i++) {
5248 		_input[i] = SCALAR64(input[i]);
5249 	}
5250 
5251 	err = is_io_connect_async_method(connect,
5252 	    wake_port, _reference, referenceCnt,
5253 	    index,
5254 	    _input, inputCount,
5255 	    NULL, 0,
5256 	    0, 0,
5257 	    NULL, &struct_outputCnt,
5258 	    _output, outputCount,
5259 	    0, &ool_output_size);
5260 
5261 	for (i = 0; i < *outputCount; i++) {
5262 		output[i] = SCALAR32(_output[i]);
5263 	}
5264 
5265 	return err;
5266 }
5267 /* Routine io_async_method_scalarI_structureO */
5268 kern_return_t
5269 is_io_async_method_scalarI_structureO(
5270 	io_object_t     connect,
5271 	mach_port_t wake_port,
5272 	io_async_ref_t reference,
5273 	mach_msg_type_number_t referenceCnt,
5274 	uint32_t        index,
5275 	io_scalar_inband_t input,
5276 	mach_msg_type_number_t  inputCount,
5277 	io_struct_inband_t              output,
5278 	mach_msg_type_number_t *        outputCount )
5279 {
5280 	uint32_t i;
5281 	io_scalar_inband64_t _input;
5282 	io_async_ref64_t _reference;
5283 
5284 	if (referenceCnt > ASYNC_REF64_COUNT) {
5285 		return kIOReturnBadArgument;
5286 	}
5287 	for (i = 0; i < referenceCnt; i++) {
5288 		_reference[i] = REF64(reference[i]);
5289 	}
5290 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5291 
5292 	mach_msg_type_number_t scalar_outputCnt = 0;
5293 	mach_vm_size_t ool_output_size = 0;
5294 
5295 	for (i = 0; i < inputCount; i++) {
5296 		_input[i] = SCALAR64(input[i]);
5297 	}
5298 
5299 	return is_io_connect_async_method(connect,
5300 	           wake_port, _reference, referenceCnt,
5301 	           index,
5302 	           _input, inputCount,
5303 	           NULL, 0,
5304 	           0, 0,
5305 	           output, outputCount,
5306 	           NULL, &scalar_outputCnt,
5307 	           0, &ool_output_size);
5308 }
5309 
5310 /* Routine io_async_method_scalarI_structureI */
5311 kern_return_t
5312 is_io_async_method_scalarI_structureI(
5313 	io_connect_t            connect,
5314 	mach_port_t wake_port,
5315 	io_async_ref_t reference,
5316 	mach_msg_type_number_t referenceCnt,
5317 	uint32_t                index,
5318 	io_scalar_inband_t      input,
5319 	mach_msg_type_number_t  inputCount,
5320 	io_struct_inband_t      inputStruct,
5321 	mach_msg_type_number_t  inputStructCount )
5322 {
5323 	uint32_t i;
5324 	io_scalar_inband64_t _input;
5325 	io_async_ref64_t _reference;
5326 
5327 	if (referenceCnt > ASYNC_REF64_COUNT) {
5328 		return kIOReturnBadArgument;
5329 	}
5330 	for (i = 0; i < referenceCnt; i++) {
5331 		_reference[i] = REF64(reference[i]);
5332 	}
5333 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5334 
5335 	mach_msg_type_number_t scalar_outputCnt = 0;
5336 	mach_msg_type_number_t inband_outputCnt = 0;
5337 	mach_vm_size_t ool_output_size = 0;
5338 
5339 	for (i = 0; i < inputCount; i++) {
5340 		_input[i] = SCALAR64(input[i]);
5341 	}
5342 
5343 	return is_io_connect_async_method(connect,
5344 	           wake_port, _reference, referenceCnt,
5345 	           index,
5346 	           _input, inputCount,
5347 	           inputStruct, inputStructCount,
5348 	           0, 0,
5349 	           NULL, &inband_outputCnt,
5350 	           NULL, &scalar_outputCnt,
5351 	           0, &ool_output_size);
5352 }
5353 
5354 /* Routine io_async_method_structureI_structureO */
5355 kern_return_t
5356 is_io_async_method_structureI_structureO(
5357 	io_object_t     connect,
5358 	mach_port_t wake_port,
5359 	io_async_ref_t reference,
5360 	mach_msg_type_number_t referenceCnt,
5361 	uint32_t        index,
5362 	io_struct_inband_t              input,
5363 	mach_msg_type_number_t  inputCount,
5364 	io_struct_inband_t              output,
5365 	mach_msg_type_number_t *        outputCount )
5366 {
5367 	uint32_t i;
5368 	mach_msg_type_number_t scalar_outputCnt = 0;
5369 	mach_vm_size_t ool_output_size = 0;
5370 	io_async_ref64_t _reference;
5371 
5372 	if (referenceCnt > ASYNC_REF64_COUNT) {
5373 		return kIOReturnBadArgument;
5374 	}
5375 	for (i = 0; i < referenceCnt; i++) {
5376 		_reference[i] = REF64(reference[i]);
5377 	}
5378 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5379 
5380 	return is_io_connect_async_method(connect,
5381 	           wake_port, _reference, referenceCnt,
5382 	           index,
5383 	           NULL, 0,
5384 	           input, inputCount,
5385 	           0, 0,
5386 	           output, outputCount,
5387 	           NULL, &scalar_outputCnt,
5388 	           0, &ool_output_size);
5389 }
5390 
5391 
5392 kern_return_t
5393 shim_io_async_method_scalarI_scalarO(
5394 	IOExternalAsyncMethod * method,
5395 	IOService *             object,
5396 	mach_port_t             asyncWakePort,
5397 	io_user_reference_t *   asyncReference,
5398 	uint32_t                asyncReferenceCount,
5399 	const io_user_scalar_t * input,
5400 	mach_msg_type_number_t   inputCount,
5401 	io_user_scalar_t * output,
5402 	mach_msg_type_number_t * outputCount )
5403 {
5404 	IOAsyncMethod       func;
5405 	uint32_t            i;
5406 	io_scalar_inband_t  _output;
5407 	IOReturn            err;
5408 	io_async_ref_t      reference;
5409 
5410 	bzero(&_output[0], sizeof(_output));
5411 	for (i = 0; i < asyncReferenceCount; i++) {
5412 		reference[i] = REF32(asyncReference[i]);
5413 	}
5414 
5415 	err = kIOReturnBadArgument;
5416 
5417 	do {
5418 		if (inputCount != method->count0) {
5419 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5420 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5421 			continue;
5422 		}
5423 		if (*outputCount != method->count1) {
5424 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
5425 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5426 			continue;
5427 		}
5428 
5429 		func = method->func;
5430 
5431 		switch (inputCount) {
5432 		case 6:
5433 			err = (object->*func)(  reference,
5434 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5435 			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
5436 			break;
5437 		case 5:
5438 			err = (object->*func)(  reference,
5439 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5440 			    ARG32(input[3]), ARG32(input[4]),
5441 			    &_output[0] );
5442 			break;
5443 		case 4:
5444 			err = (object->*func)(  reference,
5445 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5446 			    ARG32(input[3]),
5447 			    &_output[0], &_output[1] );
5448 			break;
5449 		case 3:
5450 			err = (object->*func)(  reference,
5451 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5452 			    &_output[0], &_output[1], &_output[2] );
5453 			break;
5454 		case 2:
5455 			err = (object->*func)(  reference,
5456 			    ARG32(input[0]), ARG32(input[1]),
5457 			    &_output[0], &_output[1], &_output[2],
5458 			    &_output[3] );
5459 			break;
5460 		case 1:
5461 			err = (object->*func)(  reference,
5462 			    ARG32(input[0]),
5463 			    &_output[0], &_output[1], &_output[2],
5464 			    &_output[3], &_output[4] );
5465 			break;
5466 		case 0:
5467 			err = (object->*func)(  reference,
5468 			    &_output[0], &_output[1], &_output[2],
5469 			    &_output[3], &_output[4], &_output[5] );
5470 			break;
5471 
5472 		default:
5473 			IOLog("%s: Bad method table\n", object->getName());
5474 		}
5475 	}while (false);
5476 
5477 	for (i = 0; i < *outputCount; i++) {
5478 		output[i] = SCALAR32(_output[i]);
5479 	}
5480 
5481 	return err;
5482 }
5483 
5484 
5485 /* Routine io_connect_method_scalarI_structureO */
5486 kern_return_t
5487 is_io_connect_method_scalarI_structureO(
5488 	io_object_t     connect,
5489 	uint32_t        index,
5490 	io_scalar_inband_t input,
5491 	mach_msg_type_number_t  inputCount,
5492 	io_struct_inband_t              output,
5493 	mach_msg_type_number_t *        outputCount )
5494 {
5495 	uint32_t i;
5496 	io_scalar_inband64_t _input;
5497 
5498 	mach_msg_type_number_t scalar_outputCnt = 0;
5499 	mach_vm_size_t ool_output_size = 0;
5500 
5501 	for (i = 0; i < inputCount; i++) {
5502 		_input[i] = SCALAR64(input[i]);
5503 	}
5504 
5505 	return is_io_connect_method(connect, index,
5506 	           _input, inputCount,
5507 	           NULL, 0,
5508 	           0, 0,
5509 	           output, outputCount,
5510 	           NULL, &scalar_outputCnt,
5511 	           0, &ool_output_size);
5512 }
5513 
5514 kern_return_t
5515 shim_io_connect_method_scalarI_structureO(
5516 
5517 	IOExternalMethod *      method,
5518 	IOService *             object,
5519 	const io_user_scalar_t * input,
5520 	mach_msg_type_number_t  inputCount,
5521 	io_struct_inband_t              output,
5522 	IOByteCount *   outputCount )
5523 {
5524 	IOMethod            func;
5525 	IOReturn            err;
5526 
5527 	err = kIOReturnBadArgument;
5528 
5529 	do {
5530 		if (inputCount != method->count0) {
5531 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5532 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5533 			continue;
5534 		}
5535 		if ((kIOUCVariableStructureSize != method->count1)
5536 		    && (*outputCount != method->count1)) {
5537 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5538 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5539 			continue;
5540 		}
5541 
5542 		func = method->func;
5543 
5544 		switch (inputCount) {
5545 		case 5:
5546 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5547 			    ARG32(input[3]), ARG32(input[4]),
5548 			    output );
5549 			break;
5550 		case 4:
5551 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5552 			    ARG32(input[3]),
5553 			    output, (void *)outputCount );
5554 			break;
5555 		case 3:
5556 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5557 			    output, (void *)outputCount, NULL );
5558 			break;
5559 		case 2:
5560 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
5561 			    output, (void *)outputCount, NULL, NULL );
5562 			break;
5563 		case 1:
5564 			err = (object->*func)(  ARG32(input[0]),
5565 			    output, (void *)outputCount, NULL, NULL, NULL );
5566 			break;
5567 		case 0:
5568 			err = (object->*func)(  output, (void *)outputCount, NULL, NULL, NULL, NULL );
5569 			break;
5570 
5571 		default:
5572 			IOLog("%s: Bad method table\n", object->getName());
5573 		}
5574 	}while (false);
5575 
5576 	return err;
5577 }
5578 
5579 
5580 kern_return_t
5581 shim_io_async_method_scalarI_structureO(
5582 	IOExternalAsyncMethod * method,
5583 	IOService *             object,
5584 	mach_port_t             asyncWakePort,
5585 	io_user_reference_t *   asyncReference,
5586 	uint32_t                asyncReferenceCount,
5587 	const io_user_scalar_t * input,
5588 	mach_msg_type_number_t  inputCount,
5589 	io_struct_inband_t              output,
5590 	mach_msg_type_number_t *        outputCount )
5591 {
5592 	IOAsyncMethod       func;
5593 	uint32_t            i;
5594 	IOReturn            err;
5595 	io_async_ref_t      reference;
5596 
5597 	for (i = 0; i < asyncReferenceCount; i++) {
5598 		reference[i] = REF32(asyncReference[i]);
5599 	}
5600 
5601 	err = kIOReturnBadArgument;
5602 	do {
5603 		if (inputCount != method->count0) {
5604 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5605 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5606 			continue;
5607 		}
5608 		if ((kIOUCVariableStructureSize != method->count1)
5609 		    && (*outputCount != method->count1)) {
5610 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5611 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5612 			continue;
5613 		}
5614 
5615 		func = method->func;
5616 
5617 		switch (inputCount) {
5618 		case 5:
5619 			err = (object->*func)(  reference,
5620 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5621 			    ARG32(input[3]), ARG32(input[4]),
5622 			    output );
5623 			break;
5624 		case 4:
5625 			err = (object->*func)(  reference,
5626 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5627 			    ARG32(input[3]),
5628 			    output, (void *)outputCount );
5629 			break;
5630 		case 3:
5631 			err = (object->*func)(  reference,
5632 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5633 			    output, (void *)outputCount, NULL );
5634 			break;
5635 		case 2:
5636 			err = (object->*func)(  reference,
5637 			    ARG32(input[0]), ARG32(input[1]),
5638 			    output, (void *)outputCount, NULL, NULL );
5639 			break;
5640 		case 1:
5641 			err = (object->*func)(  reference,
5642 			    ARG32(input[0]),
5643 			    output, (void *)outputCount, NULL, NULL, NULL );
5644 			break;
5645 		case 0:
5646 			err = (object->*func)(  reference,
5647 			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
5648 			break;
5649 
5650 		default:
5651 			IOLog("%s: Bad method table\n", object->getName());
5652 		}
5653 	}while (false);
5654 
5655 	return err;
5656 }
5657 
5658 /* Routine io_connect_method_scalarI_structureI */
5659 kern_return_t
5660 is_io_connect_method_scalarI_structureI(
5661 	io_connect_t            connect,
5662 	uint32_t                index,
5663 	io_scalar_inband_t      input,
5664 	mach_msg_type_number_t  inputCount,
5665 	io_struct_inband_t      inputStruct,
5666 	mach_msg_type_number_t  inputStructCount )
5667 {
5668 	uint32_t i;
5669 	io_scalar_inband64_t _input;
5670 
5671 	mach_msg_type_number_t scalar_outputCnt = 0;
5672 	mach_msg_type_number_t inband_outputCnt = 0;
5673 	mach_vm_size_t ool_output_size = 0;
5674 
5675 	for (i = 0; i < inputCount; i++) {
5676 		_input[i] = SCALAR64(input[i]);
5677 	}
5678 
5679 	return is_io_connect_method(connect, index,
5680 	           _input, inputCount,
5681 	           inputStruct, inputStructCount,
5682 	           0, 0,
5683 	           NULL, &inband_outputCnt,
5684 	           NULL, &scalar_outputCnt,
5685 	           0, &ool_output_size);
5686 }
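
/*
 * is_io_connect_method_scalarI_structureI() is the MIG server routine for the
 * older scalar-input / struct-input message shape: it widens each inband
 * 32-bit scalar with SCALAR64() and forwards to the unified
 * is_io_connect_method(), passing empty output buffers since this shape
 * returns no data.
 */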
5687 
5688 kern_return_t
5689 shim_io_connect_method_scalarI_structureI(
5690 	IOExternalMethod *  method,
5691 	IOService *         object,
5692 	const io_user_scalar_t * input,
5693 	mach_msg_type_number_t  inputCount,
5694 	io_struct_inband_t              inputStruct,
5695 	mach_msg_type_number_t  inputStructCount )
5696 {
5697 	IOMethod            func;
5698 	IOReturn            err = kIOReturnBadArgument;
5699 
5700 	do {
5701 		if (inputCount != method->count0) {
5702 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5703 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5704 			continue;
5705 		}
5706 		if ((kIOUCVariableStructureSize != method->count1)
5707 		    && (inputStructCount != method->count1)) {
5708 			IOLog("%s:%d %s: IOUserClient inputStructCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5709 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5710 			continue;
5711 		}
5712 
5713 		func = method->func;
5714 
5715 		switch (inputCount) {
5716 		case 5:
5717 			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5718 			    ARG32(input[3]), ARG32(input[4]),
5719 			    inputStruct );
5720 			break;
5721 		case 4:
5722 			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *)  input[2],
5723 			    ARG32(input[3]),
5724 			    inputStruct, (void *)(uintptr_t)inputStructCount );
5725 			break;
5726 		case 3:
5727 			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5728 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5729 			    NULL );
5730 			break;
5731 		case 2:
5732 			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5733 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5734 			    NULL, NULL );
5735 			break;
5736 		case 1:
5737 			err = (object->*func)( ARG32(input[0]),
5738 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5739 			    NULL, NULL, NULL );
5740 			break;
5741 		case 0:
5742 			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5743 			    NULL, NULL, NULL, NULL );
5744 			break;
5745 
5746 		default:
5747 			IOLog("%s: Bad method table\n", object->getName());
5748 		}
5749 	} while (false);
5750 
5751 	return err;
5752 }
5753 
5754 kern_return_t
5755 shim_io_async_method_scalarI_structureI(
5756 	IOExternalAsyncMethod * method,
5757 	IOService *             object,
5758 	mach_port_t             asyncWakePort,
5759 	io_user_reference_t *   asyncReference,
5760 	uint32_t                asyncReferenceCount,
5761 	const io_user_scalar_t * input,
5762 	mach_msg_type_number_t  inputCount,
5763 	io_struct_inband_t              inputStruct,
5764 	mach_msg_type_number_t  inputStructCount )
5765 {
5766 	IOAsyncMethod       func;
5767 	uint32_t            i;
5768 	IOReturn            err = kIOReturnBadArgument;
5769 	io_async_ref_t      reference;
5770 
5771 	for (i = 0; i < asyncReferenceCount; i++) {
5772 		reference[i] = REF32(asyncReference[i]);
5773 	}
5774 
5775 	do {
5776 		if (inputCount != method->count0) {
5777 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5778 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5779 			continue;
5780 		}
5781 		if ((kIOUCVariableStructureSize != method->count1)
5782 		    && (inputStructCount != method->count1)) {
5783 			IOLog("%s:%d %s: IOUserClient inputStructCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5784 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5785 			continue;
5786 		}
5787 
5788 		func = method->func;
5789 
5790 		switch (inputCount) {
5791 		case 5:
5792 			err = (object->*func)(  reference,
5793 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5794 			    ARG32(input[3]), ARG32(input[4]),
5795 			    inputStruct );
5796 			break;
5797 		case 4:
5798 			err = (object->*func)(  reference,
5799 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5800 			    ARG32(input[3]),
5801 			    inputStruct, (void *)(uintptr_t)inputStructCount );
5802 			break;
5803 		case 3:
5804 			err = (object->*func)(  reference,
5805 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5806 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5807 			    NULL );
5808 			break;
5809 		case 2:
5810 			err = (object->*func)(  reference,
5811 			    ARG32(input[0]), ARG32(input[1]),
5812 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5813 			    NULL, NULL );
5814 			break;
5815 		case 1:
5816 			err = (object->*func)(  reference,
5817 			    ARG32(input[0]),
5818 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5819 			    NULL, NULL, NULL );
5820 			break;
5821 		case 0:
5822 			err = (object->*func)(  reference,
5823 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5824 			    NULL, NULL, NULL, NULL );
5825 			break;
5826 
5827 		default:
5828 			IOLog("%s: Bad method table\n", object->getName());
5829 		}
5830 	} while (false);
5831 
5832 	return err;
5833 }
5834 
5835 /* Routine io_connect_method_structureI_structureO */
5836 kern_return_t
5837 is_io_connect_method_structureI_structureO(
5838 	io_object_t     connect,
5839 	uint32_t        index,
5840 	io_struct_inband_t              input,
5841 	mach_msg_type_number_t  inputCount,
5842 	io_struct_inband_t              output,
5843 	mach_msg_type_number_t *        outputCount )
5844 {
5845 	mach_msg_type_number_t scalar_outputCnt = 0;
5846 	mach_vm_size_t ool_output_size = 0;
5847 
5848 	return is_io_connect_method(connect, index,
5849 	           NULL, 0,
5850 	           input, inputCount,
5851 	           0, 0,
5852 	           output, outputCount,
5853 	           NULL, &scalar_outputCnt,
5854 	           0, &ool_output_size);
5855 }
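
/*
 * Likewise, is_io_connect_method_structureI_structureO() forwards the inband
 * input structure to is_io_connect_method() with no scalars and no
 * out-of-line data, leaving the dispatch and count checks to the unified
 * path.
 */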
5856 
5857 kern_return_t
5858 shim_io_connect_method_structureI_structureO(
5859 	IOExternalMethod *  method,
5860 	IOService *         object,
5861 	io_struct_inband_t              input,
5862 	mach_msg_type_number_t  inputCount,
5863 	io_struct_inband_t              output,
5864 	IOByteCount *   outputCount )
5865 {
5866 	IOMethod            func;
5867 	IOReturn            err = kIOReturnBadArgument;
5868 
5869 	do {
5870 		if ((kIOUCVariableStructureSize != method->count0)
5871 		    && (inputCount != method->count0)) {
5872 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5873 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5874 			continue;
5875 		}
5876 		if ((kIOUCVariableStructureSize != method->count1)
5877 		    && (*outputCount != method->count1)) {
5878 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5879 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5880 			continue;
5881 		}
5882 
5883 		func = method->func;
5884 
5885 		if (method->count1) {
5886 			if (method->count0) {
5887 				err = (object->*func)( input, output,
5888 				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5889 			} else {
5890 				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5891 			}
5892 		} else {
5893 			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5894 		}
5895 	} while (false);
5896 
5897 
5898 	return err;
5899 }
5900 
5901 kern_return_t
5902 shim_io_async_method_structureI_structureO(
5903 	IOExternalAsyncMethod * method,
5904 	IOService *             object,
5905 	mach_port_t           asyncWakePort,
5906 	io_user_reference_t * asyncReference,
5907 	uint32_t              asyncReferenceCount,
5908 	io_struct_inband_t              input,
5909 	mach_msg_type_number_t  inputCount,
5910 	io_struct_inband_t              output,
5911 	mach_msg_type_number_t *        outputCount )
5912 {
5913 	IOAsyncMethod       func;
5914 	uint32_t            i;
5915 	IOReturn            err;
5916 	io_async_ref_t      reference;
5917 
5918 	for (i = 0; i < asyncReferenceCount; i++) {
5919 		reference[i] = REF32(asyncReference[i]);
5920 	}
5921 
5922 	err = kIOReturnBadArgument;
5923 	do {
5924 		if ((kIOUCVariableStructureSize != method->count0)
5925 		    && (inputCount != method->count0)) {
5926 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5927 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5928 			continue;
5929 		}
5930 		if ((kIOUCVariableStructureSize != method->count1)
5931 		    && (*outputCount != method->count1)) {
5932 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5933 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5934 			continue;
5935 		}
5936 
5937 		func = method->func;
5938 
5939 		if (method->count1) {
5940 			if (method->count0) {
5941 				err = (object->*func)( reference,
5942 				    input, output,
5943 				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5944 			} else {
5945 				err = (object->*func)( reference,
5946 				    output, outputCount, NULL, NULL, NULL, NULL );
5947 			}
5948 		} else {
5949 			err = (object->*func)( reference,
5950 			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5951 		}
5952 	} while (false);
5953 
5954 	return err;
5955 }
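
/*
 * In the structureI_structureO shims, method->count0 and method->count1 hold
 * the expected input and output structure sizes (or kIOUCVariableStructureSize
 * for caller-specified sizes). A zero count0 means "no input struct" and a
 * zero count1 means "no output struct", which is why the argument layout
 * handed to the legacy method changes with those values.
 */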
5956 
5957 /* Routine io_catalog_send_data */
5958 kern_return_t
5959 is_io_catalog_send_data(
5960 	mach_port_t             main_port,
5961 	uint32_t                flag,
5962 	io_buf_ptr_t            inData,
5963 	mach_msg_type_number_t  inDataCount,
5964 	kern_return_t *         result)
5965 {
5966 	// Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
5967 #if NO_KEXTD && !(DEVELOPMENT || DEBUG)
5968 	return kIOReturnNotPrivileged;
5969 #else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
5970 	OSObject * obj = NULL;
5971 	vm_offset_t data;
5972 	kern_return_t kr = kIOReturnError;
5973 
5974 	//printf("io_catalog_send_data called. flag: %d\n", flag);
5975 
5976 	if (main_port != main_device_port) {
5977 		return kIOReturnNotPrivileged;
5978 	}
5979 
5980 	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
5981 	    flag != kIOCatalogKextdActive &&
5982 	    flag != kIOCatalogKextdFinishedLaunching) &&
5983 	    (!inData || !inDataCount)) {
5984 		return kIOReturnBadArgument;
5985 	}
5986 
5987 	if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
5988 		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
5989 		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
5990 		OSSafeReleaseNULL(taskName);
5991 		// For now, fake success to not break applications relying on this function succeeding.
5992 		// See <rdar://problem/32554970> for more details.
5993 		return kIOReturnSuccess;
5994 	}
5995 
5996 	if (inData) {
5997 		vm_map_offset_t map_data;
5998 
5999 		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
6000 			return kIOReturnMessageTooLarge;
6001 		}
6002 
6003 		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
6004 		data = CAST_DOWN(vm_offset_t, map_data);
6005 
6006 		if (kr != KERN_SUCCESS) {
6007 			return kr;
6008 		}
6009 
6010 		// once vm_map_copyout() succeeds the copy has been consumed; report later errors via *result and still return KERN_SUCCESS
6011 
6012 		if (inDataCount) {
6013 			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
6014 			vm_deallocate( kernel_map, data, inDataCount );
6015 			if (!obj) {
6016 				*result = kIOReturnNoMemory;
6017 				return KERN_SUCCESS;
6018 			}
6019 		}
6020 	}
6021 
6022 	switch (flag) {
6023 	case kIOCatalogResetDrivers:
6024 	case kIOCatalogResetDriversNoMatch: {
6025 		OSArray * array;
6026 
6027 		array = OSDynamicCast(OSArray, obj);
6028 		if (array) {
6029 			if (!gIOCatalogue->resetAndAddDrivers(array,
6030 			    flag == kIOCatalogResetDrivers)) {
6031 				kr = kIOReturnError;
6032 			}
6033 		} else {
6034 			kr = kIOReturnBadArgument;
6035 		}
6036 	}
6037 	break;
6038 
6039 	case kIOCatalogAddDrivers:
6040 	case kIOCatalogAddDriversNoMatch: {
6041 		OSArray * array;
6042 
6043 		array = OSDynamicCast(OSArray, obj);
6044 		if (array) {
6045 			if (!gIOCatalogue->addDrivers( array,
6046 			    flag == kIOCatalogAddDrivers)) {
6047 				kr = kIOReturnError;
6048 			}
6049 		} else {
6050 			kr = kIOReturnBadArgument;
6051 		}
6052 	}
6053 	break;
6054 
6055 	case kIOCatalogRemoveDrivers:
6056 	case kIOCatalogRemoveDriversNoMatch: {
6057 		OSDictionary * dict;
6058 
6059 		dict = OSDynamicCast(OSDictionary, obj);
6060 		if (dict) {
6061 			if (!gIOCatalogue->removeDrivers( dict,
6062 			    flag == kIOCatalogRemoveDrivers )) {
6063 				kr = kIOReturnError;
6064 			}
6065 		} else {
6066 			kr = kIOReturnBadArgument;
6067 		}
6068 	}
6069 	break;
6070 
6071 	case kIOCatalogStartMatching__Removed:
6072 	case kIOCatalogRemoveKernelLinker__Removed:
6073 	case kIOCatalogKextdActive:
6074 	case kIOCatalogKextdFinishedLaunching:
6075 		kr = KERN_NOT_SUPPORTED;
6076 		break;
6077 
6078 	default:
6079 		kr = kIOReturnBadArgument;
6080 		break;
6081 	}
6082 
6083 	if (obj) {
6084 		obj->release();
6085 	}
6086 
6087 	*result = kr;
6088 	return KERN_SUCCESS;
6089 #endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
6090 }
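
/*
 * User space reaches this routine through IOKitLib's IOCatalogueSendData(),
 * which ships an XML-serialized personality array here as out-of-line data.
 * A minimal caller sketch, assuming the usual
 * IOCatalogueSendData(port, flag, buffer, size) prototype and a task holding
 * kIOCatalogManagementEntitlement (the XML content is illustrative only):
 *
 *	#include <string.h>
 *	#include <IOKit/IOKitLib.h>
 *
 *	// XML-serialized OSArray of driver personalities to add.
 *	const char * xml = "<array><dict>...</dict></array>";
 *	kern_return_t kr = IOCatalogueSendData(kIOMainPortDefault,
 *	    kIOCatalogAddDrivers, xml, (uint32_t)(strlen(xml) + 1));
 *
 * Note that without the entitlement the kernel currently logs and fakes
 * success, so a kIOReturnSuccess result alone does not prove the
 * personalities were added.
 */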
6091 
6092 /* Routine io_catalog_terminate */
6093 kern_return_t
6094 is_io_catalog_terminate(
6095 	mach_port_t main_port,
6096 	uint32_t flag,
6097 	io_name_t name )
6098 {
6099 	kern_return_t          kr;
6100 
6101 	if (main_port != main_device_port) {
6102 		return kIOReturnNotPrivileged;
6103 	}
6104 
6105 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6106 	    kIOClientPrivilegeAdministrator );
6107 	if (kIOReturnSuccess != kr) {
6108 		return kr;
6109 	}
6110 
6111 	switch (flag) {
6112 #if !defined(SECURE_KERNEL)
6113 	case kIOCatalogServiceTerminate:
6114 		kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6115 		break;
6116 
6117 	case kIOCatalogModuleUnload:
6118 	case kIOCatalogModuleTerminate:
6119 		kr = gIOCatalogue->terminateDriversForModule(name,
6120 		    flag == kIOCatalogModuleUnload);
6121 		break;
6122 #endif
6123 
6124 	default:
6125 		kr = kIOReturnBadArgument;
6126 		break;
6127 	}
6128 
6129 	return kr;
6130 }
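
/*
 * Terminating or unloading catalogue entries requires both the main device
 * port and the administrator privilege check above; on SECURE_KERNEL builds
 * the terminate/unload cases are compiled out entirely, so every flag falls
 * through to kIOReturnBadArgument there.
 */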
6131 
6132 /* Routine io_catalog_get_data */
6133 kern_return_t
6134 is_io_catalog_get_data(
6135 	mach_port_t             main_port,
6136 	uint32_t                flag,
6137 	io_buf_ptr_t            *outData,
6138 	mach_msg_type_number_t  *outDataCount)
6139 {
6140 	kern_return_t kr = kIOReturnSuccess;
6141 	OSSerialize * s;
6142 
6143 	if (main_port != main_device_port) {
6144 		return kIOReturnNotPrivileged;
6145 	}
6146 
6147 	//printf("io_catalog_get_data called. flag: %d\n", flag);
6148 
6149 	s = OSSerialize::withCapacity(4096);
6150 	if (!s) {
6151 		return kIOReturnNoMemory;
6152 	}
6153 
6154 	kr = gIOCatalogue->serializeData(flag, s);
6155 
6156 	if (kr == kIOReturnSuccess) {
6157 		mach_vm_address_t data;
6158 		vm_map_copy_t copy;
6159 		unsigned int size;
6160 
6161 		size = s->getLength();
6162 		kr = mach_vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
6163 		if (kr == kIOReturnSuccess) {
6164 			bcopy(s->text(), (void *)data, size);
6165 			kr = vm_map_copyin(kernel_map, data, size, true, &copy);
6166 			*outData = (char *)copy;
6167 			*outDataCount = size;
6168 		}
6169 	}
6170 
6171 	s->release();
6172 
6173 	return kr;
6174 }
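
/*
 * The serialized catalogue is returned as a vm_map_copy_t that MIG maps into
 * the caller's address space, so user space receives it via IOKitLib's
 * IOCatalogueGetData() and owns the resulting buffer. A rough sketch,
 * assuming the usual IOCatalogueGetData(port, flag, &buffer, &size) prototype
 * and the kIOCatalogGetContents flag:
 *
 *	#include <mach/mach.h>
 *	#include <IOKit/IOKitLib.h>
 *
 *	char * buf = NULL;
 *	uint32_t size = 0;
 *	if (IOCatalogueGetData(kIOMainPortDefault, kIOCatalogGetContents,
 *	    &buf, &size) == kIOReturnSuccess) {
 *		// buf holds the XML-serialized personalities; release when done.
 *		vm_deallocate(mach_task_self(), (vm_address_t)buf, size);
 *	}
 */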
6175 
6176 /* Routine io_catalog_get_gen_count */
6177 kern_return_t
6178 is_io_catalog_get_gen_count(
6179 	mach_port_t             main_port,
6180 	uint32_t                *genCount)
6181 {
6182 	if (main_port != main_device_port) {
6183 		return kIOReturnNotPrivileged;
6184 	}
6185 
6186 	//printf("io_catalog_get_gen_count called.\n");
6187 
6188 	if (!genCount) {
6189 		return kIOReturnBadArgument;
6190 	}
6191 
6192 	*genCount = gIOCatalogue->getGenerationCount();
6193 
6194 	return kIOReturnSuccess;
6195 }
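
/*
 * The generation count is bumped as the catalogue is modified, so a client
 * that has cached a copy of the catalogue data can compare generation counts
 * to decide whether its copy is stale before fetching it again.
 */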
6196 
6197 /* Routine io_catalog_module_loaded.
6198  * Invoked from IOKitLib's IOCatalogueModuleLoaded(); it no longer appears to be used.
6199  */
6200 kern_return_t
6201 is_io_catalog_module_loaded(
6202 	mach_port_t             main_port,
6203 	io_name_t               name)
6204 {
6205 	if (main_port != main_device_port) {
6206 		return kIOReturnNotPrivileged;
6207 	}
6208 
6209 	//printf("io_catalog_module_loaded called. name %s\n", name);
6210 
6211 	if (!name) {
6212 		return kIOReturnBadArgument;
6213 	}
6214 
6215 	gIOCatalogue->moduleHasLoaded(name);
6216 
6217 	return kIOReturnSuccess;
6218 }
6219 
6220 kern_return_t
6221 is_io_catalog_reset(
6222 	mach_port_t             main_port,
6223 	uint32_t                flag)
6224 {
6225 	if (main_port != main_device_port) {
6226 		return kIOReturnNotPrivileged;
6227 	}
6228 
6229 	switch (flag) {
6230 	case kIOCatalogResetDefault:
6231 		gIOCatalogue->reset();
6232 		break;
6233 
6234 	default:
6235 		return kIOReturnBadArgument;
6236 	}
6237 
6238 	return kIOReturnSuccess;
6239 }
6240 
6241 kern_return_t
6242 iokit_user_client_trap(struct iokit_user_client_trap_args *args)
6243 {
6244 	kern_return_t    result = kIOReturnBadArgument;
6245 	IOUserClient   * userClient;
6246 	OSObject       * object;
6247 	uintptr_t        ref;
6248 	mach_port_name_t portName;
6249 
6250 	ref     = (uintptr_t) args->userClientRef;
6251 
6252 	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
6253 		return kIOReturnBadArgument;
6254 	}
6255 	// kobject port names always have bits 0-1 set, so we reuse those bits as flags to
6256 	// iokit_user_client_trap().
6257 	// Keep this in sync with ipc_entry_name_mask().
6258 	portName = (mach_port_name_t) (ref | 3);
6259 	if (((1ULL << 32) & ref) || !(1 & ref)) {
6260 		object = iokit_lookup_uext_ref_current_task(portName);
6261 		if (object) {
6262 			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
6263 		}
6264 		OSSafeReleaseNULL(object);
6265 	} else {
6266 		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
6267 		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
6268 			IOExternalTrap *trap = NULL;
6269 			IOService *target = NULL;
6270 
6271 			result = kIOReturnSuccess;
6272 			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
6273 			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
6274 				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
6275 			}
6276 			if (kIOReturnSuccess == result) {
6277 				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
6278 			}
6279 			if (trap && target) {
6280 				IOTrap func;
6281 
6282 				func = trap->func;
6283 
6284 				if (func) {
6285 					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
6286 				}
6287 			}
6288 
6289 			iokit_remove_connect_reference(userClient);
6290 		} else {
6291 			OSSafeReleaseNULL(ref_current_task);
6292 		}
6293 	}
6294 
6295 	return result;
6296 }
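
/*
 * This is the kernel side of the IOConnectTrap0..6 fast path: rather than a
 * full MIG message, user space issues a Mach trap carrying up to six
 * word-sized arguments. For classic IOUserClients the index is resolved
 * through getTargetAndTrapForIndex() (subject to the message filter), while
 * references with bit 0 clear or bit 32 set take the DriverKit
 * IOUserServerUEXTTrap() route. A minimal user-space sketch, assuming an
 * already opened io_connect_t and a driver that publishes trap index 0:
 *
 *	#include <IOKit/IOKitLib.h>
 *
 *	// p1..p6 are passed through to the IOExternalTrap handler unmodified.
 *	kern_return_t kr = IOConnectTrap6(connect, 0, p1, p2, p3, p4, p5, p6);
 */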
6297 
6298 /* Routine io_device_tree_entry_exists_with_name */
6299 kern_return_t
6300 is_io_device_tree_entry_exists_with_name(
6301 	mach_port_t main_port,
6302 	io_name_t name,
6303 	boolean_t *exists )
6304 {
6305 	OSCollectionIterator *iter;
6306 
6307 	if (main_port != main_device_port) {
6308 		return kIOReturnNotPrivileged;
6309 	}
6310 
6311 	iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6312 	*exists = iter && iter->getNextObject();
6313 	OSSafeReleaseNULL(iter);
6314 
6315 	return kIOReturnSuccess;
6316 }
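
/*
 * This simply reports whether any device-tree node with the given name exists
 * under the registry root; the iterator is asked for a single match and
 * released immediately.
 */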
6317 } /* extern "C" */
6318 
6319 IOReturn
6320 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6321 {
6322 	IOReturn ret;
6323 
6324 	if (defaultLocking) {
6325 		if (defaultLockingSingleThreadExternalMethod) {
6326 			IORWLockWrite(&lock);
6327 		} else {
6328 			IORWLockRead(&lock);
6329 		}
6330 	}
6331 	if (uc2022) {
6332 		ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6333 	} else {
6334 		ret = externalMethod(selector, args);
6335 	}
6336 	if (defaultLocking) {
6337 		IORWLockUnlock(&lock);
6338 	}
6339 	return ret;
6340 }
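
/*
 * callExternalMethod() is the common funnel for MIG-originated external
 * method calls. With defaultLocking enabled, concurrent calls normally share
 * the reader side of the lock; defaultLockingSingleThreadExternalMethod
 * upgrades them to the writer side so at most one external method runs at a
 * time for this user client.
 */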
6341 
6342 MIG_SERVER_ROUTINE IOReturn
6343 IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
6344     IOExternalMethodDispatch *dispatch,
6345     OSObject *target, void *reference)
6346 {
6347 	panic("wrong externalMethod for IOUserClient2022");
6348 }
6349 
6350 IOReturn
6351 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6352     const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6353     OSObject * target, void * reference)
6354 {
6355 	IOReturn    err;
6356 	IOExternalMethodArguments * args = (typeof(args))arguments;
6357 	const IOExternalMethodDispatch2022 * dispatch;
6358 
6359 	if (!dispatchArray) {
6360 		return kIOReturnError;
6361 	}
6362 	if (selector >= dispatchArrayCount) {
6363 		return kIOReturnBadArgument;
6364 	}
6365 	dispatch = &dispatchArray[selector];
6366 
6367 	uint32_t count;
6368 	count = dispatch->checkScalarInputCount;
6369 	if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6370 		return kIOReturnBadArgument;
6371 	}
6372 
6373 	count = dispatch->checkStructureInputSize;
6374 	if ((kIOUCVariableStructureSize != count)
6375 	    && (count != ((args->structureInputDescriptor)
6376 	    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6377 		return kIOReturnBadArgument;
6378 	}
6379 
6380 	count = dispatch->checkScalarOutputCount;
6381 	if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6382 		return kIOReturnBadArgument;
6383 	}
6384 
6385 	count = dispatch->checkStructureOutputSize;
6386 	if ((kIOUCVariableStructureSize != count)
6387 	    && (count != ((args->structureOutputDescriptor)
6388 	    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6389 		return kIOReturnBadArgument;
6390 	}
6391 
6392 	if (args->asyncWakePort && !dispatch->allowAsync) {
6393 		return kIOReturnBadArgument;
6394 	}
6395 
6396 	if (dispatch->checkEntitlement) {
6397 		if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6398 			return kIOReturnNotPrivileged;
6399 		}
6400 	}
6401 
6402 	if (dispatch->function) {
6403 		err = (*dispatch->function)(target, reference, args);
6404 	} else {
6405 		err = kIOReturnNoCompletion; /* implementer can dispatch */
6406 	}
6407 	return err;
6408 }
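
/*
 * A subclass of IOUserClient2022 is expected to override
 * externalMethod(selector, IOExternalMethodArgumentsOpaque *) and funnel it
 * into dispatchExternalMethod() with a static table. A minimal sketch, under
 * the assumption of the field names checked above and designated
 * initializers; MyUserClient and MyMethod are hypothetical names:
 *
 *	static IOReturn
 *	MyMethod(OSObject * target, void * reference, IOExternalMethodArguments * args)
 *	{
 *		// One 64-bit scalar in, one 64-bit scalar out.
 *		args->scalarOutput[0] = args->scalarInput[0] + 1;
 *		return kIOReturnSuccess;
 *	}
 *
 *	static const IOExternalMethodDispatch2022 sMethods[] = {
 *		{
 *			.function                 = &MyMethod,
 *			.checkScalarInputCount    = 1,
 *			.checkStructureInputSize  = 0,
 *			.checkScalarOutputCount   = 1,
 *			.checkStructureOutputSize = 0,
 *			.allowAsync               = false,
 *			.checkEntitlement         = NULL,
 *		},
 *	};
 *
 *	IOReturn
 *	MyUserClient::externalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque * args)
 *	{
 *		return dispatchExternalMethod(selector, args, sMethods,
 *		    sizeof(sMethods) / sizeof(sMethods[0]), this, NULL);
 *	}
 */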
6409 
6410 IOReturn
6411 IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
6412     IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
6413 {
6414 	IOReturn    err;
6415 	IOService * object;
6416 	IOByteCount structureOutputSize;
6417 
6418 	if (dispatch) {
6419 		uint32_t count;
6420 		count = dispatch->checkScalarInputCount;
6421 		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6422 			return kIOReturnBadArgument;
6423 		}
6424 
6425 		count = dispatch->checkStructureInputSize;
6426 		if ((kIOUCVariableStructureSize != count)
6427 		    && (count != ((args->structureInputDescriptor)
6428 		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6429 			return kIOReturnBadArgument;
6430 		}
6431 
6432 		count = dispatch->checkScalarOutputCount;
6433 		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6434 			return kIOReturnBadArgument;
6435 		}
6436 
6437 		count = dispatch->checkStructureOutputSize;
6438 		if ((kIOUCVariableStructureSize != count)
6439 		    && (count != ((args->structureOutputDescriptor)
6440 		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6441 			return kIOReturnBadArgument;
6442 		}
6443 
6444 		if (dispatch->function) {
6445 			err = (*dispatch->function)(target, reference, args);
6446 		} else {
6447 			err = kIOReturnNoCompletion; /* implementer can dispatch */
6448 		}
6449 		return err;
6450 	}
6451 
6452 
6453 	// pre-Leopard APIs don't support out-of-line (OOL) structs
6454 	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
6455 		err = kIOReturnIPCError;
6456 		return err;
6457 	}
6458 
6459 	structureOutputSize = args->structureOutputSize;
6460 
6461 	if (args->asyncWakePort) {
6462 		IOExternalAsyncMethod * method;
6463 		object = NULL;
6464 		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
6465 			return kIOReturnUnsupported;
6466 		}
6467 
6468 		if (kIOUCForegroundOnly & method->flags) {
6469 			if (task_is_gpu_denied(current_task())) {
6470 				return kIOReturnNotPermitted;
6471 			}
6472 		}
6473 
6474 		switch (method->flags & kIOUCTypeMask) {
6475 		case kIOUCScalarIStructI:
6476 			err = shim_io_async_method_scalarI_structureI( method, object,
6477 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6478 			    args->scalarInput, args->scalarInputCount,
6479 			    (char *)args->structureInput, args->structureInputSize );
6480 			break;
6481 
6482 		case kIOUCScalarIScalarO:
6483 			err = shim_io_async_method_scalarI_scalarO( method, object,
6484 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6485 			    args->scalarInput, args->scalarInputCount,
6486 			    args->scalarOutput, &args->scalarOutputCount );
6487 			break;
6488 
6489 		case kIOUCScalarIStructO:
6490 			err = shim_io_async_method_scalarI_structureO( method, object,
6491 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6492 			    args->scalarInput, args->scalarInputCount,
6493 			    (char *) args->structureOutput, &args->structureOutputSize );
6494 			break;
6495 
6496 
6497 		case kIOUCStructIStructO:
6498 			err = shim_io_async_method_structureI_structureO( method, object,
6499 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6500 			    (char *)args->structureInput, args->structureInputSize,
6501 			    (char *) args->structureOutput, &args->structureOutputSize );
6502 			break;
6503 
6504 		default:
6505 			err = kIOReturnBadArgument;
6506 			break;
6507 		}
6508 	} else {
6509 		IOExternalMethod *      method;
6510 		object = NULL;
6511 		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
6512 			return kIOReturnUnsupported;
6513 		}
6514 
6515 		if (kIOUCForegroundOnly & method->flags) {
6516 			if (task_is_gpu_denied(current_task())) {
6517 				return kIOReturnNotPermitted;
6518 			}
6519 		}
6520 
6521 		switch (method->flags & kIOUCTypeMask) {
6522 		case kIOUCScalarIStructI:
6523 			err = shim_io_connect_method_scalarI_structureI( method, object,
6524 			    args->scalarInput, args->scalarInputCount,
6525 			    (char *) args->structureInput, args->structureInputSize );
6526 			break;
6527 
6528 		case kIOUCScalarIScalarO:
6529 			err = shim_io_connect_method_scalarI_scalarO( method, object,
6530 			    args->scalarInput, args->scalarInputCount,
6531 			    args->scalarOutput, &args->scalarOutputCount );
6532 			break;
6533 
6534 		case kIOUCScalarIStructO:
6535 			err = shim_io_connect_method_scalarI_structureO( method, object,
6536 			    args->scalarInput, args->scalarInputCount,
6537 			    (char *) args->structureOutput, &structureOutputSize );
6538 			break;
6539 
6540 
6541 		case kIOUCStructIStructO:
6542 			err = shim_io_connect_method_structureI_structureO( method, object,
6543 			    (char *) args->structureInput, args->structureInputSize,
6544 			    (char *) args->structureOutput, &structureOutputSize );
6545 			break;
6546 
6547 		default:
6548 			err = kIOReturnBadArgument;
6549 			break;
6550 		}
6551 	}
6552 
6553 	if (structureOutputSize > UINT_MAX) {
6554 		structureOutputSize = 0;
6555 		err = kIOReturnBadArgument;
6556 	}
6557 
6558 	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);
6559 
6560 	return err;
6561 }
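
/*
 * When no IOExternalMethodDispatch is supplied, the path above falls back to
 * the pre-Leopard method tables: getTargetAndMethodForIndex() /
 * getAsyncTargetAndMethodForIndex() select the entry and the kIOUCTypeMask
 * flags pick the matching shim_* adapter defined earlier in this file.
 */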
6562 
6563 IOReturn
6564 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6565 {
6566 	if (size < sizeof(*callbacks)) {
6567 		return kIOReturnBadArgument;
6568 	}
6569 	if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6570 		return kIOReturnBusy;
6571 	}
6572 	return kIOReturnSuccess;
6573 }
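
/*
 * registerFilterCallbacks() is a one-shot registration: the compare-and-swap
 * on gIOUCFilterCallbacks lets only the first caller install its callbacks,
 * later callers get kIOReturnBusy, and an undersized callbacks structure is
 * rejected with kIOReturnBadArgument.
 */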
6574 
6575 
6576 OSMetaClassDefineReservedUnused(IOUserClient, 0);
6577 OSMetaClassDefineReservedUnused(IOUserClient, 1);
6578 OSMetaClassDefineReservedUnused(IOUserClient, 2);
6579 OSMetaClassDefineReservedUnused(IOUserClient, 3);
6580 OSMetaClassDefineReservedUnused(IOUserClient, 4);
6581 OSMetaClassDefineReservedUnused(IOUserClient, 5);
6582 OSMetaClassDefineReservedUnused(IOUserClient, 6);
6583 OSMetaClassDefineReservedUnused(IOUserClient, 7);
6584 OSMetaClassDefineReservedUnused(IOUserClient, 8);
6585 OSMetaClassDefineReservedUnused(IOUserClient, 9);
6586 OSMetaClassDefineReservedUnused(IOUserClient, 10);
6587 OSMetaClassDefineReservedUnused(IOUserClient, 11);
6588 OSMetaClassDefineReservedUnused(IOUserClient, 12);
6589 OSMetaClassDefineReservedUnused(IOUserClient, 13);
6590 OSMetaClassDefineReservedUnused(IOUserClient, 14);
6591 OSMetaClassDefineReservedUnused(IOUserClient, 15);
6592 
6593 OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
6594 OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
6595 OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
6596 OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6597