xref: /xnu-11215/iokit/Kernel/IOUserClient.cpp (revision bb611c8f)
1 /*
2  * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 
30 #include <libkern/c++/OSKext.h>
31 #include <libkern/c++/OSSharedPtr.h>
32 #include <IOKit/IOKitServer.h>
33 #include <IOKit/IOKitKeysPrivate.h>
34 #include <IOKit/IOUserClient.h>
35 #include <IOKit/IOService.h>
36 #include <IOKit/IORegistryEntry.h>
37 #include <IOKit/IOCatalogue.h>
38 #include <IOKit/IOMemoryDescriptor.h>
39 #include <IOKit/IOBufferMemoryDescriptor.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOBSD.h>
42 #include <IOKit/IOStatisticsPrivate.h>
43 #include <IOKit/IOTimeStamp.h>
44 #include <IOKit/IODeviceTreeSupport.h>
45 #include <IOKit/IOUserServer.h>
46 #include <IOKit/system.h>
47 #include <libkern/OSDebug.h>
48 #include <DriverKit/OSAction.h>
49 #include <sys/proc.h>
50 #include <sys/kauth.h>
51 #include <sys/codesign.h>
52 
53 #include <mach/sdt.h>
54 #include <os/hash.h>
55 
56 #if CONFIG_MACF
57 
58 extern "C" {
59 #include <security/mac_framework.h>
60 };
61 #include <sys/kauth.h>
62 
63 #define IOMACF_LOG 0
64 
65 #endif /* CONFIG_MACF */
66 
67 #include <IOKit/assert.h>
68 
69 #include "IOServicePrivate.h"
70 #include "IOKitKernelInternal.h"
71 
72 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
73 #define SCALAR32(x) ((uint32_t )x)
74 #define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))
75 #define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
76 #define REF32(x)    ((int)(x))
77 
78 enum{
79 	kIOUCAsync0Flags          = 3ULL,
80 	kIOUCAsync64Flag          = 1ULL,
81 	kIOUCAsyncErrorLoggedFlag = 2ULL
82 };
83 
84 #if IOKITSTATS
85 
86 #define IOStatisticsRegisterCounter() \
87 do { \
88 	reserved->counter = IOStatistics::registerUserClient(this); \
89 } while (0)
90 
91 #define IOStatisticsUnregisterCounter() \
92 do { \
93 	if (reserved) \
94 	        IOStatistics::unregisterUserClient(reserved->counter); \
95 } while (0)
96 
97 #define IOStatisticsClientCall() \
98 do { \
99 	IOStatistics::countUserClientCall(client); \
100 } while (0)
101 
102 #else
103 
104 #define IOStatisticsRegisterCounter()
105 #define IOStatisticsUnregisterCounter()
106 #define IOStatisticsClientCall()
107 
108 #endif /* IOKITSTATS */
109 
110 #if DEVELOPMENT || DEBUG
111 
112 #define FAKE_STACK_FRAME(a)                                             \
113 	const void ** __frameptr;                                       \
114 	const void  * __retaddr;                                        \
115 	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0);   \
116 	__retaddr = __frameptr[1];                                      \
117 	__frameptr[1] = (a);
118 
119 #define FAKE_STACK_FRAME_END()                                          \
120 	__frameptr[1] = __retaddr;
121 
122 #else /* DEVELOPMENT || DEBUG */
123 
124 #define FAKE_STACK_FRAME(a)
125 #define FAKE_STACK_FRAME_END()
126 
127 #endif /* DEVELOPMENT || DEBUG */
128 
129 #define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
130 #define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
131 
132 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
133 
134 extern "C" {
135 #include <mach/mach_traps.h>
136 #include <vm/vm_map.h>
137 } /* extern "C" */
138 
139 struct IOMachPortHashList;
140 
141 static_assert(IKOT_MAX_TYPE <= 255);
142 
143 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
144 
145 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
146 class IOMachPort : public OSObject
147 {
148 	OSDeclareDefaultStructors(IOMachPort);
149 public:
150 	SLIST_ENTRY(IOMachPort) link;
151 	ipc_port_t  port;
152 	OSObject*   object;
153 	UInt32      mscount;
154 	UInt8       holdDestroy;
155 	UInt8       type;
156 
157 	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);
158 
159 	static IOMachPortHashList* bucketForObject(OSObject *obj,
160 	    ipc_kobject_type_t type);
161 
162 	static IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);
163 
164 	static bool noMoreSendersForObject( OSObject * obj,
165 	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
166 	static void releasePortForObject( OSObject * obj,
167 	    ipc_kobject_type_t type );
168 	static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
169 
170 	static mach_port_name_t makeSendRightForTask( task_t task,
171 	    io_object_t obj, ipc_kobject_type_t type );
172 
173 	virtual void free() APPLE_KEXT_OVERRIDE;
174 };
175 
176 #define super OSObject
177 OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)
178 
179 static IOLock *         gIOObjectPortLock;
180 IOLock *                gIOUserServerLock;
181 
182 SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;
183 
184 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
185 
186 SLIST_HEAD(IOMachPortHashList, IOMachPort);
187 
188 #if defined(XNU_TARGET_OS_OSX)
189 #define PORT_HASH_SIZE 4096
190 #else /* !defined(XNU_TARGET_OS_OSX) */
191 #define PORT_HASH_SIZE 256
192 #endif /* defined(XNU_TARGET_OS_OSX) */
193 
194 IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
195 
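// Initialize every bucket of the global object->port hash table to an empty list.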
196 void
197 IOMachPortInitialize(void)
198 {
199 	for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
200 		SLIST_INIT(&gIOMachPortHash[i]);
201 	}
202 }
203 
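// Hash an object pointer to its bucket. Only the pointer is hashed; the kobject
// type is matched later when the bucket is searched.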
204 IOMachPortHashList*
205 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
206 {
207 	return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
208 }
209 
210 IOMachPort*
211 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
212 {
213 	IOMachPort *machPort;
214 
215 	SLIST_FOREACH(machPort, bucket, link) {
216 		if (machPort->object == obj && machPort->type == type) {
217 			return machPort;
218 		}
219 	}
220 	return NULL;
221 }
222 
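// Create the IOMachPort for (obj, type): allocate the kobject port, take a
// tagged retain on the object, and start the make-send count at one.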
223 IOMachPort*
224 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
225 {
226 	IOMachPort *machPort = NULL;
227 
228 	machPort = new IOMachPort;
229 	if (__improbable(machPort && !machPort->init())) {
230 		return NULL;
231 	}
232 
233 	machPort->object = obj;
234 	machPort->type = (typeof(machPort->type))type;
235 	machPort->port = iokit_alloc_object_port(obj, type);
236 
237 	obj->taggedRetain(OSTypeID(OSCollection));
238 	machPort->mscount++;
239 
240 	return machPort;
241 }
242 
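// No-more-senders handling for an object's port. Returns true if the port was
// torn down; otherwise passes back the current make-send count and returns false
// because new send rights were made after the notification was generated.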
243 bool
244 IOMachPort::noMoreSendersForObject( OSObject * obj,
245     ipc_kobject_type_t type, mach_port_mscount_t * mscount )
246 {
247 	IOMachPort *machPort = NULL;
248 	IOUserClient *uc;
249 	OSAction *action;
250 	bool destroyed = true;
251 
252 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
253 
254 	obj->retain();
255 
256 	lck_mtx_lock(gIOObjectPortLock);
257 
258 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
259 
260 	if (machPort) {
261 		destroyed = (machPort->mscount <= *mscount);
262 		if (!destroyed) {
263 			*mscount = machPort->mscount;
264 			lck_mtx_unlock(gIOObjectPortLock);
265 		} else {
266 			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
267 				uc->noMoreSenders();
268 			}
269 			SLIST_REMOVE(bucket, machPort, IOMachPort, link);
270 
271 			lck_mtx_unlock(gIOObjectPortLock);
272 
273 			machPort->release();
274 			obj->taggedRelease(OSTypeID(OSCollection));
275 		}
276 	} else {
277 		lck_mtx_unlock(gIOObjectPortLock);
278 	}
279 
280 	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
281 		action->Aborted();
282 	}
283 
284 	obj->release();
285 
286 	return destroyed;
287 }
288 
289 void
290 IOMachPort::releasePortForObject( OSObject * obj,
291     ipc_kobject_type_t type )
292 {
293 	IOMachPort *machPort;
294 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
295 
296 	assert(IKOT_IOKIT_CONNECT != type);
297 
298 	lck_mtx_lock(gIOObjectPortLock);
299 
300 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
301 
302 	if (machPort && !machPort->holdDestroy) {
303 		obj->retain();
304 		SLIST_REMOVE(bucket, machPort, IOMachPort, link);
305 
306 		lck_mtx_unlock(gIOObjectPortLock);
307 
308 		machPort->release();
309 		obj->taggedRelease(OSTypeID(OSCollection));
310 		obj->release();
311 	} else {
312 		lck_mtx_unlock(gIOObjectPortLock);
313 	}
314 }
315 
316 void
317 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
318 {
319 	IOMachPort *        machPort;
320 
321 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
322 	lck_mtx_lock(gIOObjectPortLock);
323 
324 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
325 
326 	if (machPort) {
327 		machPort->holdDestroy = true;
328 	}
329 
330 	lck_mtx_unlock(gIOObjectPortLock);
331 }
332 
333 void
334 IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
335 {
336 	IOMachPort::releasePortForObject(obj, type);
337 }
338 
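// Drop the user-visible port bindings for an object. If a dying user client
// still has outstanding mappings, its connect port is switched to reference the
// mappings collection rather than being destroyed.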
339 void
340 IOUserClient::destroyUserReferences( OSObject * obj )
341 {
342 	IOMachPort *machPort;
343 
344 	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
345 
346 	// panther, 3160200
347 	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
348 
349 	obj->retain();
350 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
351 	IOMachPortHashList *mappingBucket = NULL;
352 
353 	lck_mtx_lock(gIOObjectPortLock);
354 
355 	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
356 	if (uc && uc->mappings) {
357 		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
358 	}
359 
360 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);
361 
362 	if (machPort == NULL) {
363 		lck_mtx_unlock(gIOObjectPortLock);
364 		goto end;
365 	}
366 
367 	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
368 	obj->taggedRelease(OSTypeID(OSCollection));
369 
370 	if (uc) {
371 		uc->noMoreSenders();
372 		if (uc->mappings) {
373 			uc->mappings->taggedRetain(OSTypeID(OSCollection));
374 			machPort->object = uc->mappings;
375 			SLIST_INSERT_HEAD(mappingBucket, machPort, link);
376 			iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);
377 
378 			lck_mtx_unlock(gIOObjectPortLock);
379 
380 			uc->mappings->release();
381 			uc->mappings = NULL;
382 		} else {
383 			lck_mtx_unlock(gIOObjectPortLock);
384 			machPort->release();
385 		}
386 	} else {
387 		lck_mtx_unlock(gIOObjectPortLock);
388 		machPort->release();
389 	}
390 
391 
392 end:
393 
394 	obj->release();
395 }
396 
397 mach_port_name_t
398 IOMachPort::makeSendRightForTask( task_t task,
399     io_object_t obj, ipc_kobject_type_t type )
400 {
401 	return iokit_make_send_right( task, obj, type );
402 }
403 
404 void
405 IOMachPort::free( void )
406 {
407 	if (port) {
408 		iokit_destroy_object_port( port );
409 	}
410 	super::free();
411 }
412 
413 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
414 
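// Compatibility shim: this currently always returns false, so
// IOTaskRegistryCompatibilityMatching() below leaves the matching dictionary
// untouched.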
415 static bool
416 IOTaskRegistryCompatibility(task_t task)
417 {
418 	return false;
419 }
420 
421 static void
422 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
423 {
424 	if (!IOTaskRegistryCompatibility(task)) {
425 		return;
426 	}
427 	matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
428 }
429 
430 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
431 
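// IOUserIterator wraps a kernel OSIterator handed out to user space,
// serializing access to the underlying iterator with its own lock.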
432 class IOUserIterator : public OSIterator
433 {
434 	OSDeclareDefaultStructors(IOUserIterator);
435 public:
436 	OSObject    *       userIteratorObject;
437 	IOLock      *       lock;
438 
439 	static IOUserIterator * withIterator(LIBKERN_CONSUMED OSIterator * iter);
440 	virtual bool init( void ) APPLE_KEXT_OVERRIDE;
441 	virtual void free() APPLE_KEXT_OVERRIDE;
442 
443 	virtual void reset() APPLE_KEXT_OVERRIDE;
444 	virtual bool isValid() APPLE_KEXT_OVERRIDE;
445 	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
446 	virtual OSObject * copyNextObject();
447 };
448 
449 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
450 
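// IOUserNotification reuses the inherited userIteratorObject slot (aliased as
// holdNotify) to hold the IONotifier backing a user notification request.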
451 class IOUserNotification : public IOUserIterator
452 {
453 	OSDeclareDefaultStructors(IOUserNotification);
454 
455 #define holdNotify      userIteratorObject
456 
457 public:
458 
459 	virtual void free() APPLE_KEXT_OVERRIDE;
460 
461 	virtual void setNotification( IONotifier * obj );
462 
463 	virtual void reset() APPLE_KEXT_OVERRIDE;
464 	virtual bool isValid() APPLE_KEXT_OVERRIDE;
465 };
466 
467 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
468 
469 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
470 
471 IOUserIterator *
472 IOUserIterator::withIterator(OSIterator * iter)
473 {
474 	IOUserIterator * me;
475 
476 	if (!iter) {
477 		return NULL;
478 	}
479 
480 	me = new IOUserIterator;
481 	if (me && !me->init()) {
482 		me->release();
483 		me = NULL;
484 	}
485 	if (!me) {
486 		return me;
487 	}
488 	me->userIteratorObject = iter;
489 
490 	return me;
491 }
492 
493 bool
494 IOUserIterator::init( void )
495 {
496 	if (!OSObject::init()) {
497 		return false;
498 	}
499 
500 	lock = IOLockAlloc();
501 	if (!lock) {
502 		return false;
503 	}
504 
505 	return true;
506 }
507 
508 void
509 IOUserIterator::free()
510 {
511 	if (userIteratorObject) {
512 		userIteratorObject->release();
513 	}
514 	if (lock) {
515 		IOLockFree(lock);
516 	}
517 	OSObject::free();
518 }
519 
520 void
521 IOUserIterator::reset()
522 {
523 	IOLockLock(lock);
524 	assert(OSDynamicCast(OSIterator, userIteratorObject));
525 	((OSIterator *)userIteratorObject)->reset();
526 	IOLockUnlock(lock);
527 }
528 
529 bool
530 IOUserIterator::isValid()
531 {
532 	bool ret;
533 
534 	IOLockLock(lock);
535 	assert(OSDynamicCast(OSIterator, userIteratorObject));
536 	ret = ((OSIterator *)userIteratorObject)->isValid();
537 	IOLockUnlock(lock);
538 
539 	return ret;
540 }
541 
542 OSObject *
543 IOUserIterator::getNextObject()
544 {
545 	assert(false);
546 	return NULL;
547 }
548 
549 OSObject *
550 IOUserIterator::copyNextObject()
551 {
552 	OSObject * ret = NULL;
553 
554 	IOLockLock(lock);
555 	if (userIteratorObject) {
556 		ret = ((OSIterator *)userIteratorObject)->getNextObject();
557 		if (ret) {
558 			ret->retain();
559 		}
560 	}
561 	IOLockUnlock(lock);
562 
563 	return ret;
564 }
565 
566 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
567 extern "C" {
568 // functions called from osfmk/device/iokit_rpc.c
569 
570 void
571 iokit_port_object_description(io_object_t obj, kobject_description_t desc)
572 {
573 	IORegistryEntry    * regEntry;
574 	IOUserNotification * __unused noti;
575 	_IOServiceNotifier * __unused serviceNoti;
576 	OSSerialize        * __unused s;
577 
578 	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
579 		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
580 #if DEVELOPMENT || DEBUG
581 	} else if ((noti = OSDynamicCast(IOUserNotification, obj))
582 	    && ((serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->holdNotify)))) {
583 		s = OSSerialize::withCapacity((unsigned int) page_size);
584 		if (s && serviceNoti->matching->serialize(s)) {
585 			snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
586 		}
587 		OSSafeReleaseNULL(s);
588 #endif /* DEVELOPMENT || DEBUG */
589 	} else {
590 		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
591 	}
592 }
593 
594 // FIXME: The implementations of these functions are hidden from the static analyzer.
595 // For now, the analyzer doesn't consistently support wrapper functions
596 // for retain and release.
597 #ifndef __clang_analyzer__
598 void
599 iokit_add_reference( io_object_t obj, natural_t type )
600 {
601 	IOUserClient * uc;
602 
603 	if (!obj) {
604 		return;
605 	}
606 
607 	if ((IKOT_IOKIT_CONNECT == type)
608 	    && (uc = OSDynamicCast(IOUserClient, obj))) {
609 		OSIncrementAtomic(&uc->__ipc);
610 	}
611 
612 	obj->retain();
613 }
614 
615 void
616 iokit_remove_reference( io_object_t obj )
617 {
618 	if (obj) {
619 		obj->release();
620 	}
621 }
622 #endif // __clang_analyzer__
623 
624 void
625 iokit_remove_connect_reference( io_object_t obj )
626 {
627 	IOUserClient * uc;
628 	bool           finalize = false;
629 
630 	if (!obj) {
631 		return;
632 	}
633 
634 	if ((uc = OSDynamicCast(IOUserClient, obj))) {
635 		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
636 			IOLockLock(gIOObjectPortLock);
637 			if ((finalize = uc->__ipcFinal)) {
638 				uc->__ipcFinal = false;
639 			}
640 			IOLockUnlock(gIOObjectPortLock);
641 		}
642 		if (finalize) {
643 			uc->scheduleFinalize(true);
644 		}
645 	}
646 
647 	obj->release();
648 }
649 
650 bool
651 IOUserClient::finalizeUserReferences(OSObject * obj)
652 {
653 	IOUserClient * uc;
654 	bool           ok = true;
655 
656 	if ((uc = OSDynamicCast(IOUserClient, obj))) {
657 		IOLockLock(gIOObjectPortLock);
658 		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
659 			ok = false;
660 		}
661 		IOLockUnlock(gIOObjectPortLock);
662 	}
663 	return ok;
664 }
665 
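// Look up, or lazily create, the mach port for an object, bumping its make-send
// count and returning the port with an extra port reference.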
666 ipc_port_t
667 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
668 {
669 	IOMachPort *machPort = NULL;
670 	ipc_port_t   port = NULL;
671 
672 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
673 
674 	lck_mtx_lock(gIOObjectPortLock);
675 
676 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
677 
678 	if (__improbable(machPort == NULL)) {
679 		machPort = IOMachPort::withObjectAndType(obj, type);
680 		if (__improbable(machPort == NULL)) {
681 			goto end;
682 		}
683 		SLIST_INSERT_HEAD(bucket, machPort, link);
684 	} else {
685 		machPort->mscount++;
686 	}
687 
688 	iokit_retain_port(machPort->port);
689 	port = machPort->port;
690 
691 end:
692 	lck_mtx_unlock(gIOObjectPortLock);
693 
694 	return port;
695 }
696 
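// No-senders entry point from the IPC layer. Once the port is really gone,
// propagate the death to the object: clientDied() for user clients, taskDied()
// for memory maps, setNotification(NULL) for user notifications, and
// notifyNoSenders() for check-in tokens.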
697 kern_return_t
698 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
699     ipc_kobject_type_t type, mach_port_mscount_t * mscount )
700 {
701 	IOUserClient *      client;
702 	IOMemoryMap *       map;
703 	IOUserNotification * notify;
704 	IOUserServerCheckInToken * token;
705 
706 	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
707 		return kIOReturnNotReady;
708 	}
709 
710 	switch (type) {
711 	case IKOT_IOKIT_CONNECT:
712 		if ((client = OSDynamicCast( IOUserClient, obj ))) {
713 			IOStatisticsClientCall();
714 			IORWLockWrite(client->lock);
715 			client->clientDied();
716 			IORWLockUnlock(client->lock);
717 		}
718 		break;
719 	case IKOT_IOKIT_OBJECT:
720 		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
721 			map->taskDied();
722 		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
723 			notify->setNotification( NULL );
724 		}
725 		break;
726 	case IKOT_IOKIT_IDENT:
727 		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
728 			IOUserServerCheckInToken::notifyNoSenders( token );
729 		}
730 		break;
731 	}
732 
733 	return kIOReturnSuccess;
734 }
735 };      /* extern "C" */
736 
737 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
738 
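// IOServiceUserNotification queues matched services (up to kMaxOutstanding) and
// pings the client's wake port when the queue goes from empty to non-empty.
// IOServiceMessageUserNotification below delivers interest messages instead.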
739 class IOServiceUserNotification : public IOUserNotification
740 {
741 	OSDeclareDefaultStructors(IOServiceUserNotification);
742 
743 	struct PingMsg {
744 		mach_msg_header_t               msgHdr;
745 		OSNotificationHeader64          notifyHeader;
746 	};
747 
748 	enum { kMaxOutstanding = 1024 };
749 
750 	PingMsg     *       pingMsg;
751 	mach_msg_size_t     msgSize;
752 	OSArray     *       newSet;
753 	bool                armed;
754 	bool                ipcLogged;
755 
756 public:
757 
758 	virtual bool init( mach_port_t port, natural_t type,
759 	    void * reference, vm_size_t referenceSize,
760 	    bool clientIs64 );
761 	virtual void free() APPLE_KEXT_OVERRIDE;
762 	void invalidatePort(void);
763 
764 	static bool _handler( void * target,
765 	    void * ref, IOService * newService, IONotifier * notifier );
766 	virtual bool handler( void * ref, IOService * newService );
767 
768 	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
769 	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
770 };
771 
772 class IOServiceMessageUserNotification : public IOUserNotification
773 {
774 	OSDeclareDefaultStructors(IOServiceMessageUserNotification);
775 
776 	struct PingMsg {
777 		mach_msg_header_t               msgHdr;
778 		mach_msg_body_t                 msgBody;
779 		mach_msg_port_descriptor_t      ports[1];
780 		OSNotificationHeader64          notifyHeader __attribute__ ((packed));
781 	};
782 
783 	PingMsg *           pingMsg;
784 	mach_msg_size_t     msgSize;
785 	uint8_t             clientIs64;
786 	int                 owningPID;
787 	bool                ipcLogged;
788 
789 public:
790 
791 	virtual bool init( mach_port_t port, natural_t type,
792 	    void * reference, vm_size_t referenceSize,
793 	    mach_msg_size_t extraSize,
794 	    bool clientIs64 );
795 
796 	virtual void free() APPLE_KEXT_OVERRIDE;
797 	void invalidatePort(void);
798 
799 	static IOReturn _handler( void * target, void * ref,
800 	    UInt32 messageType, IOService * provider,
801 	    void * messageArgument, vm_size_t argSize );
802 	virtual IOReturn handler( void * ref,
803 	    UInt32 messageType, IOService * provider,
804 	    void * messageArgument, vm_size_t argSize );
805 
806 	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
807 	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
808 };
809 
810 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
811 
812 #undef super
813 #define super IOUserIterator
814 OSDefineMetaClass( IOUserNotification, IOUserIterator );
815 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
816 
817 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
818 
819 void
820 IOUserNotification::free( void )
821 {
822 	if (holdNotify) {
823 		assert(OSDynamicCast(IONotifier, holdNotify));
824 		((IONotifier *)holdNotify)->remove();
825 		holdNotify = NULL;
826 	}
827 	// can't be in handler now
828 
829 	super::free();
830 }
831 
832 
833 void
834 IOUserNotification::setNotification( IONotifier * notify )
835 {
836 	OSObject * previousNotify;
837 
838 	IOLockLock( gIOObjectPortLock);
839 
840 	previousNotify = holdNotify;
841 	holdNotify = notify;
842 
843 	IOLockUnlock( gIOObjectPortLock);
844 
845 	if (previousNotify) {
846 		assert(OSDynamicCast(IONotifier, previousNotify));
847 		((IONotifier *)previousNotify)->remove();
848 	}
849 }
850 
851 void
852 IOUserNotification::reset()
853 {
854 	// ?
855 }
856 
857 bool
858 IOUserNotification::isValid()
859 {
860 	return true;
861 }
862 
863 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
864 
865 #undef super
866 #define super IOUserNotification
867 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
868 
869 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
870 
871 bool
872 IOServiceUserNotification::init( mach_port_t port, natural_t type,
873     void * reference, vm_size_t referenceSize,
874     bool clientIs64 )
875 {
876 	if (!super::init()) {
877 		return false;
878 	}
879 
880 	newSet = OSArray::withCapacity( 1 );
881 	if (!newSet) {
882 		return false;
883 	}
884 
885 	if (referenceSize > sizeof(OSAsyncReference64)) {
886 		return false;
887 	}
888 
889 	msgSize = (mach_msg_size_t) (sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize);
890 
891 	pingMsg = (PingMsg *) IOMalloc( msgSize);
892 	if (!pingMsg) {
893 		return false;
894 	}
895 
896 	bzero( pingMsg, msgSize);
897 
898 	pingMsg->msgHdr.msgh_remote_port    = port;
899 	pingMsg->msgHdr.msgh_bits           = MACH_MSGH_BITS(
900 		MACH_MSG_TYPE_COPY_SEND /*remote*/,
901 		MACH_MSG_TYPE_MAKE_SEND /*local*/);
902 	pingMsg->msgHdr.msgh_size           = msgSize;
903 	pingMsg->msgHdr.msgh_id             = kOSNotificationMessageID;
904 
905 	pingMsg->notifyHeader.size = 0;
906 	pingMsg->notifyHeader.type = type;
907 	bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
908 
909 	return true;
910 }
911 
912 void
913 IOServiceUserNotification::invalidatePort(void)
914 {
915 	if (pingMsg) {
916 		pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
917 	}
918 }
919 
920 void
921 IOServiceUserNotification::free( void )
922 {
923 	PingMsg   * _pingMsg;
924 	vm_size_t   _msgSize;
925 	OSArray   * _newSet;
926 
927 	_pingMsg   = pingMsg;
928 	_msgSize   = msgSize;
929 	_newSet    = newSet;
930 
931 	super::free();
932 
933 	if (_pingMsg && _msgSize) {
934 		if (_pingMsg->msgHdr.msgh_remote_port) {
935 			iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
936 		}
937 		IOFree(_pingMsg, _msgSize);
938 	}
939 
940 	if (_newSet) {
941 		_newSet->release();
942 	}
943 }
944 
945 bool
946 IOServiceUserNotification::_handler( void * target,
947     void * ref, IOService * newService, IONotifier * notifier )
948 {
949 	return ((IOServiceUserNotification *) target)->handler( ref, newService );
950 }
951 
952 bool
953 IOServiceUserNotification::handler( void * ref,
954     IOService * newService )
955 {
956 	unsigned int        count;
957 	kern_return_t       kr;
958 	ipc_port_t          port = NULL;
959 	bool                sendPing = false;
960 
961 	IOTakeLock( lock );
962 
963 	count = newSet->getCount();
964 	if (count < kMaxOutstanding) {
965 		newSet->setObject( newService );
966 		if ((sendPing = (armed && (0 == count)))) {
967 			armed = false;
968 		}
969 	}
970 
971 	IOUnlock( lock );
972 
973 	if (kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) {
974 		IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
975 	}
976 
977 	if (sendPing) {
978 		if ((port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ))) {
979 			pingMsg->msgHdr.msgh_local_port = port;
980 		} else {
981 			pingMsg->msgHdr.msgh_local_port = NULL;
982 		}
983 
984 		kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
985 		    pingMsg->msgHdr.msgh_size,
986 		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
987 		    0);
988 		if (port) {
989 			iokit_release_port( port );
990 		}
991 
992 		if ((KERN_SUCCESS != kr) && !ipcLogged) {
993 			ipcLogged = true;
994 			IOLog("%s: mach_msg_send_from_kernel_with_options(0x%x)\n", __PRETTY_FUNCTION__, kr );
995 		}
996 	}
997 
998 	return true;
999 }
1000 OSObject *
1001 IOServiceUserNotification::getNextObject()
1002 {
1003 	assert(false);
1004 	return NULL;
1005 }
1006 
1007 OSObject *
1008 IOServiceUserNotification::copyNextObject()
1009 {
1010 	unsigned int        count;
1011 	OSObject *          result;
1012 
1013 	IOLockLock(lock);
1014 
1015 	count = newSet->getCount();
1016 	if (count) {
1017 		result = newSet->getObject( count - 1 );
1018 		result->retain();
1019 		newSet->removeObject( count - 1);
1020 	} else {
1021 		result = NULL;
1022 		armed = true;
1023 	}
1024 
1025 	IOLockUnlock(lock);
1026 
1027 	return result;
1028 }
1029 
1030 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1031 
1032 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1033 
1034 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1035 
1036 bool
1037 IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
1038     void * reference, vm_size_t referenceSize, mach_msg_size_t extraSize,
1039     bool client64 )
1040 {
1041 	if (!super::init()) {
1042 		return false;
1043 	}
1044 
1045 	if (referenceSize > sizeof(OSAsyncReference64)) {
1046 		return false;
1047 	}
1048 
1049 	clientIs64 = client64;
1050 
1051 	owningPID = proc_selfpid();
1052 
1053 	extraSize += sizeof(IOServiceInterestContent64);
1054 	msgSize = (mach_msg_size_t) (sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize);
1055 	pingMsg = (PingMsg *) IOMalloc( msgSize);
1056 	if (!pingMsg) {
1057 		return false;
1058 	}
1059 
1060 	bzero( pingMsg, msgSize);
1061 
1062 	pingMsg->msgHdr.msgh_remote_port    = port;
1063 	pingMsg->msgHdr.msgh_bits           = MACH_MSGH_BITS_COMPLEX
1064 	    |  MACH_MSGH_BITS(
1065 		MACH_MSG_TYPE_COPY_SEND /*remote*/,
1066 		MACH_MSG_TYPE_MAKE_SEND /*local*/);
1067 	pingMsg->msgHdr.msgh_size           = msgSize;
1068 	pingMsg->msgHdr.msgh_id             = kOSNotificationMessageID;
1069 
1070 	pingMsg->msgBody.msgh_descriptor_count = 1;
1071 
1072 	pingMsg->ports[0].name              = NULL;
1073 	pingMsg->ports[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
1074 	pingMsg->ports[0].type              = MACH_MSG_PORT_DESCRIPTOR;
1075 
1076 	pingMsg->notifyHeader.size          = extraSize;
1077 	pingMsg->notifyHeader.type          = type;
1078 	bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
1079 
1080 	return true;
1081 }
1082 
1083 void
1084 IOServiceMessageUserNotification::invalidatePort(void)
1085 {
1086 	if (pingMsg) {
1087 		pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
1088 	}
1089 }
1090 
1091 void
1092 IOServiceMessageUserNotification::free( void )
1093 {
1094 	PingMsg *   _pingMsg;
1095 	vm_size_t   _msgSize;
1096 
1097 	_pingMsg   = pingMsg;
1098 	_msgSize   = msgSize;
1099 
1100 	super::free();
1101 
1102 	if (_pingMsg && _msgSize) {
1103 		if (_pingMsg->msgHdr.msgh_remote_port) {
1104 			iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
1105 		}
1106 		IOFree( _pingMsg, _msgSize);
1107 	}
1108 }
1109 
1110 IOReturn
1111 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1112     UInt32 messageType, IOService * provider,
1113     void * argument, vm_size_t argSize )
1114 {
1115 	return ((IOServiceMessageUserNotification *) target)->handler(
1116 		ref, messageType, provider, argument, argSize);
1117 }
1118 
1119 IOReturn
1120 IOServiceMessageUserNotification::handler( void * ref,
1121     UInt32 messageType, IOService * provider,
1122     void * messageArgument, vm_size_t callerArgSize )
1123 {
1124 	enum                         { kLocalMsgSize = 0x100 };
1125 	uint64_t                     stackMsg[kLocalMsgSize / sizeof(uint64_t)];
1126 	void *                       allocMsg;
1127 	kern_return_t                kr;
1128 	vm_size_t                    argSize;
1129 	mach_msg_size_t              thisMsgSize;
1130 	ipc_port_t                   thisPort, providerPort;
1131 	struct PingMsg *             thisMsg;
1132 	IOServiceInterestContent64 * data;
1133 
1134 	if (kIOMessageCopyClientID == messageType) {
1135 		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
1136 		return kIOReturnSuccess;
1137 	}
1138 
1139 	if (callerArgSize == 0) {
1140 		if (clientIs64) {
1141 			argSize = sizeof(data->messageArgument[0]);
1142 		} else {
1143 			argSize = sizeof(uint32_t);
1144 		}
1145 	} else {
1146 		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
1147 			callerArgSize = kIOUserNotifyMaxMessageSize;
1148 		}
1149 		argSize = callerArgSize;
1150 	}
1151 
1152 	// adjust message size for ipc restrictions
1153 	natural_t type;
1154 	type = pingMsg->notifyHeader.type;
1155 	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1156 	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1157 	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1158 
1159 	if (os_add3_overflow(msgSize, sizeof(IOServiceInterestContent64) - sizeof(data->messageArgument), argSize, &thisMsgSize)) {
1160 		return kIOReturnBadArgument;
1161 	}
1162 
1163 	if (thisMsgSize > sizeof(stackMsg)) {
1164 		allocMsg = IOMalloc(thisMsgSize);
1165 		if (!allocMsg) {
1166 			return kIOReturnNoMemory;
1167 		}
1168 		thisMsg = (typeof(thisMsg))allocMsg;
1169 	} else {
1170 		allocMsg = NULL;
1171 		thisMsg  = (typeof(thisMsg))stackMsg;
1172 	}
1173 
1174 	bcopy(pingMsg, thisMsg, msgSize);
1175 	thisMsg->notifyHeader.type = type;
1176 	data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1177 	// == pingMsg->notifyHeader.content;
1178 	data->messageType = messageType;
1179 
1180 	if (callerArgSize == 0) {
1181 		data->messageArgument[0] = (io_user_reference_t) messageArgument;
1182 		if (!clientIs64) {
1183 			data->messageArgument[0] |= (data->messageArgument[0] << 32);
1184 		}
1185 	} else {
1186 		bcopy( messageArgument, data->messageArgument, callerArgSize );
1187 		bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1188 	}
1189 
1190 	thisMsg->notifyHeader.type = type;
1191 	thisMsg->msgHdr.msgh_size  = thisMsgSize;
1192 
1193 	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1194 	thisMsg->ports[0].name = providerPort;
1195 	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1196 	thisMsg->msgHdr.msgh_local_port = thisPort;
1197 
1198 	kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1199 	    thisMsg->msgHdr.msgh_size,
1200 	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1201 	    0);
1202 	if (thisPort) {
1203 		iokit_release_port( thisPort );
1204 	}
1205 	if (providerPort) {
1206 		iokit_release_port( providerPort );
1207 	}
1208 
1209 	if (allocMsg) {
1210 		IOFree(allocMsg, thisMsgSize);
1211 	}
1212 
1213 	if ((KERN_SUCCESS != kr) && !ipcLogged) {
1214 		ipcLogged = true;
1215 		IOLog("%s: mach_msg_send_from_kernel_with_options(0x%x)\n", __PRETTY_FUNCTION__, kr );
1216 	}
1217 
1218 	return kIOReturnSuccess;
1219 }
1220 
1221 OSObject *
1222 IOServiceMessageUserNotification::getNextObject()
1223 {
1224 	return NULL;
1225 }
1226 
1227 OSObject *
1228 IOServiceMessageUserNotification::copyNextObject()
1229 {
1230 	return NULL;
1231 }
1232 
1233 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1234 
1235 #undef super
1236 #define super IOService
1237 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1238 
1239 IOLock       * gIOUserClientOwnersLock;
1240 
1241 void
1242 IOUserClient::initialize( void )
1243 {
1244 	gIOObjectPortLock       = IOLockAlloc();
1245 	gIOUserClientOwnersLock = IOLockAlloc();
1246 	gIOUserServerLock       = IOLockAlloc();
1247 	assert(gIOObjectPortLock && gIOUserClientOwnersLock && gIOUserServerLock);
1248 
1249 #if IOTRACKING
1250 	IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1251 	IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1252 	IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1253 	IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1254 	IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1255 #endif /* IOTRACKING */
1256 }
1257 
1258 void
1259 #if __LP64__
1260 __attribute__((__noreturn__))
1261 #endif
1262 IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1263     mach_port_t wakePort,
1264     void *callback, void *refcon)
1265 {
1266 #if __LP64__
1267 	panic("setAsyncReference not valid for 64b");
1268 #else
1269 	asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
1270 	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1271 	asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
1272 	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1273 #endif
1274 }
1275 
1276 void
1277 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1278     mach_port_t wakePort,
1279     mach_vm_address_t callback, io_user_reference_t refcon)
1280 {
1281 	asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
1282 	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1283 	asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
1284 	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1285 }
1286 
1287 void
1288 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1289     mach_port_t wakePort,
1290     mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1291 {
1292 	setAsyncReference64(asyncRef, wakePort, callback, refcon);
1293 	if (vm_map_is_64bit(get_task_map(task))) {
1294 		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1295 	}
1296 }
1297 
1298 static OSDictionary *
1299 CopyConsoleUser(UInt32 uid)
1300 {
1301 	OSArray * array;
1302 	OSDictionary * user = NULL;
1303 
1304 	if ((array = OSDynamicCast(OSArray,
1305 	    IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1306 		for (unsigned int idx = 0;
1307 		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1308 		    idx++) {
1309 			OSNumber * num;
1310 
1311 			if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1312 			    && (uid == num->unsigned32BitValue())) {
1313 				user->retain();
1314 				break;
1315 			}
1316 		}
1317 		array->release();
1318 	}
1319 	return user;
1320 }
1321 
1322 static OSDictionary *
1323 CopyUserOnConsole(void)
1324 {
1325 	OSArray * array;
1326 	OSDictionary * user = NULL;
1327 
1328 	if ((array = OSDynamicCast(OSArray,
1329 	    IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1330 		for (unsigned int idx = 0;
1331 		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1332 		    idx++) {
1333 			if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1334 				user->retain();
1335 				break;
1336 			}
1337 		}
1338 		array->release();
1339 	}
1340 	return user;
1341 }
1342 
1343 IOReturn
1344 IOUserClient::clientHasAuthorization( task_t task,
1345     IOService * service )
1346 {
1347 	proc_t p;
1348 
1349 	p = (proc_t) get_bsdtask_info(task);
1350 	if (p) {
1351 		uint64_t authorizationID;
1352 
1353 		authorizationID = proc_uniqueid(p);
1354 		if (authorizationID) {
1355 			if (service->getAuthorizationID() == authorizationID) {
1356 				return kIOReturnSuccess;
1357 			}
1358 		}
1359 	}
1360 
1361 	return kIOReturnNotPermitted;
1362 }
1363 
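// Check a named privilege for the caller. securityToken is a task_t for most
// privileges, or an IOUCProcessToken for the secure-console-process check.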
1364 IOReturn
1365 IOUserClient::clientHasPrivilege( void * securityToken,
1366     const char * privilegeName )
1367 {
1368 	kern_return_t           kr;
1369 	security_token_t        token;
1370 	mach_msg_type_number_t  count;
1371 	task_t                  task;
1372 	OSDictionary *          user;
1373 	bool                    secureConsole;
1374 
1375 
1376 	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1377 	    sizeof(kIOClientPrivilegeForeground))) {
1378 		if (task_is_gpu_denied(current_task())) {
1379 			return kIOReturnNotPrivileged;
1380 		} else {
1381 			return kIOReturnSuccess;
1382 		}
1383 	}
1384 
1385 	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1386 	    sizeof(kIOClientPrivilegeConsoleSession))) {
1387 		kauth_cred_t cred;
1388 		proc_t       p;
1389 
1390 		task = (task_t) securityToken;
1391 		if (!task) {
1392 			task = current_task();
1393 		}
1394 		p = (proc_t) get_bsdtask_info(task);
1395 		kr = kIOReturnNotPrivileged;
1396 
1397 		if (p && (cred = kauth_cred_proc_ref(p))) {
1398 			user = CopyUserOnConsole();
1399 			if (user) {
1400 				OSNumber * num;
1401 				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1402 				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
1403 					kr = kIOReturnSuccess;
1404 				}
1405 				user->release();
1406 			}
1407 			kauth_cred_unref(&cred);
1408 		}
1409 		return kr;
1410 	}
1411 
1412 	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1413 	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
1414 		task = (task_t)((IOUCProcessToken *)securityToken)->token;
1415 	} else {
1416 		task = (task_t)securityToken;
1417 	}
1418 
1419 	count = TASK_SECURITY_TOKEN_COUNT;
1420 	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1421 
1422 	if (KERN_SUCCESS != kr) {
1423 	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1424 	    sizeof(kIOClientPrivilegeAdministrator))) {
1425 		if (0 != token.val[0]) {
1426 			kr = kIOReturnNotPrivileged;
1427 		}
1428 	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1429 	    sizeof(kIOClientPrivilegeLocalUser))) {
1430 		user = CopyConsoleUser(token.val[0]);
1431 		if (user) {
1432 			user->release();
1433 		} else {
1434 			kr = kIOReturnNotPrivileged;
1435 		}
1436 	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1437 	    sizeof(kIOClientPrivilegeConsoleUser))) {
1438 		user = CopyConsoleUser(token.val[0]);
1439 		if (user) {
1440 			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
1441 				kr = kIOReturnNotPrivileged;
1442 			} else if (secureConsole) {
1443 				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1444 				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
1445 					kr = kIOReturnNotPrivileged;
1446 				}
1447 			}
1448 			user->release();
1449 		} else {
1450 			kr = kIOReturnNotPrivileged;
1451 		}
1452 	} else {
1453 		kr = kIOReturnUnsupported;
1454 	}
1455 
1456 	return kr;
1457 }
1458 #define MAX_ENTITLEMENTS_LEN    (128 * 1024)
1459 
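// Copy a task's entitlements: prefer the code-signing layer's cached
// dictionary, falling back to parsing the raw entitlements blob.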
1460 OSDictionary *
1461 IOUserClient::copyClientEntitlements(task_t task)
1462 {
1463 	proc_t p = NULL;
1464 	pid_t pid = 0;
1465 	size_t len = 0;
1466 	void *entitlements_blob = NULL;
1467 	OSDictionary *entitlements = NULL;
1468 
1469 	p = (proc_t)get_bsdtask_info(task);
1470 	if (p == NULL) {
1471 		return NULL;
1472 	}
1473 	pid = proc_pid(p);
1474 
1475 	if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1476 		if (entitlements) {
1477 			return entitlements;
1478 		}
1479 	}
1480 
1481 	if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) {
1482 		return NULL;
1483 	}
1484 	return IOUserClient::copyEntitlementsFromBlob(entitlements_blob, len);
1485 }
1486 
1487 OSDictionary *
1488 IOUserClient::copyEntitlementsFromBlob(void *entitlements_blob, size_t len)
1489 {
1490 	char *entitlements_data = NULL;
1491 	OSObject *entitlements_obj = NULL;
1492 	OSString *errorString = NULL;
1493 	OSDictionary *entitlements = NULL;
1494 
1495 	if (len <= offsetof(CS_GenericBlob, data)) {
1496 		goto fail;
1497 	}
1498 
1499 	/*
1500 	 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1501 	 * we'll try to parse in the kernel.
1502 	 */
1503 	len -= offsetof(CS_GenericBlob, data);
1504 	if (len > MAX_ENTITLEMENTS_LEN) {
1505 		IOLog("failed to parse entitlements: %lu bytes of entitlements exceeds maximum of %u\n",
1506 		    len, MAX_ENTITLEMENTS_LEN);
1507 		goto fail;
1508 	}
1509 
1510 	/*
1511 	 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1512 	 * what is stored in the entitlements blob.  Copy the string and
1513 	 * terminate it.
1514 	 */
1515 	entitlements_data = (char *)IOMalloc(len + 1);
1516 	if (entitlements_data == NULL) {
1517 		goto fail;
1518 	}
1519 	memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1520 	entitlements_data[len] = '\0';
1521 
1522 	entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1523 	if (errorString != NULL) {
1524 		IOLog("failed to parse entitlements: %s\n", errorString->getCStringNoCopy());
1525 		goto fail;
1526 	}
1527 	if (entitlements_obj == NULL) {
1528 		goto fail;
1529 	}
1530 
1531 	entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1532 	if (entitlements == NULL) {
1533 		goto fail;
1534 	}
1535 	entitlements_obj = NULL;
1536 
1537 fail:
1538 	if (entitlements_data != NULL) {
1539 		IOFree(entitlements_data, len + 1);
1540 	}
1541 	if (entitlements_obj != NULL) {
1542 		entitlements_obj->release();
1543 	}
1544 	if (errorString != NULL) {
1545 		errorString->release();
1546 	}
1547 	return entitlements;
1548 }
1549 
1550 OSDictionary *
1551 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1552 {
1553 	size_t len = 0;
1554 	void *entitlements_blob = NULL;
1555 
1556 	if (cs_entitlements_blob_get_vnode(vnode, offset, &entitlements_blob, &len) != 0) {
1557 		return NULL;
1558 	}
1559 	return IOUserClient::copyEntitlementsFromBlob(entitlements_blob, len);
1560 }
1561 
1562 OSObject *
1563 IOUserClient::copyClientEntitlement( task_t task,
1564     const char * entitlement )
1565 {
1566 	OSDictionary *entitlements;
1567 	OSObject *value;
1568 
1569 	entitlements = copyClientEntitlements(task);
1570 	if (entitlements == NULL) {
1571 		return NULL;
1572 	}
1573 
1574 	/* Fetch the entitlement value from the dictionary. */
1575 	value = entitlements->getObject(entitlement);
1576 	if (value != NULL) {
1577 		value->retain();
1578 	}
1579 
1580 	entitlements->release();
1581 	return value;
1582 }
1583 
1584 OSObject *
1585 IOUserClient::copyClientEntitlementVnode(
1586 	struct vnode *vnode,
1587 	off_t offset,
1588 	const char *entitlement)
1589 {
1590 	OSDictionary *entitlements;
1591 	OSObject *value;
1592 
1593 	entitlements = copyClientEntitlementsVnode(vnode, offset);
1594 	if (entitlements == NULL) {
1595 		return NULL;
1596 	}
1597 
1598 	/* Fetch the entitlement value from the dictionary. */
1599 	value = entitlements->getObject(entitlement);
1600 	if (value != NULL) {
1601 		value->retain();
1602 	}
1603 
1604 	entitlements->release();
1605 	return value;
1606 }
1607 
1608 bool
1609 IOUserClient::init()
1610 {
1611 	if (getPropertyTable() || super::init()) {
1612 		return reserve();
1613 	}
1614 
1615 	return false;
1616 }
1617 
1618 bool
1619 IOUserClient::init(OSDictionary * dictionary)
1620 {
1621 	if (getPropertyTable() || super::init(dictionary)) {
1622 		return reserve();
1623 	}
1624 
1625 	return false;
1626 }
1627 
1628 bool
1629 IOUserClient::initWithTask(task_t owningTask,
1630     void * securityID,
1631     UInt32 type )
1632 {
1633 	if (getPropertyTable() || super::init()) {
1634 		return reserve();
1635 	}
1636 
1637 	return false;
1638 }
1639 
1640 bool
1641 IOUserClient::initWithTask(task_t owningTask,
1642     void * securityID,
1643     UInt32 type,
1644     OSDictionary * properties )
1645 {
1646 	bool ok;
1647 
1648 	ok = super::init( properties );
1649 	ok &= initWithTask( owningTask, securityID, type );
1650 
1651 	return ok;
1652 }
1653 
1654 bool
1655 IOUserClient::reserve()
1656 {
1657 	if (!reserved) {
1658 		reserved = IONewZero(ExpansionData, 1);
1659 		if (!reserved) {
1660 			return false;
1661 		}
1662 	}
1663 	setTerminateDefer(NULL, true);
1664 	IOStatisticsRegisterCounter();
1665 
1666 	return true;
1667 }
1668 
1669 struct IOUserClientOwner {
1670 	task_t         task;
1671 	queue_chain_t  taskLink;
1672 	IOUserClient * uc;
1673 	queue_chain_t  ucLink;
1674 };
1675 
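// Record task as an owner of this user client (at most once per task). Owners
// are linked both on the client and on the task so either side can find the
// other at termination time.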
1676 IOReturn
1677 IOUserClient::registerOwner(task_t task)
1678 {
1679 	IOUserClientOwner * owner;
1680 	IOReturn            ret;
1681 	bool                newOwner;
1682 
1683 	IOLockLock(gIOUserClientOwnersLock);
1684 
1685 	newOwner = true;
1686 	ret = kIOReturnSuccess;
1687 
1688 	if (!owners.next) {
1689 		queue_init(&owners);
1690 	} else {
1691 		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1692 		{
1693 			if (task != owner->task) {
1694 				continue;
1695 			}
1696 			newOwner = false;
1697 			break;
1698 		}
1699 	}
1700 	if (newOwner) {
1701 		owner = IONew(IOUserClientOwner, 1);
1702 		if (!owner) {
1703 			ret = kIOReturnNoMemory;
1704 		} else {
1705 			owner->task = task;
1706 			owner->uc   = this;
1707 			queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1708 			queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1709 			if (messageAppSuspended) {
1710 				task_set_message_app_suspended(task, true);
1711 			}
1712 		}
1713 	}
1714 
1715 	IOLockUnlock(gIOUserClientOwnersLock);
1716 
1717 	return ret;
1718 }
1719 
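// Tear down the owner list once the connect port has lost its last send right,
// recomputing each task's message-app-suspended interest as owners drop off.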
1720 void
1721 IOUserClient::noMoreSenders(void)
1722 {
1723 	IOUserClientOwner * owner;
1724 	IOUserClientOwner * iter;
1725 	queue_head_t      * taskque;
1726 	bool                hasMessageAppSuspended;
1727 
1728 	IOLockLock(gIOUserClientOwnersLock);
1729 
1730 	if (owners.next) {
1731 		while (!queue_empty(&owners)) {
1732 			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1733 			taskque = task_io_user_clients(owner->task);
1734 			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1735 			hasMessageAppSuspended = false;
1736 			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1737 				hasMessageAppSuspended = iter->uc->messageAppSuspended;
1738 				if (hasMessageAppSuspended) {
1739 					break;
1740 				}
1741 			}
1742 			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
1743 			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1744 			IODelete(owner, IOUserClientOwner, 1);
1745 		}
1746 		owners.next = owners.prev = NULL;
1747 	}
1748 
1749 	IOLockUnlock(gIOUserClientOwnersLock);
1750 }
1751 
1752 
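// App-suspension change hook: collect this task's user clients that opted into
// messageAppSuspended, then message them outside the owners lock.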
1753 extern "C" void
1754 iokit_task_app_suspended_changed(task_t task)
1755 {
1756 	queue_head_t      * taskque;
1757 	IOUserClientOwner * owner;
1758 	OSSet             * set;
1759 
1760 	IOLockLock(gIOUserClientOwnersLock);
1761 
1762 	taskque = task_io_user_clients(task);
1763 	set = NULL;
1764 	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
1765 		if (!owner->uc->messageAppSuspended) {
1766 			continue;
1767 		}
1768 		if (!set) {
1769 			set = OSSet::withCapacity(4);
1770 			if (!set) {
1771 				break;
1772 			}
1773 		}
1774 		set->setObject(owner->uc);
1775 	}
1776 
1777 	IOLockUnlock(gIOUserClientOwnersLock);
1778 
1779 	if (set) {
1780 		set->iterateObjects(^bool (OSObject * obj) {
1781 			IOUserClient      * uc;
1782 
1783 			uc = (typeof(uc))obj;
1784 #if 0
1785 			{
1786 			        OSString          * str;
1787 			        str = IOCopyLogNameForPID(task_pid(task));
1788 			        IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
1789 			        uc->getName(), task_is_app_suspended(task));
1790 			        OSSafeReleaseNULL(str);
1791 			}
1792 #endif
1793 			uc->message(kIOMessageTaskAppSuspendedChange, NULL);
1794 
1795 			return false;
1796 		});
1797 		set->release();
1798 	}
1799 }
1800 
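// Called when a task terminates: unhook each IOUserClientOwner for the task and,
// for any client left with no owners, deliver clientDied() outside the lock via a
// temporary singly linked list threaded through the owners queue head.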
1801 extern "C" kern_return_t
1802 iokit_task_terminate(task_t task)
1803 {
1804 	IOUserClientOwner * owner;
1805 	IOUserClient      * dead;
1806 	IOUserClient      * uc;
1807 	queue_head_t      * taskque;
1808 
1809 	IOLockLock(gIOUserClientOwnersLock);
1810 
1811 	taskque = task_io_user_clients(task);
1812 	dead = NULL;
1813 	while (!queue_empty(taskque)) {
1814 		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1815 		uc = owner->uc;
1816 		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1817 		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1818 		if (queue_empty(&uc->owners)) {
1819 			uc->retain();
1820 			IOLog("destroying out of band connect for %s\n", uc->getName());
1821 			// now using the uc queue head as a singly linked queue,
1822 			// leaving .next as NULL to mark it empty
1823 			uc->owners.next = NULL;
1824 			uc->owners.prev = (queue_entry_t) dead;
1825 			dead = uc;
1826 		}
1827 		IODelete(owner, IOUserClientOwner, 1);
1828 	}
1829 
1830 	IOLockUnlock(gIOUserClientOwnersLock);
1831 
1832 	while (dead) {
1833 		uc = dead;
1834 		dead = (IOUserClient *)(void *) dead->owners.prev;
1835 		uc->owners.prev = NULL;
1836 		if (uc->sharedInstance || !uc->closed) {
1837 			uc->clientDied();
1838 		}
1839 		uc->release();
1840 	}
1841 
1842 	return KERN_SUCCESS;
1843 }
1844 
1845 struct IOUCFilterPolicy {
1846 	task_t             task;
1847 	io_filter_policy_t filterPolicy;
1848 	IOUCFilterPolicy * next;
1849 };
1850 
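// Return the io_filter_policy_t cached for this task, installing addFilterPolicy
// as the cached entry when none exists yet.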
1851 io_filter_policy_t
1852 IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
1853 {
1854 	IOUCFilterPolicy * elem;
1855 	io_filter_policy_t filterPolicy;
1856 
1857 	filterPolicy = 0;
1858 	IOLockLock(filterLock);
1859 
1860 	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
1861 	}
1862 
1863 	if (elem) {
1864 		if (addFilterPolicy) {
1865 			assert(addFilterPolicy == elem->filterPolicy);
1866 		}
1867 		filterPolicy = elem->filterPolicy;
1868 	} else if (addFilterPolicy) {
1869 		elem = IONewZero(IOUCFilterPolicy, 1);
1870 		if (elem) {
1871 			elem->task               = task;
1872 			elem->filterPolicy       = addFilterPolicy;
1873 			elem->next               = reserved->filterPolicies;
1874 			reserved->filterPolicies = elem;
1875 			filterPolicy = addFilterPolicy;
1876 		}
1877 	}
1878 
1879 	IOLockUnlock(filterLock);
1880 	return filterPolicy;
1881 }
1882 
1883 void
1884 IOUserClient::free()
1885 {
1886 	if (mappings) {
1887 		mappings->release();
1888 	}
1889 	if (lock) {
1890 		IORWLockFree(lock);
1891 	}
1892 	if (filterLock) {
1893 		IOLockFree(filterLock);
1894 	}
1895 
1896 	IOStatisticsUnregisterCounter();
1897 
1898 	assert(!owners.next);
1899 	assert(!owners.prev);
1900 
1901 	if (reserved) {
1902 		IOUCFilterPolicy * elem;
1903 		IOUCFilterPolicy * nextElem;
1904 		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
1905 			nextElem = elem->next;
1906 			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
1907 				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
1908 			}
1909 			IODelete(elem, IOUCFilterPolicy, 1);
1910 		}
1911 		IODelete(reserved, ExpansionData, 1);
1912 	}
1913 
1914 	super::free();
1915 }
1916 
1917 IOReturn
1918 IOUserClient::clientDied( void )
1919 {
1920 	IOReturn ret = kIOReturnNotReady;
1921 
1922 	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1923 		ret = clientClose();
1924 	}
1925 
1926 	return ret;
1927 }
1928 
1929 IOReturn
1930 IOUserClient::clientClose( void )
1931 {
1932 	return kIOReturnUnsupported;
1933 }
1934 
1935 IOService *
1936 IOUserClient::getService( void )
1937 {
1938 	return NULL;
1939 }
1940 
1941 IOReturn
1942 IOUserClient::registerNotificationPort(
1943 	mach_port_t     /* port */,
1944 	UInt32          /* type */,
1945 	UInt32          /* refCon */)
1946 {
1947 	return kIOReturnUnsupported;
1948 }
1949 
1950 IOReturn
1951 IOUserClient::registerNotificationPort(
1952 	mach_port_t port,
1953 	UInt32          type,
1954 	io_user_reference_t refCon)
1955 {
1956 	return registerNotificationPort(port, type, (UInt32) refCon);
1957 }
1958 
1959 IOReturn
1960 IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1961     semaphore_t * semaphore )
1962 {
1963 	return kIOReturnUnsupported;
1964 }
1965 
1966 IOReturn
1967 IOUserClient::connectClient( IOUserClient * /* client */ )
1968 {
1969 	return kIOReturnUnsupported;
1970 }
1971 
1972 IOReturn
1973 IOUserClient::clientMemoryForType( UInt32 type,
1974     IOOptionBits * options,
1975     IOMemoryDescriptor ** memory )
1976 {
1977 	return kIOReturnUnsupported;
1978 }
1979 
1980 IOReturn
1981 IOUserClient::clientMemoryForType( UInt32 type,
1982     IOOptionBits * options,
1983     OSSharedPtr<IOMemoryDescriptor>& memory )
1984 {
1985 	IOMemoryDescriptor* memoryRaw = nullptr;
1986 	IOReturn result = clientMemoryForType(type, options, &memoryRaw);
1987 	memory.reset(memoryRaw, OSNoRetain);
1988 	return result;
1989 }
1990 
1991 #if !__LP64__
1992 IOMemoryMap *
1993 IOUserClient::mapClientMemory(
1994 	IOOptionBits            type,
1995 	task_t                  task,
1996 	IOOptionBits            mapFlags,
1997 	IOVirtualAddress        atAddress )
1998 {
1999 	return NULL;
2000 }
2001 #endif
2002 
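// Map the memory a subclass exposes for this type into the given task, merging
// the caller's kIOMapUserOptionsMask bits with the subclass-supplied options.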
2003 IOMemoryMap *
2004 IOUserClient::mapClientMemory64(
2005 	IOOptionBits            type,
2006 	task_t                  task,
2007 	IOOptionBits            mapFlags,
2008 	mach_vm_address_t       atAddress )
2009 {
2010 	IOReturn            err;
2011 	IOOptionBits        options = 0;
2012 	IOMemoryDescriptor * memory = NULL;
2013 	IOMemoryMap *       map = NULL;
2014 
2015 	err = clientMemoryForType((UInt32) type, &options, &memory );
2016 
2017 	if (memory && (kIOReturnSuccess == err)) {
2018 		FAKE_STACK_FRAME(getMetaClass());
2019 
2020 		options = (options & ~kIOMapUserOptionsMask)
2021 		    | (mapFlags & kIOMapUserOptionsMask);
2022 		map = memory->createMappingInTask( task, atAddress, options );
2023 		memory->release();
2024 
2025 		FAKE_STACK_FRAME_END();
2026 	}
2027 
2028 	return map;
2029 }
2030 
2031 IOReturn
2032 IOUserClient::exportObjectToClient(task_t task,
2033     OSObject *obj, io_object_t *clientObj)
2034 {
2035 	mach_port_name_t    name;
2036 
2037 	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
2038 
2039 	*clientObj = (io_object_t)(uintptr_t) name;
2040 
2041 	if (obj) {
2042 		obj->release();
2043 	}
2044 
2045 	return kIOReturnSuccess;
2046 }
2047 
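/*
 * copyPortNameForObjectInTask() and copyObjectForPortNameInTask() form a
 * round trip: the first publishes 'obj' to 'task' as an IKOT_IOKIT_IDENT
 * send right, and the second resolves such a port name back to the object,
 * returned with an extra reference the caller must release.
 * adjustPortNameReferencesInTask() adjusts the user reference count on the
 * port name, for example to drop the right once the kernel is done with it.
 */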
2048 IOReturn
2049 IOUserClient::copyPortNameForObjectInTask(task_t task,
2050     OSObject *obj, mach_port_name_t * port_name)
2051 {
2052 	mach_port_name_t    name;
2053 
2054 	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
2055 
2056 	*port_name = name;
2057 
2058 	return kIOReturnSuccess;
2059 }
2060 
2061 IOReturn
2062 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2063     OSObject **obj)
2064 {
2065 	OSObject * object;
2066 
2067 	object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2068 
2069 	*obj = object;
2070 
2071 	return object ? kIOReturnSuccess : kIOReturnIPCError;
2072 }
2073 
2074 IOReturn
2075 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2076     OSSharedPtr<OSObject>& obj)
2077 {
2078 	OSObject* objRaw = NULL;
2079 	IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2080 	obj.reset(objRaw, OSNoRetain);
2081 	return result;
2082 }
2083 
2084 IOReturn
2085 IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
2086 {
2087 	return iokit_mod_send_right(task, port_name, delta);
2088 }
2089 
2090 IOExternalMethod *
2091 IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
2092 {
2093 	return NULL;
2094 }
2095 
2096 IOExternalAsyncMethod *
2097 IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
2098 {
2099 	return NULL;
2100 }
2101 
2102 IOExternalTrap *
2103 IOUserClient::
2104 getExternalTrapForIndex(UInt32 index)
2105 {
2106 	return NULL;
2107 }
2108 
2109 #pragma clang diagnostic push
2110 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2111 
2112 // Suppress the deprecated-declarations warning here: avoiding the deprecated
2113 // functions would break clients of kexts that implement getExternalMethodForIndex()
2114 IOExternalMethod *
2115 IOUserClient::
2116 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2117 {
2118 	IOExternalMethod *method = getExternalMethodForIndex(index);
2119 
2120 	if (method) {
2121 		*targetP = (IOService *) method->object;
2122 	}
2123 
2124 	return method;
2125 }
2126 
2127 IOExternalMethod *
2128 IOUserClient::
2129 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2130 {
2131 	IOService* targetPRaw = NULL;
2132 	IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2133 	targetP.reset(targetPRaw, OSRetain);
2134 	return result;
2135 }
2136 
2137 IOExternalAsyncMethod *
2138 IOUserClient::
2139 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2140 {
2141 	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2142 
2143 	if (method) {
2144 		*targetP = (IOService *) method->object;
2145 	}
2146 
2147 	return method;
2148 }
2149 
2150 IOExternalAsyncMethod *
2151 IOUserClient::
2152 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2153 {
2154 	IOService* targetPRaw = NULL;
2155 	IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2156 	targetP.reset(targetPRaw, OSRetain);
2157 	return result;
2158 }
2159 
2160 IOExternalTrap *
2161 IOUserClient::
2162 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2163 {
2164 	IOExternalTrap *trap = getExternalTrapForIndex(index);
2165 
2166 	if (trap) {
2167 		*targetP = trap->object;
2168 	}
2169 
2170 	return trap;
2171 }
2172 #pragma clang diagnostic pop
2173 
2174 IOReturn
2175 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2176 {
2177 	mach_port_t port;
2178 	port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2179 
2180 	if (MACH_PORT_NULL != port) {
2181 		iokit_release_port_send(port);
2182 	}
2183 
2184 	return kIOReturnSuccess;
2185 }
2186 
2187 IOReturn
2188 IOUserClient::releaseNotificationPort(mach_port_t port)
2189 {
2190 	if (MACH_PORT_NULL != port) {
2191 		iokit_release_port_send(port);
2192 	}
2193 
2194 	return kIOReturnSuccess;
2195 }
2196 
2197 IOReturn
2198 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2199     IOReturn result, void *args[], UInt32 numArgs)
2200 {
2201 	OSAsyncReference64  reference64;
2202 	io_user_reference_t args64[kMaxAsyncArgs];
2203 	unsigned int        idx;
2204 
2205 	if (numArgs > kMaxAsyncArgs) {
2206 		return kIOReturnMessageTooLarge;
2207 	}
2208 
2209 	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2210 		reference64[idx] = REF64(reference[idx]);
2211 	}
2212 
2213 	for (idx = 0; idx < numArgs; idx++) {
2214 		args64[idx] = REF64(args[idx]);
2215 	}
2216 
2217 	return sendAsyncResult64(reference64, result, args64, numArgs);
2218 }
2219 
2220 IOReturn
2221 IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
2222     IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2223 {
2224 	return _sendAsyncResult64(reference, result, args, numArgs, options);
2225 }
2226 
2227 IOReturn
2228 IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
2229     IOReturn result, io_user_reference_t args[], UInt32 numArgs)
2230 {
2231 	return _sendAsyncResult64(reference, result, args, numArgs, 0);
2232 }
2233 
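/*
 * Shared implementation for sendAsyncResult64() and
 * sendAsyncResult64WithOptions().  reference[0] carries the reply port with
 * the kIOUCAsync0Flags bits folded in: kIOUCAsync64Flag selects the 64-bit
 * notification layout and kIOUCAsyncErrorLoggedFlag limits the send-failure
 * log to one message per reference.  With kIOUserNotifyOptionCanDrop the
 * reply is sent with MACH_SEND_TIMEOUT, so a full queue drops the message
 * (MACH_SEND_TIMED_OUT); otherwise a full queue fails the send and the
 * failure is logged once.
 */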
2234 IOReturn
2235 IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
2236     IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2237 {
2238 	struct ReplyMsg {
2239 		mach_msg_header_t msgHdr;
2240 		union{
2241 			struct{
2242 				OSNotificationHeader     notifyHdr;
2243 				IOAsyncCompletionContent asyncContent;
2244 				uint32_t                 args[kMaxAsyncArgs];
2245 			} msg32;
2246 			struct{
2247 				OSNotificationHeader64   notifyHdr;
2248 				IOAsyncCompletionContent asyncContent;
2249 				io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
2250 			} msg64;
2251 		} m;
2252 	};
2253 	ReplyMsg      replyMsg;
2254 	mach_port_t   replyPort;
2255 	kern_return_t kr;
2256 
2257 	// If no reply port, do nothing.
2258 	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2259 	if (replyPort == MACH_PORT_NULL) {
2260 		return kIOReturnSuccess;
2261 	}
2262 
2263 	if (numArgs > kMaxAsyncArgs) {
2264 		return kIOReturnMessageTooLarge;
2265 	}
2266 
2267 	bzero(&replyMsg, sizeof(replyMsg));
2268 	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
2269 	    0 /*local*/);
2270 	replyMsg.msgHdr.msgh_remote_port = replyPort;
2271 	replyMsg.msgHdr.msgh_local_port  = NULL;
2272 	replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
2273 	if (kIOUCAsync64Flag & reference[0]) {
2274 		replyMsg.msgHdr.msgh_size =
2275 		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
2276 		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
2277 		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2278 		    + numArgs * sizeof(io_user_reference_t);
2279 		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
2280 		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
2281 		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));
2282 
2283 		replyMsg.m.msg64.asyncContent.result = result;
2284 		if (numArgs) {
2285 			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
2286 		}
2287 	} else {
2288 		unsigned int idx;
2289 
2290 		replyMsg.msgHdr.msgh_size =
2291 		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
2292 		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
2293 
2294 		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2295 		    + numArgs * sizeof(uint32_t);
2296 		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
2297 
2298 		/* Skip reference[0] which is left as 0 from the earlier bzero */
2299 		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
2300 			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
2301 		}
2302 
2303 		replyMsg.m.msg32.asyncContent.result = result;
2304 
2305 		for (idx = 0; idx < numArgs; idx++) {
2306 			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
2307 		}
2308 	}
2309 
2310 	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
2311 		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
2312 		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
2313 	} else {
2314 		/* Fail on full queue. */
2315 		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
2316 		    replyMsg.msgHdr.msgh_size);
2317 	}
2318 	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
2319 		reference[0] |= kIOUCAsyncErrorLoggedFlag;
2320 		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
2321 	}
2322 	return kr;
2323 }
2324 
2325 
2326 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2327 
2328 extern "C" {
2329 #define CHECK(cls, obj, out)                      \
2330 	cls * out;                              \
2331 	if( !(out = OSDynamicCast( cls, obj)))  \
2332 	    return( kIOReturnBadArgument )
2333 
2334 #define CHECKLOCKED(cls, obj, out)                                        \
2335 	IOUserIterator * oIter;                                         \
2336 	cls * out;                                                      \
2337 	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))              \
2338 	    return (kIOReturnBadArgument);                              \
2339 	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))     \
2340 	    return (kIOReturnBadArgument)
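/*
 * CHECK() declares a typed local 'out' and fails the calling MIG routine
 * with kIOReturnBadArgument if the incoming object is not of class 'cls'.
 * CHECKLOCKED() additionally unwraps an IOUserIterator, leaving 'oIter'
 * available so the caller can serialize access through oIter->lock.
 */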
2341 
2342 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2343 
2344 // Create a vm_map_copy_t for memory to be copied out to user space.
2345 // The ipc layer frees it after the copyout.
2346 
2347 static kern_return_t
2348 copyoutkdata( const void * data, vm_size_t len,
2349     io_buf_ptr_t * buf )
2350 {
2351 	kern_return_t       err;
2352 	vm_map_copy_t       copy;
2353 
2354 	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2355 	    false /* src_destroy */, &copy);
2356 
2357 	assert( err == KERN_SUCCESS );
2358 	if (err == KERN_SUCCESS) {
2359 		*buf = (char *) copy;
2360 	}
2361 
2362 	return err;
2363 }
2364 
2365 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2366 
2367 /* Routine io_server_version */
2368 kern_return_t
2369 is_io_server_version(
2370 	mach_port_t master_port,
2371 	uint64_t *version)
2372 {
2373 	*version = IOKIT_SERVER_VERSION;
2374 	return kIOReturnSuccess;
2375 }
2376 
2377 /* Routine io_object_get_class */
2378 kern_return_t
2379 is_io_object_get_class(
2380 	io_object_t object,
2381 	io_name_t className )
2382 {
2383 	const OSMetaClass* my_obj = NULL;
2384 
2385 	if (!object) {
2386 		return kIOReturnBadArgument;
2387 	}
2388 
2389 	my_obj = object->getMetaClass();
2390 	if (!my_obj) {
2391 		return kIOReturnNotFound;
2392 	}
2393 
2394 	strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2395 
2396 	return kIOReturnSuccess;
2397 }
2398 
2399 /* Routine io_object_get_superclass */
2400 kern_return_t
2401 is_io_object_get_superclass(
2402 	mach_port_t master_port,
2403 	io_name_t obj_name,
2404 	io_name_t class_name)
2405 {
2406 	IOReturn            ret;
2407 	const OSMetaClass * meta;
2408 	const OSMetaClass * super;
2409 	const OSSymbol    * name;
2410 	const char        * cstr;
2411 
2412 	if (!obj_name || !class_name) {
2413 		return kIOReturnBadArgument;
2414 	}
2415 	if (master_port != master_device_port) {
2416 		return kIOReturnNotPrivileged;
2417 	}
2418 
2419 	ret = kIOReturnNotFound;
2420 	meta = NULL;
2421 	do{
2422 		name = OSSymbol::withCString(obj_name);
2423 		if (!name) {
2424 			break;
2425 		}
2426 		meta = OSMetaClass::copyMetaClassWithName(name);
2427 		if (!meta) {
2428 			break;
2429 		}
2430 		super = meta->getSuperClass();
2431 		if (!super) {
2432 			break;
2433 		}
2434 		cstr = super->getClassName();
2435 		if (!cstr) {
2436 			break;
2437 		}
2438 		strlcpy(class_name, cstr, sizeof(io_name_t));
2439 		ret = kIOReturnSuccess;
2440 	}while (false);
2441 
2442 	OSSafeReleaseNULL(name);
2443 	if (meta) {
2444 		meta->releaseMetaClass();
2445 	}
2446 
2447 	return ret;
2448 }
2449 
2450 /* Routine io_object_get_bundle_identifier */
2451 kern_return_t
2452 is_io_object_get_bundle_identifier(
2453 	mach_port_t master_port,
2454 	io_name_t obj_name,
2455 	io_name_t bundle_name)
2456 {
2457 	IOReturn            ret;
2458 	const OSMetaClass * meta;
2459 	const OSSymbol    * name;
2460 	const OSSymbol    * identifier;
2461 	const char        * cstr;
2462 
2463 	if (!obj_name || !bundle_name) {
2464 		return kIOReturnBadArgument;
2465 	}
2466 	if (master_port != master_device_port) {
2467 		return kIOReturnNotPrivileged;
2468 	}
2469 
2470 	ret = kIOReturnNotFound;
2471 	meta = NULL;
2472 	do{
2473 		name = OSSymbol::withCString(obj_name);
2474 		if (!name) {
2475 			break;
2476 		}
2477 		meta = OSMetaClass::copyMetaClassWithName(name);
2478 		if (!meta) {
2479 			break;
2480 		}
2481 		identifier = meta->getKmodName();
2482 		if (!identifier) {
2483 			break;
2484 		}
2485 		cstr = identifier->getCStringNoCopy();
2486 		if (!cstr) {
2487 			break;
2488 		}
2489 		strlcpy(bundle_name, cstr, sizeof(io_name_t));
2490 		ret = kIOReturnSuccess;
2491 	}while (false);
2492 
2493 	OSSafeReleaseNULL(name);
2494 	if (meta) {
2495 		meta->releaseMetaClass();
2496 	}
2497 
2498 	return ret;
2499 }
2500 
2501 /* Routine io_object_conforms_to */
2502 kern_return_t
2503 is_io_object_conforms_to(
2504 	io_object_t object,
2505 	io_name_t className,
2506 	boolean_t *conforms )
2507 {
2508 	if (!object) {
2509 		return kIOReturnBadArgument;
2510 	}
2511 
2512 	*conforms = (NULL != object->metaCast( className ));
2513 
2514 	return kIOReturnSuccess;
2515 }
2516 
2517 /* Routine io_object_get_retain_count */
2518 kern_return_t
2519 is_io_object_get_retain_count(
2520 	io_object_t object,
2521 	uint32_t *retainCount )
2522 {
2523 	if (!object) {
2524 		return kIOReturnBadArgument;
2525 	}
2526 
2527 	*retainCount = object->getRetainCount();
2528 	return kIOReturnSuccess;
2529 }
2530 
2531 /* Routine io_iterator_next */
2532 kern_return_t
2533 is_io_iterator_next(
2534 	io_object_t iterator,
2535 	io_object_t *object )
2536 {
2537 	IOReturn    ret;
2538 	OSObject *  obj;
2539 	OSIterator * iter;
2540 	IOUserIterator * uiter;
2541 
2542 	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2543 		obj = uiter->copyNextObject();
2544 	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2545 		obj = iter->getNextObject();
2546 		if (obj) {
2547 			obj->retain();
2548 		}
2549 	} else {
2550 		return kIOReturnBadArgument;
2551 	}
2552 
2553 	if (obj) {
2554 		*object = obj;
2555 		ret = kIOReturnSuccess;
2556 	} else {
2557 		ret = kIOReturnNoDevice;
2558 	}
2559 
2560 	return ret;
2561 }
2562 
2563 /* Routine io_iterator_reset */
2564 kern_return_t
2565 is_io_iterator_reset(
2566 	io_object_t iterator )
2567 {
2568 	CHECK( OSIterator, iterator, iter );
2569 
2570 	iter->reset();
2571 
2572 	return kIOReturnSuccess;
2573 }
2574 
2575 /* Routine io_iterator_is_valid */
2576 kern_return_t
2577 is_io_iterator_is_valid(
2578 	io_object_t iterator,
2579 	boolean_t *is_valid )
2580 {
2581 	CHECK( OSIterator, iterator, iter );
2582 
2583 	*is_valid = iter->isValid();
2584 
2585 	return kIOReturnSuccess;
2586 }
2587 
2588 static kern_return_t
2589 internal_io_service_match_property_table(
2590 	io_service_t _service,
2591 	const char * matching,
2592 	mach_msg_type_number_t matching_size,
2593 	boolean_t *matches)
2594 {
2595 	CHECK( IOService, _service, service );
2596 
2597 	kern_return_t       kr;
2598 	OSObject *          obj;
2599 	OSDictionary *      dict;
2600 
2601 	assert(matching_size);
2602 
2603 
2604 	obj = OSUnserializeXML(matching, matching_size);
2605 
2606 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2607 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2608 		*matches = service->passiveMatch( dict );
2609 		kr = kIOReturnSuccess;
2610 	} else {
2611 		kr = kIOReturnBadArgument;
2612 	}
2613 
2614 	if (obj) {
2615 		obj->release();
2616 	}
2617 
2618 	return kr;
2619 }
2620 
2621 /* Routine io_service_match_property_table */
2622 kern_return_t
2623 is_io_service_match_property_table(
2624 	io_service_t service,
2625 	io_string_t matching,
2626 	boolean_t *matches )
2627 {
2628 	return kIOReturnUnsupported;
2629 }
2630 
2631 
2632 /* Routine io_service_match_property_table_ool */
2633 kern_return_t
2634 is_io_service_match_property_table_ool(
2635 	io_object_t service,
2636 	io_buf_ptr_t matching,
2637 	mach_msg_type_number_t matchingCnt,
2638 	kern_return_t *result,
2639 	boolean_t *matches )
2640 {
2641 	kern_return_t         kr;
2642 	vm_offset_t           data;
2643 	vm_map_offset_t       map_data;
2644 
2645 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2646 	data = CAST_DOWN(vm_offset_t, map_data);
2647 
2648 	if (KERN_SUCCESS == kr) {
2649 		// must return success after vm_map_copyout() succeeds
2650 		*result = internal_io_service_match_property_table(service,
2651 		    (const char *)data, matchingCnt, matches );
2652 		vm_deallocate( kernel_map, data, matchingCnt );
2653 	}
2654 
2655 	return kr;
2656 }
2657 
2658 /* Routine io_service_match_property_table_bin */
2659 kern_return_t
2660 is_io_service_match_property_table_bin(
2661 	io_object_t service,
2662 	io_struct_inband_t matching,
2663 	mach_msg_type_number_t matchingCnt,
2664 	boolean_t *matches)
2665 {
2666 	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
2667 }
2668 
2669 static kern_return_t
2670 internal_io_service_get_matching_services(
2671 	mach_port_t master_port,
2672 	const char * matching,
2673 	mach_msg_type_number_t matching_size,
2674 	io_iterator_t *existing )
2675 {
2676 	kern_return_t       kr;
2677 	OSObject *          obj;
2678 	OSDictionary *      dict;
2679 
2680 	if (master_port != master_device_port) {
2681 		return kIOReturnNotPrivileged;
2682 	}
2683 
2684 	assert(matching_size);
2685 	obj = OSUnserializeXML(matching, matching_size);
2686 
2687 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2688 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2689 		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2690 		kr = kIOReturnSuccess;
2691 	} else {
2692 		kr = kIOReturnBadArgument;
2693 	}
2694 
2695 	if (obj) {
2696 		obj->release();
2697 	}
2698 
2699 	return kr;
2700 }
2701 
2702 /* Routine io_service_get_matching_services */
2703 kern_return_t
2704 is_io_service_get_matching_services(
2705 	mach_port_t master_port,
2706 	io_string_t matching,
2707 	io_iterator_t *existing )
2708 {
2709 	return kIOReturnUnsupported;
2710 }
2711 
2712 /* Routine io_service_get_matching_services_ool */
2713 kern_return_t
2714 is_io_service_get_matching_services_ool(
2715 	mach_port_t master_port,
2716 	io_buf_ptr_t matching,
2717 	mach_msg_type_number_t matchingCnt,
2718 	kern_return_t *result,
2719 	io_object_t *existing )
2720 {
2721 	kern_return_t       kr;
2722 	vm_offset_t         data;
2723 	vm_map_offset_t     map_data;
2724 
2725 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2726 	data = CAST_DOWN(vm_offset_t, map_data);
2727 
2728 	if (KERN_SUCCESS == kr) {
2729 		// must return success after vm_map_copyout() succeeds
2730 		// and mig will copy out objects on success
2731 		*existing = NULL;
2732 		*result = internal_io_service_get_matching_services(master_port,
2733 		    (const char *) data, matchingCnt, existing);
2734 		vm_deallocate( kernel_map, data, matchingCnt );
2735 	}
2736 
2737 	return kr;
2738 }
2739 
2740 /* Routine io_service_get_matching_services_bin */
2741 kern_return_t
2742 is_io_service_get_matching_services_bin(
2743 	mach_port_t master_port,
2744 	io_struct_inband_t matching,
2745 	mach_msg_type_number_t matchingCnt,
2746 	io_object_t *existing)
2747 {
2748 	return internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing);
2749 }
2750 
2751 
2752 static kern_return_t
2753 internal_io_service_get_matching_service(
2754 	mach_port_t master_port,
2755 	const char * matching,
2756 	mach_msg_type_number_t matching_size,
2757 	io_service_t *service )
2758 {
2759 	kern_return_t       kr;
2760 	OSObject *          obj;
2761 	OSDictionary *      dict;
2762 
2763 	if (master_port != master_device_port) {
2764 		return kIOReturnNotPrivileged;
2765 	}
2766 
2767 	assert(matching_size);
2768 	obj = OSUnserializeXML(matching, matching_size);
2769 
2770 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2771 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2772 		*service = IOService::copyMatchingService( dict );
2773 		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2774 	} else {
2775 		kr = kIOReturnBadArgument;
2776 	}
2777 
2778 	if (obj) {
2779 		obj->release();
2780 	}
2781 
2782 	return kr;
2783 }
2784 
2785 /* Routine io_service_get_matching_service */
2786 kern_return_t
2787 is_io_service_get_matching_service(
2788 	mach_port_t master_port,
2789 	io_string_t matching,
2790 	io_service_t *service )
2791 {
2792 	return kIOReturnUnsupported;
2793 }
2794 
2795 /* Routine io_service_get_matching_service_ool */
2796 kern_return_t
2797 is_io_service_get_matching_service_ool(
2798 	mach_port_t master_port,
2799 	io_buf_ptr_t matching,
2800 	mach_msg_type_number_t matchingCnt,
2801 	kern_return_t *result,
2802 	io_object_t *service )
2803 {
2804 	kern_return_t       kr;
2805 	vm_offset_t         data;
2806 	vm_map_offset_t     map_data;
2807 
2808 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2809 	data = CAST_DOWN(vm_offset_t, map_data);
2810 
2811 	if (KERN_SUCCESS == kr) {
2812 		// must return success after vm_map_copyout() succeeds
2813 		// and mig will copy out objects on success
2814 		*service = NULL;
2815 		*result = internal_io_service_get_matching_service(master_port,
2816 		    (const char *) data, matchingCnt, service );
2817 		vm_deallocate( kernel_map, data, matchingCnt );
2818 	}
2819 
2820 	return kr;
2821 }
2822 
2823 /* Routine io_service_get_matching_service_bin */
2824 kern_return_t
2825 is_io_service_get_matching_service_bin(
2826 	mach_port_t master_port,
2827 	io_struct_inband_t matching,
2828 	mach_msg_type_number_t matchingCnt,
2829 	io_object_t *service)
2830 {
2831 	return internal_io_service_get_matching_service(master_port, matching, matchingCnt, service);
2832 }
2833 
2834 static kern_return_t
2835 internal_io_service_add_notification(
2836 	mach_port_t master_port,
2837 	io_name_t notification_type,
2838 	const char * matching,
2839 	size_t matching_size,
2840 	mach_port_t port,
2841 	void * reference,
2842 	vm_size_t referenceSize,
2843 	bool client64,
2844 	io_object_t * notification )
2845 {
2846 	IOServiceUserNotification * userNotify = NULL;
2847 	IONotifier *                notify = NULL;
2848 	const OSSymbol *            sym;
2849 	OSObject *                  obj;
2850 	OSDictionary *              dict;
2851 	IOReturn                    err;
2852 	natural_t                   userMsgType;
2853 
2854 	if (master_port != master_device_port) {
2855 		return kIOReturnNotPrivileged;
2856 	}
2857 
2858 	do {
2859 		err = kIOReturnNoResources;
2860 
2861 		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2862 			return kIOReturnMessageTooLarge;
2863 		}
2864 
2865 		if (!(sym = OSSymbol::withCString( notification_type ))) {
2866 			err = kIOReturnNoResources;
2867 		}
2868 
2869 		assert(matching_size);
2870 		obj = OSUnserializeXML(matching, matching_size);
2871 		dict = OSDynamicCast(OSDictionary, obj);
2872 		if (!dict) {
2873 			err = kIOReturnBadArgument;
2874 			continue;
2875 		}
2876 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2877 
2878 		if ((sym == gIOPublishNotification)
2879 		    || (sym == gIOFirstPublishNotification)) {
2880 			userMsgType = kIOServicePublishNotificationType;
2881 		} else if ((sym == gIOMatchedNotification)
2882 		    || (sym == gIOFirstMatchNotification)) {
2883 			userMsgType = kIOServiceMatchedNotificationType;
2884 		} else if ((sym == gIOTerminatedNotification)
2885 		    || (sym == gIOWillTerminateNotification)) {
2886 			userMsgType = kIOServiceTerminatedNotificationType;
2887 		} else {
2888 			userMsgType = kLastIOKitNotificationType;
2889 		}
2890 
2891 		userNotify = new IOServiceUserNotification;
2892 
2893 		if (userNotify && !userNotify->init( port, userMsgType,
2894 		    reference, referenceSize, client64)) {
2895 			userNotify->release();
2896 			userNotify = NULL;
2897 		}
2898 		if (!userNotify) {
2899 			continue;
2900 		}
2901 
2902 		notify = IOService::addMatchingNotification( sym, dict,
2903 		    &userNotify->_handler, userNotify );
2904 		if (notify) {
2905 			*notification = userNotify;
2906 			userNotify->setNotification( notify );
2907 			err = kIOReturnSuccess;
2908 		} else {
2909 			err = kIOReturnUnsupported;
2910 		}
2911 	} while (false);
2912 
2913 	if ((kIOReturnSuccess != err) && userNotify) {
2914 		userNotify->invalidatePort();
2915 		userNotify->release();
2916 		userNotify = NULL;
2917 	}
2918 
2919 	if (sym) {
2920 		sym->release();
2921 	}
2922 	if (obj) {
2923 		obj->release();
2924 	}
2925 
2926 	return err;
2927 }
2928 
2929 
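/*
 * The add_notification entry points below differ only in how the matching
 * dictionary and async reference are transported.  The string-based
 * variants are no longer supported; the _bin/_ool variants copy the
 * caller-supplied reference into a zero-padded, fixed-size array before
 * calling internal_io_service_add_notification(), so the stored reference
 * is always a full io_async_ref_t or io_async_ref64_t.
 */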
2930 /* Routine io_service_add_notification */
2931 kern_return_t
2932 is_io_service_add_notification(
2933 	mach_port_t master_port,
2934 	io_name_t notification_type,
2935 	io_string_t matching,
2936 	mach_port_t port,
2937 	io_async_ref_t reference,
2938 	mach_msg_type_number_t referenceCnt,
2939 	io_object_t * notification )
2940 {
2941 	return kIOReturnUnsupported;
2942 }
2943 
2944 /* Routine io_service_add_notification_64 */
2945 kern_return_t
2946 is_io_service_add_notification_64(
2947 	mach_port_t master_port,
2948 	io_name_t notification_type,
2949 	io_string_t matching,
2950 	mach_port_t wake_port,
2951 	io_async_ref64_t reference,
2952 	mach_msg_type_number_t referenceCnt,
2953 	io_object_t *notification )
2954 {
2955 	return kIOReturnUnsupported;
2956 }
2957 
2958 /* Routine io_service_add_notification_bin */
2959 kern_return_t
2960 is_io_service_add_notification_bin
2961 (
2962 	mach_port_t master_port,
2963 	io_name_t notification_type,
2964 	io_struct_inband_t matching,
2965 	mach_msg_type_number_t matchingCnt,
2966 	mach_port_t wake_port,
2967 	io_async_ref_t reference,
2968 	mach_msg_type_number_t referenceCnt,
2969 	io_object_t *notification)
2970 {
2971 	io_async_ref_t zreference;
2972 
2973 	if (referenceCnt > ASYNC_REF_COUNT) {
2974 		return kIOReturnBadArgument;
2975 	}
2976 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2977 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2978 
2979 	return internal_io_service_add_notification(master_port, notification_type,
2980 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2981 	           false, notification);
2982 }
2983 
2984 /* Routine io_service_add_notification_bin_64 */
2985 kern_return_t
2986 is_io_service_add_notification_bin_64
2987 (
2988 	mach_port_t master_port,
2989 	io_name_t notification_type,
2990 	io_struct_inband_t matching,
2991 	mach_msg_type_number_t matchingCnt,
2992 	mach_port_t wake_port,
2993 	io_async_ref64_t reference,
2994 	mach_msg_type_number_t referenceCnt,
2995 	io_object_t *notification)
2996 {
2997 	io_async_ref64_t zreference;
2998 
2999 	if (referenceCnt > ASYNC_REF64_COUNT) {
3000 		return kIOReturnBadArgument;
3001 	}
3002 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3003 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3004 
3005 	return internal_io_service_add_notification(master_port, notification_type,
3006 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3007 	           true, notification);
3008 }
3009 
3010 static kern_return_t
3011 internal_io_service_add_notification_ool(
3012 	mach_port_t master_port,
3013 	io_name_t notification_type,
3014 	io_buf_ptr_t matching,
3015 	mach_msg_type_number_t matchingCnt,
3016 	mach_port_t wake_port,
3017 	void * reference,
3018 	vm_size_t referenceSize,
3019 	bool client64,
3020 	kern_return_t *result,
3021 	io_object_t *notification )
3022 {
3023 	kern_return_t       kr;
3024 	vm_offset_t         data;
3025 	vm_map_offset_t     map_data;
3026 
3027 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
3028 	data = CAST_DOWN(vm_offset_t, map_data);
3029 
3030 	if (KERN_SUCCESS == kr) {
3031 		// must return success after vm_map_copyout() succeeds
3032 		// and mig will copy out objects on success
3033 		*notification = NULL;
3034 		*result = internal_io_service_add_notification( master_port, notification_type,
3035 		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
3036 		vm_deallocate( kernel_map, data, matchingCnt );
3037 	}
3038 
3039 	return kr;
3040 }
3041 
3042 /* Routine io_service_add_notification_ool */
3043 kern_return_t
3044 is_io_service_add_notification_ool(
3045 	mach_port_t master_port,
3046 	io_name_t notification_type,
3047 	io_buf_ptr_t matching,
3048 	mach_msg_type_number_t matchingCnt,
3049 	mach_port_t wake_port,
3050 	io_async_ref_t reference,
3051 	mach_msg_type_number_t referenceCnt,
3052 	kern_return_t *result,
3053 	io_object_t *notification )
3054 {
3055 	io_async_ref_t zreference;
3056 
3057 	if (referenceCnt > ASYNC_REF_COUNT) {
3058 		return kIOReturnBadArgument;
3059 	}
3060 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3061 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3062 
3063 	return internal_io_service_add_notification_ool(master_port, notification_type,
3064 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3065 	           false, result, notification);
3066 }
3067 
3068 /* Routine io_service_add_notification_ool_64 */
3069 kern_return_t
3070 is_io_service_add_notification_ool_64(
3071 	mach_port_t master_port,
3072 	io_name_t notification_type,
3073 	io_buf_ptr_t matching,
3074 	mach_msg_type_number_t matchingCnt,
3075 	mach_port_t wake_port,
3076 	io_async_ref64_t reference,
3077 	mach_msg_type_number_t referenceCnt,
3078 	kern_return_t *result,
3079 	io_object_t *notification )
3080 {
3081 	io_async_ref64_t zreference;
3082 
3083 	if (referenceCnt > ASYNC_REF64_COUNT) {
3084 		return kIOReturnBadArgument;
3085 	}
3086 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3087 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3088 
3089 	return internal_io_service_add_notification_ool(master_port, notification_type,
3090 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3091 	           true, result, notification);
3092 }
3093 
3094 /* Routine io_service_add_notification_old */
3095 kern_return_t
3096 is_io_service_add_notification_old(
3097 	mach_port_t master_port,
3098 	io_name_t notification_type,
3099 	io_string_t matching,
3100 	mach_port_t port,
3101 	// for binary compatibility reasons, this must be natural_t for ILP32
3102 	natural_t ref,
3103 	io_object_t * notification )
3104 {
3105 	return is_io_service_add_notification( master_port, notification_type,
3106 	           matching, port, &ref, 1, notification );
3107 }
3108 
3109 
3110 static kern_return_t
3111 internal_io_service_add_interest_notification(
3112 	io_object_t _service,
3113 	io_name_t type_of_interest,
3114 	mach_port_t port,
3115 	void * reference,
3116 	vm_size_t referenceSize,
3117 	bool client64,
3118 	io_object_t * notification )
3119 {
3120 	IOServiceMessageUserNotification *  userNotify = NULL;
3121 	IONotifier *                        notify = NULL;
3122 	const OSSymbol *                    sym;
3123 	IOReturn                            err;
3124 
3125 	CHECK( IOService, _service, service );
3126 
3127 	err = kIOReturnNoResources;
3128 	if ((sym = OSSymbol::withCString( type_of_interest ))) {
3129 		do {
3130 			userNotify = new IOServiceMessageUserNotification;
3131 
3132 			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
3133 			    reference, referenceSize,
3134 			    kIOUserNotifyMaxMessageSize,
3135 			    client64 )) {
3136 				userNotify->release();
3137 				userNotify = NULL;
3138 			}
3139 			if (!userNotify) {
3140 				continue;
3141 			}
3142 
3143 			notify = service->registerInterest( sym,
3144 			    &userNotify->_handler, userNotify );
3145 			if (notify) {
3146 				*notification = userNotify;
3147 				userNotify->setNotification( notify );
3148 				err = kIOReturnSuccess;
3149 			} else {
3150 				err = kIOReturnUnsupported;
3151 			}
3152 
3153 			sym->release();
3154 		} while (false);
3155 	}
3156 
3157 	if ((kIOReturnSuccess != err) && userNotify) {
3158 		userNotify->invalidatePort();
3159 		userNotify->release();
3160 		userNotify = NULL;
3161 	}
3162 
3163 	return err;
3164 }
3165 
3166 /* Routine io_service_add_interest_notification */
3167 kern_return_t
3168 is_io_service_add_interest_notification(
3169 	io_object_t service,
3170 	io_name_t type_of_interest,
3171 	mach_port_t port,
3172 	io_async_ref_t reference,
3173 	mach_msg_type_number_t referenceCnt,
3174 	io_object_t * notification )
3175 {
3176 	io_async_ref_t zreference;
3177 
3178 	if (referenceCnt > ASYNC_REF_COUNT) {
3179 		return kIOReturnBadArgument;
3180 	}
3181 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3182 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3183 
3184 	return internal_io_service_add_interest_notification(service, type_of_interest,
3185 	           port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3186 }
3187 
3188 /* Routine io_service_add_interest_notification_64 */
3189 kern_return_t
3190 is_io_service_add_interest_notification_64(
3191 	io_object_t service,
3192 	io_name_t type_of_interest,
3193 	mach_port_t wake_port,
3194 	io_async_ref64_t reference,
3195 	mach_msg_type_number_t referenceCnt,
3196 	io_object_t *notification )
3197 {
3198 	io_async_ref64_t zreference;
3199 
3200 	if (referenceCnt > ASYNC_REF64_COUNT) {
3201 		return kIOReturnBadArgument;
3202 	}
3203 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3204 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3205 
3206 	return internal_io_service_add_interest_notification(service, type_of_interest,
3207 	           wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3208 }
3209 
3210 
3211 /* Routine io_service_acknowledge_notification */
3212 kern_return_t
3213 is_io_service_acknowledge_notification(
3214 	io_object_t _service,
3215 	natural_t notify_ref,
3216 	natural_t response )
3217 {
3218 	CHECK( IOService, _service, service );
3219 
3220 	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3221 	           (IOOptionBits) response );
3222 }
3223 
3224 /* Routine io_connect_get_notification_semaphore */
3225 kern_return_t
3226 is_io_connect_get_notification_semaphore(
3227 	io_connect_t connection,
3228 	natural_t notification_type,
3229 	semaphore_t *semaphore )
3230 {
3231 	IOReturn ret;
3232 	CHECK( IOUserClient, connection, client );
3233 
3234 	IOStatisticsClientCall();
3235 	IORWLockWrite(client->lock);
3236 	ret = client->getNotificationSemaphore((UInt32) notification_type,
3237 	    semaphore );
3238 	IORWLockUnlock(client->lock);
3239 
3240 	return ret;
3241 }
3242 
3243 /* Routine io_registry_get_root_entry */
3244 kern_return_t
3245 is_io_registry_get_root_entry(
3246 	mach_port_t master_port,
3247 	io_object_t *root )
3248 {
3249 	IORegistryEntry *   entry;
3250 
3251 	if (master_port != master_device_port) {
3252 		return kIOReturnNotPrivileged;
3253 	}
3254 
3255 	entry = IORegistryEntry::getRegistryRoot();
3256 	if (entry) {
3257 		entry->retain();
3258 	}
3259 	*root = entry;
3260 
3261 	return kIOReturnSuccess;
3262 }
3263 
3264 /* Routine io_registry_create_iterator */
3265 kern_return_t
3266 is_io_registry_create_iterator(
3267 	mach_port_t master_port,
3268 	io_name_t plane,
3269 	uint32_t options,
3270 	io_object_t *iterator )
3271 {
3272 	if (master_port != master_device_port) {
3273 		return kIOReturnNotPrivileged;
3274 	}
3275 
3276 	*iterator = IOUserIterator::withIterator(
3277 		IORegistryIterator::iterateOver(
3278 			IORegistryEntry::getPlane( plane ), options ));
3279 
3280 	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3281 }
3282 
3283 /* Routine io_registry_entry_create_iterator */
3284 kern_return_t
3285 is_io_registry_entry_create_iterator(
3286 	io_object_t registry_entry,
3287 	io_name_t plane,
3288 	uint32_t options,
3289 	io_object_t *iterator )
3290 {
3291 	CHECK( IORegistryEntry, registry_entry, entry );
3292 
3293 	*iterator = IOUserIterator::withIterator(
3294 		IORegistryIterator::iterateOver( entry,
3295 		IORegistryEntry::getPlane( plane ), options ));
3296 
3297 	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3298 }
3299 
3300 /* Routine io_registry_iterator_enter_entry */
3301 kern_return_t
3302 is_io_registry_iterator_enter_entry(
3303 	io_object_t iterator )
3304 {
3305 	CHECKLOCKED( IORegistryIterator, iterator, iter );
3306 
3307 	IOLockLock(oIter->lock);
3308 	iter->enterEntry();
3309 	IOLockUnlock(oIter->lock);
3310 
3311 	return kIOReturnSuccess;
3312 }
3313 
3314 /* Routine io_registry_iterator_exit_entry */
3315 kern_return_t
3316 is_io_registry_iterator_exit_entry(
3317 	io_object_t iterator )
3318 {
3319 	bool        didIt;
3320 
3321 	CHECKLOCKED( IORegistryIterator, iterator, iter );
3322 
3323 	IOLockLock(oIter->lock);
3324 	didIt = iter->exitEntry();
3325 	IOLockUnlock(oIter->lock);
3326 
3327 	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
3328 }
3329 
3330 /* Routine io_registry_entry_from_path */
3331 kern_return_t
3332 is_io_registry_entry_from_path(
3333 	mach_port_t master_port,
3334 	io_string_t path,
3335 	io_object_t *registry_entry )
3336 {
3337 	IORegistryEntry *   entry;
3338 
3339 	if (master_port != master_device_port) {
3340 		return kIOReturnNotPrivileged;
3341 	}
3342 
3343 	entry = IORegistryEntry::fromPath( path );
3344 
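	/*
	 * For tasks running in registry compatibility mode, fall back to a
	 * matching lookup keyed on gIOCompatibilityMatchKey and gIOPathMatchKey,
	 * so paths that only resolve against compatibility properties can still
	 * be found.
	 */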
3345 	if (!entry && IOTaskRegistryCompatibility(current_task())) {
3346 		OSDictionary * matching;
3347 		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
3348 		const OSSymbol * keys[2]    = { gIOCompatibilityMatchKey, gIOPathMatchKey };
3349 
3350 		objects[1] = OSString::withCStringNoCopy(path);
3351 		matching = OSDictionary::withObjects(objects, keys, 2, 2);
3352 		if (matching) {
3353 			entry = IOService::copyMatchingService(matching);
3354 		}
3355 		OSSafeReleaseNULL(matching);
3356 		OSSafeReleaseNULL(objects[1]);
3357 	}
3358 
3359 	*registry_entry = entry;
3360 
3361 	return kIOReturnSuccess;
3362 }
3363 
3364 
3365 /* Routine io_registry_entry_from_path_ool */
3366 kern_return_t
3367 is_io_registry_entry_from_path_ool(
3368 	mach_port_t master_port,
3369 	io_string_inband_t path,
3370 	io_buf_ptr_t path_ool,
3371 	mach_msg_type_number_t path_oolCnt,
3372 	kern_return_t *result,
3373 	io_object_t *registry_entry)
3374 {
3375 	IORegistryEntry *   entry;
3376 	vm_map_offset_t     map_data;
3377 	const char *        cpath;
3378 	IOReturn            res;
3379 	kern_return_t       err;
3380 
3381 	if (master_port != master_device_port) {
3382 		return kIOReturnNotPrivileged;
3383 	}
3384 
3385 	map_data = 0;
3386 	entry    = NULL;
3387 	res = err = KERN_SUCCESS;
3388 	if (path[0]) {
3389 		cpath = path;
3390 	} else {
3391 		if (!path_oolCnt) {
3392 			return kIOReturnBadArgument;
3393 		}
3394 		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
3395 			return kIOReturnMessageTooLarge;
3396 		}
3397 
3398 		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
3399 		if (KERN_SUCCESS == err) {
3400 			// must return success to mig after vm_map_copyout() succeeds, so result is actual
3401 			cpath = CAST_DOWN(const char *, map_data);
3402 			if (cpath[path_oolCnt - 1]) {
3403 				res = kIOReturnBadArgument;
3404 			}
3405 		}
3406 	}
3407 
3408 	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
3409 		entry = IORegistryEntry::fromPath(cpath);
3410 		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
3411 	}
3412 
3413 	if (map_data) {
3414 		vm_deallocate(kernel_map, map_data, path_oolCnt);
3415 	}
3416 
3417 	if (KERN_SUCCESS != err) {
3418 		res = err;
3419 	}
3420 	*registry_entry = entry;
3421 	*result = res;
3422 
3423 	return err;
3424 }
3425 
3426 
3427 /* Routine io_registry_entry_in_plane */
3428 kern_return_t
3429 is_io_registry_entry_in_plane(
3430 	io_object_t registry_entry,
3431 	io_name_t plane,
3432 	boolean_t *inPlane )
3433 {
3434 	CHECK( IORegistryEntry, registry_entry, entry );
3435 
3436 	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3437 
3438 	return kIOReturnSuccess;
3439 }
3440 
3441 
3442 /* Routine io_registry_entry_get_path */
3443 kern_return_t
3444 is_io_registry_entry_get_path(
3445 	io_object_t registry_entry,
3446 	io_name_t plane,
3447 	io_string_t path )
3448 {
3449 	int         length;
3450 	CHECK( IORegistryEntry, registry_entry, entry );
3451 
3452 	length = sizeof(io_string_t);
3453 	if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3454 		return kIOReturnSuccess;
3455 	} else {
3456 		return kIOReturnBadArgument;
3457 	}
3458 }
3459 
3460 /* Routine io_registry_entry_get_path_ool */
3461 kern_return_t
3462 is_io_registry_entry_get_path_ool(
3463 	io_object_t registry_entry,
3464 	io_name_t plane,
3465 	io_string_inband_t path,
3466 	io_buf_ptr_t *path_ool,
3467 	mach_msg_type_number_t *path_oolCnt)
3468 {
3469 	enum   { kMaxPath = 16384 };
3470 	IOReturn err;
3471 	int      length;
3472 	char   * buf;
3473 
3474 	CHECK( IORegistryEntry, registry_entry, entry );
3475 
3476 	*path_ool    = NULL;
3477 	*path_oolCnt = 0;
3478 	length = sizeof(io_string_inband_t);
3479 	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
3480 		err = kIOReturnSuccess;
3481 	} else {
3482 		length = kMaxPath;
3483 		buf = IONew(char, length);
3484 		if (!buf) {
3485 			err = kIOReturnNoMemory;
3486 		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
3487 			err = kIOReturnError;
3488 		} else {
3489 			*path_oolCnt = length;
3490 			err = copyoutkdata(buf, length, path_ool);
3491 		}
3492 		if (buf) {
3493 			IODelete(buf, char, kMaxPath);
3494 		}
3495 	}
3496 
3497 	return err;
3498 }
3499 
3500 
3501 /* Routine io_registry_entry_get_name */
3502 kern_return_t
3503 is_io_registry_entry_get_name(
3504 	io_object_t registry_entry,
3505 	io_name_t name )
3506 {
3507 	CHECK( IORegistryEntry, registry_entry, entry );
3508 
3509 	strncpy( name, entry->getName(), sizeof(io_name_t));
3510 
3511 	return kIOReturnSuccess;
3512 }
3513 
3514 /* Routine io_registry_entry_get_name_in_plane */
3515 kern_return_t
3516 is_io_registry_entry_get_name_in_plane(
3517 	io_object_t registry_entry,
3518 	io_name_t planeName,
3519 	io_name_t name )
3520 {
3521 	const IORegistryPlane * plane;
3522 	CHECK( IORegistryEntry, registry_entry, entry );
3523 
3524 	if (planeName[0]) {
3525 		plane = IORegistryEntry::getPlane( planeName );
3526 	} else {
3527 		plane = NULL;
3528 	}
3529 
3530 	strncpy( name, entry->getName( plane), sizeof(io_name_t));
3531 
3532 	return kIOReturnSuccess;
3533 }
3534 
3535 /* Routine io_registry_entry_get_location_in_plane */
3536 kern_return_t
3537 is_io_registry_entry_get_location_in_plane(
3538 	io_object_t registry_entry,
3539 	io_name_t planeName,
3540 	io_name_t location )
3541 {
3542 	const IORegistryPlane * plane;
3543 	CHECK( IORegistryEntry, registry_entry, entry );
3544 
3545 	if (planeName[0]) {
3546 		plane = IORegistryEntry::getPlane( planeName );
3547 	} else {
3548 		plane = NULL;
3549 	}
3550 
3551 	const char * cstr = entry->getLocation( plane );
3552 
3553 	if (cstr) {
3554 		strncpy( location, cstr, sizeof(io_name_t));
3555 		return kIOReturnSuccess;
3556 	} else {
3557 		return kIOReturnNotFound;
3558 	}
3559 }
3560 
3561 /* Routine io_registry_entry_get_registry_entry_id */
3562 kern_return_t
3563 is_io_registry_entry_get_registry_entry_id(
3564 	io_object_t registry_entry,
3565 	uint64_t *entry_id )
3566 {
3567 	CHECK( IORegistryEntry, registry_entry, entry );
3568 
3569 	*entry_id = entry->getRegistryEntryID();
3570 
3571 	return kIOReturnSuccess;
3572 }
3573 
3574 
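/*
 * Property lookup that honors per-task registry compatibility: if the entry
 * has no property with the given name and the calling task is in
 * compatibility mode, the lookup also consults the entry's
 * gIOCompatibilityPropertiesKey dictionary.  Returns a retained object or
 * NULL.
 */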
3575 static OSObject *
3576 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3577 {
3578 	OSObject     * obj;
3579 	OSObject     * compatProps;
3580 	OSDictionary * props;
3581 
3582 	obj = regEntry->copyProperty(name);
3583 	if (!obj
3584 	    && IOTaskRegistryCompatibility(current_task())
3585 	    && (compatProps = regEntry->copyProperty(gIOCompatibilityPropertiesKey))) {
3586 		props = OSDynamicCast(OSDictionary, compatProps);
3587 		if (props) {
3588 			obj = props->getObject(name);
3589 			if (obj) {
3590 				obj->retain();
3591 			}
3592 		}
3593 		compatProps->release();
3594 	}
3595 
3596 	return obj;
3597 }
3598 
3599 /* Routine io_registry_entry_get_property_bytes */
3600 kern_return_t
3601 is_io_registry_entry_get_property_bytes(
3602 	io_object_t registry_entry,
3603 	io_name_t property_name,
3604 	io_struct_inband_t buf,
3605 	mach_msg_type_number_t *dataCnt )
3606 {
3607 	OSObject    *       obj;
3608 	OSData      *       data;
3609 	OSString    *       str;
3610 	OSBoolean   *       boo;
3611 	OSNumber    *       off;
3612 	UInt64              offsetBytes;
3613 	unsigned int        len = 0;
3614 	const void *        bytes = NULL;
3615 	IOReturn            ret = kIOReturnSuccess;
3616 
3617 	CHECK( IORegistryEntry, registry_entry, entry );
3618 
3619 #if CONFIG_MACF
3620 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3621 		return kIOReturnNotPermitted;
3622 	}
3623 #endif
3624 
3625 	obj = IOCopyPropertyCompatible(entry, property_name);
3626 	if (!obj) {
3627 		return kIOReturnNoResources;
3628 	}
3629 
3630 	// One day OSData will be a common container base class
3631 	// until then...
3632 	if ((data = OSDynamicCast( OSData, obj ))) {
3633 		len = data->getLength();
3634 		bytes = data->getBytesNoCopy();
3635 		if (!data->isSerializable()) {
3636 			len = 0;
3637 		}
3638 	} else if ((str = OSDynamicCast( OSString, obj ))) {
3639 		len = str->getLength() + 1;
3640 		bytes = str->getCStringNoCopy();
3641 	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
3642 		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
3643 		bytes = boo->isTrue() ? "Yes" : "No";
3644 	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
3645 		offsetBytes = off->unsigned64BitValue();
3646 		len = off->numberOfBytes();
3647 		if (len > sizeof(offsetBytes)) {
3648 			len = sizeof(offsetBytes);
3649 		}
3650 		bytes = &offsetBytes;
3651 #ifdef __BIG_ENDIAN__
3652 		bytes = (const void *)
3653 		    (((UInt32) bytes) + (sizeof(UInt64) - len));
3654 #endif
3655 	} else {
3656 		ret = kIOReturnBadArgument;
3657 	}
3658 
3659 	if (bytes) {
3660 		if (*dataCnt < len) {
3661 			ret = kIOReturnIPCError;
3662 		} else {
3663 			*dataCnt = len;
3664 			bcopy( bytes, buf, len );
3665 		}
3666 	}
3667 	obj->release();
3668 
3669 	return ret;
3670 }
3671 
3672 
3673 /* Routine io_registry_entry_get_property */
3674 kern_return_t
3675 is_io_registry_entry_get_property(
3676 	io_object_t registry_entry,
3677 	io_name_t property_name,
3678 	io_buf_ptr_t *properties,
3679 	mach_msg_type_number_t *propertiesCnt )
3680 {
3681 	kern_return_t       err;
3682 	unsigned int        len;
3683 	OSObject *          obj;
3684 
3685 	CHECK( IORegistryEntry, registry_entry, entry );
3686 
3687 #if CONFIG_MACF
3688 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3689 		return kIOReturnNotPermitted;
3690 	}
3691 #endif
3692 
3693 	obj = IOCopyPropertyCompatible(entry, property_name);
3694 	if (!obj) {
3695 		return kIOReturnNotFound;
3696 	}
3697 
3698 	OSSerialize * s = OSSerialize::withCapacity(4096);
3699 	if (!s) {
3700 		obj->release();
3701 		return kIOReturnNoMemory;
3702 	}
3703 
3704 	if (obj->serialize( s )) {
3705 		len = s->getLength();
3706 		*propertiesCnt = len;
3707 		err = copyoutkdata( s->text(), len, properties );
3708 	} else {
3709 		err = kIOReturnUnsupported;
3710 	}
3711 
3712 	s->release();
3713 	obj->release();
3714 
3715 	return err;
3716 }
3717 
3718 /* Routine io_registry_entry_get_property_recursively */
3719 kern_return_t
3720 is_io_registry_entry_get_property_recursively(
3721 	io_object_t registry_entry,
3722 	io_name_t plane,
3723 	io_name_t property_name,
3724 	uint32_t options,
3725 	io_buf_ptr_t *properties,
3726 	mach_msg_type_number_t *propertiesCnt )
3727 {
3728 	kern_return_t       err;
3729 	unsigned int        len;
3730 	OSObject *          obj;
3731 
3732 	CHECK( IORegistryEntry, registry_entry, entry );
3733 
3734 #if CONFIG_MACF
3735 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3736 		return kIOReturnNotPermitted;
3737 	}
3738 #endif
3739 
3740 	obj = entry->copyProperty( property_name,
3741 	    IORegistryEntry::getPlane( plane ), options );
3742 	if (!obj) {
3743 		return kIOReturnNotFound;
3744 	}
3745 
3746 	OSSerialize * s = OSSerialize::withCapacity(4096);
3747 	if (!s) {
3748 		obj->release();
3749 		return kIOReturnNoMemory;
3750 	}
3751 
3752 	if (obj->serialize( s )) {
3753 		len = s->getLength();
3754 		*propertiesCnt = len;
3755 		err = copyoutkdata( s->text(), len, properties );
3756 	} else {
3757 		err = kIOReturnUnsupported;
3758 	}
3759 
3760 	s->release();
3761 	obj->release();
3762 
3763 	return err;
3764 }
3765 
3766 /* Routine io_registry_entry_get_properties */
3767 kern_return_t
3768 is_io_registry_entry_get_properties(
3769 	io_object_t registry_entry,
3770 	io_buf_ptr_t *properties,
3771 	mach_msg_type_number_t *propertiesCnt )
3772 {
3773 	return kIOReturnUnsupported;
3774 }
3775 
3776 #if CONFIG_MACF
3777 
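/*
 * When the MAC policy requests property filtering, GetPropertiesEditor() is
 * installed as the OSSerialize editor.  For keys in the root collection it
 * consults mac_iokit_check_get_property() and returns NULL for any value the
 * policy denies, so denied values are left out of the serialized properties.
 */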
3778 struct GetPropertiesEditorRef {
3779 	kauth_cred_t      cred;
3780 	IORegistryEntry * entry;
3781 	OSCollection    * root;
3782 };
3783 
3784 static const OSMetaClassBase *
3785 GetPropertiesEditor(void                  * reference,
3786     OSSerialize           * s,
3787     OSCollection          * container,
3788     const OSSymbol        * name,
3789     const OSMetaClassBase * value)
3790 {
3791 	GetPropertiesEditorRef * ref = (typeof(ref))reference;
3792 
3793 	if (!ref->root) {
3794 		ref->root = container;
3795 	}
3796 	if (ref->root == container) {
3797 		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
3798 			value = NULL;
3799 		}
3800 	}
3801 	if (value) {
3802 		value->retain();
3803 	}
3804 	return value;
3805 }
3806 
3807 #endif /* CONFIG_MACF */
3808 
3809 /* Routine io_registry_entry_get_properties_bin_buf */
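/*
 * Serializes the entry's property table in binary format.  If the caller
 * supplied an inline buffer (buf/bufsize) large enough for the result, the
 * data is copied out there and *propertiesCnt is left at 0; otherwise it is
 * returned out-of-line via 'properties'.  For tasks in registry
 * compatibility mode, the gIOCompatibilityPropertiesKey dictionary is merged
 * over the entry's regular properties before serialization.
 */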
3810 kern_return_t
3811 is_io_registry_entry_get_properties_bin_buf(
3812 	io_object_t registry_entry,
3813 	mach_vm_address_t buf,
3814 	mach_vm_size_t *bufsize,
3815 	io_buf_ptr_t *properties,
3816 	mach_msg_type_number_t *propertiesCnt)
3817 {
3818 	kern_return_t          err = kIOReturnSuccess;
3819 	unsigned int           len;
3820 	OSObject             * compatProperties;
3821 	OSSerialize          * s;
3822 	OSSerialize::Editor    editor = NULL;
3823 	void                 * editRef = NULL;
3824 
3825 	CHECK(IORegistryEntry, registry_entry, entry);
3826 
3827 #if CONFIG_MACF
3828 	GetPropertiesEditorRef ref;
3829 	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
3830 		editor    = &GetPropertiesEditor;
3831 		editRef   = &ref;
3832 		ref.cred  = kauth_cred_get();
3833 		ref.entry = entry;
3834 		ref.root  = NULL;
3835 	}
3836 #endif
3837 
3838 	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3839 	if (!s) {
3840 		return kIOReturnNoMemory;
3841 	}
3842 
3843 	if (IOTaskRegistryCompatibility(current_task())
3844 	    && (compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey))) {
3845 		OSDictionary * dict;
3846 
3847 		dict = entry->dictionaryWithProperties();
3848 		if (!dict) {
3849 			err = kIOReturnNoMemory;
3850 		} else {
3851 			dict->removeObject(gIOCompatibilityPropertiesKey);
3852 			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
3853 			if (!dict->serialize(s)) {
3854 				err = kIOReturnUnsupported;
3855 			}
3856 			dict->release();
3857 		}
3858 		compatProperties->release();
3859 	} else if (!entry->serializeProperties(s)) {
3860 		err = kIOReturnUnsupported;
3861 	}
3862 
3863 	if (kIOReturnSuccess == err) {
3864 		len = s->getLength();
3865 		if (buf && bufsize && len <= *bufsize) {
3866 			*bufsize = len;
3867 			*propertiesCnt = 0;
3868 			*properties = nullptr;
3869 			if (copyout(s->text(), buf, len)) {
3870 				err = kIOReturnVMError;
3871 			} else {
3872 				err = kIOReturnSuccess;
3873 			}
3874 		} else {
3875 			if (bufsize) {
3876 				*bufsize = 0;
3877 			}
3878 			*propertiesCnt = len;
3879 			err = copyoutkdata( s->text(), len, properties );
3880 		}
3881 	}
3882 	s->release();
3883 
3884 	return err;
3885 }
3886 
3887 /* Routine io_registry_entry_get_properties_bin */
3888 kern_return_t
3889 is_io_registry_entry_get_properties_bin(
3890 	io_object_t registry_entry,
3891 	io_buf_ptr_t *properties,
3892 	mach_msg_type_number_t *propertiesCnt)
3893 {
3894 	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
3895 	           0, NULL, properties, propertiesCnt);
3896 }
3897 
3898 /* Routine io_registry_entry_get_property_bin_buf */
3899 kern_return_t
3900 is_io_registry_entry_get_property_bin_buf(
3901 	io_object_t registry_entry,
3902 	io_name_t plane,
3903 	io_name_t property_name,
3904 	uint32_t options,
3905 	mach_vm_address_t buf,
3906 	mach_vm_size_t *bufsize,
3907 	io_buf_ptr_t *properties,
3908 	mach_msg_type_number_t *propertiesCnt )
3909 {
3910 	kern_return_t       err;
3911 	unsigned int        len;
3912 	OSObject *          obj;
3913 	const OSSymbol *    sym;
3914 
3915 	CHECK( IORegistryEntry, registry_entry, entry );
3916 
3917 #if CONFIG_MACF
3918 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3919 		return kIOReturnNotPermitted;
3920 	}
3921 #endif
3922 
3923 	sym = OSSymbol::withCString(property_name);
3924 	if (!sym) {
3925 		return kIOReturnNoMemory;
3926 	}
3927 
3928 	if (gIORegistryEntryPropertyKeysKey == sym) {
3929 		obj = entry->copyPropertyKeys();
3930 	} else {
3931 		if ((kIORegistryIterateRecursively & options) && plane[0]) {
3932 			if (!IOTaskRegistryCompatibility(current_task())) {
3933 				obj = entry->copyProperty(property_name,
3934 				    IORegistryEntry::getPlane(plane), options);
3935 			} else {
3936 				obj = IOCopyPropertyCompatible(entry, property_name);
3937 				if ((NULL == obj) && plane && (options & kIORegistryIterateRecursively)) {
3938 					IORegistryIterator * iter;
3939 					iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
3940 					if (iter) {
3941 						while ((NULL == obj) && (entry = iter->getNextObject())) {
3942 							obj = IOCopyPropertyCompatible(entry, property_name);
3943 						}
3944 						iter->release();
3945 					}
3946 				}
3947 			}
3948 		} else {
3949 			obj = IOCopyPropertyCompatible(entry, property_name);
3950 		}
3951 		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
3952 			entry->removeProperty(sym);
3953 		}
3954 	}
3955 
3956 	sym->release();
3957 	if (!obj) {
3958 		return kIOReturnNotFound;
3959 	}
3960 
3961 	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3962 	if (!s) {
3963 		obj->release();
3964 		return kIOReturnNoMemory;
3965 	}
3966 
3967 	if (obj->serialize( s )) {
3968 		len = s->getLength();
3969 		if (buf && bufsize && len <= *bufsize) {
3970 			*bufsize = len;
3971 			*propertiesCnt = 0;
3972 			*properties = nullptr;
3973 			if (copyout(s->text(), buf, len)) {
3974 				err = kIOReturnVMError;
3975 			} else {
3976 				err = kIOReturnSuccess;
3977 			}
3978 		} else {
3979 			if (bufsize) {
3980 				*bufsize = 0;
3981 			}
3982 			*propertiesCnt = len;
3983 			err = copyoutkdata( s->text(), len, properties );
3984 		}
3985 	} else {
3986 		err = kIOReturnUnsupported;
3987 	}
3988 
3989 	s->release();
3990 	obj->release();
3991 
3992 	return err;
3993 }
3994 
3995 /* Routine io_registry_entry_get_property_bin */
3996 kern_return_t
3997 is_io_registry_entry_get_property_bin(
3998 	io_object_t registry_entry,
3999 	io_name_t plane,
4000 	io_name_t property_name,
4001 	uint32_t options,
4002 	io_buf_ptr_t *properties,
4003 	mach_msg_type_number_t *propertiesCnt )
4004 {
4005 	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
4006 	           property_name, options, 0, NULL, properties, propertiesCnt);
4007 }
4008 
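/*
 * User-space sketch (not part of this file, assumptions noted): the
 * *_get_property_bin routines above are what IORegistryEntryCreateCFProperty()
 * (and the recursive IORegistryEntrySearchCFProperty()) in IOKitLib end up
 * calling; the binary-serialized bytes are turned back into CF objects in the
 * calling task. A minimal call, assuming "entry" is an io_registry_entry_t the
 * caller already holds:
 *
 *   CFTypeRef value = IORegistryEntryCreateCFProperty(entry,
 *       CFSTR("IOClass"), kCFAllocatorDefault, 0);
 *   if (value) {
 *       // ... use the property ...
 *       CFRelease(value);
 *   }
 */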
4009 
4010 /* Routine io_registry_entry_set_properties */
4011 kern_return_t
4012 is_io_registry_entry_set_properties
4013 (
4014 	io_object_t registry_entry,
4015 	io_buf_ptr_t properties,
4016 	mach_msg_type_number_t propertiesCnt,
4017 	kern_return_t * result)
4018 {
4019 	OSObject *          obj;
4020 	kern_return_t       err;
4021 	IOReturn            res;
4022 	vm_offset_t         data;
4023 	vm_map_offset_t     map_data;
4024 
4025 	CHECK( IORegistryEntry, registry_entry, entry );
4026 
4027 	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
4028 		return kIOReturnMessageTooLarge;
4029 	}
4030 
4031 	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
4032 	data = CAST_DOWN(vm_offset_t, map_data);
4033 
4034 	if (KERN_SUCCESS == err) {
4035 		FAKE_STACK_FRAME(entry->getMetaClass());
4036 
4037 		// must return success after vm_map_copyout() succeeds
4038 		obj = OSUnserializeXML((const char *) data, propertiesCnt );
4039 		vm_deallocate( kernel_map, data, propertiesCnt );
4040 
4041 		if (!obj) {
4042 			res = kIOReturnBadArgument;
4043 		}
4044 #if CONFIG_MACF
4045 		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
4046 		    registry_entry, obj)) {
4047 			res = kIOReturnNotPermitted;
4048 		}
4049 #endif
4050 		else {
4051 			res = entry->setProperties( obj );
4052 		}
4053 
4054 		if (obj) {
4055 			obj->release();
4056 		}
4057 
4058 		FAKE_STACK_FRAME_END();
4059 	} else {
4060 		res = err;
4061 	}
4062 
4063 	*result = res;
4064 	return err;
4065 }
4066 
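/*
 * User-space sketch (not part of this file): IORegistryEntrySetCFProperties()
 * serializes a CF object to XML and hands it to the routine above, which
 * unserializes it with OSUnserializeXML() and calls setProperties(). Hedged
 * example; the key name is made up and the caller must pass the MAC policy
 * check above:
 *
 *   CFMutableDictionaryRef props = CFDictionaryCreateMutable(NULL, 0,
 *       &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
 *   CFDictionarySetValue(props, CFSTR("ExampleFlag"), kCFBooleanTrue);
 *   kern_return_t kr = IORegistryEntrySetCFProperties(entry, props);
 *   CFRelease(props);
 */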
4067 /* Routine io_registry_entry_get_child_iterator */
4068 kern_return_t
4069 is_io_registry_entry_get_child_iterator(
4070 	io_object_t registry_entry,
4071 	io_name_t plane,
4072 	io_object_t *iterator )
4073 {
4074 	CHECK( IORegistryEntry, registry_entry, entry );
4075 
4076 	*iterator = IOUserIterator::withIterator(entry->getChildIterator(
4077 		    IORegistryEntry::getPlane( plane )));
4078 
4079 	return kIOReturnSuccess;
4080 }
4081 
4082 /* Routine io_registry_entry_get_parent_iterator */
4083 kern_return_t
4084 is_io_registry_entry_get_parent_iterator(
4085 	io_object_t registry_entry,
4086 	io_name_t plane,
4087 	io_object_t *iterator)
4088 {
4089 	CHECK( IORegistryEntry, registry_entry, entry );
4090 
4091 	*iterator = IOUserIterator::withIterator(entry->getParentIterator(
4092 		    IORegistryEntry::getPlane( plane )));
4093 
4094 	return kIOReturnSuccess;
4095 }
4096 
4097 /* Routine io_service_get_busy_state */
4098 kern_return_t
4099 is_io_service_get_busy_state(
4100 	io_object_t _service,
4101 	uint32_t *busyState )
4102 {
4103 	CHECK( IOService, _service, service );
4104 
4105 	*busyState = service->getBusyState();
4106 
4107 	return kIOReturnSuccess;
4108 }
4109 
4110 /* Routine io_service_get_state */
4111 kern_return_t
4112 is_io_service_get_state(
4113 	io_object_t _service,
4114 	uint64_t *state,
4115 	uint32_t *busy_state,
4116 	uint64_t *accumulated_busy_time )
4117 {
4118 	CHECK( IOService, _service, service );
4119 
4120 	*state                 = service->getState();
4121 	*busy_state            = service->getBusyState();
4122 	*accumulated_busy_time = service->getAccumulatedBusyTime();
4123 
4124 	return kIOReturnSuccess;
4125 }
4126 
4127 /* Routine io_service_wait_quiet */
4128 kern_return_t
4129 is_io_service_wait_quiet(
4130 	io_object_t _service,
4131 	mach_timespec_t wait_time )
4132 {
4133 	uint64_t    timeoutNS;
4134 
4135 	CHECK( IOService, _service, service );
4136 
4137 	timeoutNS = wait_time.tv_sec;
4138 	timeoutNS *= kSecondScale;
4139 	timeoutNS += wait_time.tv_nsec;
4140 
4141 	return service->waitQuiet(timeoutNS);
4142 }
4143 
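/*
 * User-space sketch (not part of this file): IOServiceWaitQuiet() reaches the
 * routine above; the mach_timespec_t is folded into a single nanosecond
 * timeout before waitQuiet() is called. Example, assuming "service" is a
 * valid io_service_t:
 *
 *   mach_timespec_t t = { .tv_sec = 5, .tv_nsec = 0 };
 *   kern_return_t kr = IOServiceWaitQuiet(service, &t);
 */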
4144 /* Routine io_service_request_probe */
4145 kern_return_t
4146 is_io_service_request_probe(
4147 	io_object_t _service,
4148 	uint32_t options )
4149 {
4150 	CHECK( IOService, _service, service );
4151 
4152 	return service->requestProbe( options );
4153 }
4154 
4155 /* Routine io_service_get_authorization_id */
4156 kern_return_t
4157 is_io_service_get_authorization_id(
4158 	io_object_t _service,
4159 	uint64_t *authorization_id )
4160 {
4161 	kern_return_t          kr;
4162 
4163 	CHECK( IOService, _service, service );
4164 
4165 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4166 	    kIOClientPrivilegeAdministrator );
4167 	if (kIOReturnSuccess != kr) {
4168 		return kr;
4169 	}
4170 
4171 	*authorization_id = service->getAuthorizationID();
4172 
4173 	return kr;
4174 }
4175 
4176 /* Routine io_service_set_authorization_id */
4177 kern_return_t
4178 is_io_service_set_authorization_id(
4179 	io_object_t _service,
4180 	uint64_t authorization_id )
4181 {
4182 	CHECK( IOService, _service, service );
4183 
4184 	return service->setAuthorizationID( authorization_id );
4185 }
4186 
4187 /* Routine io_service_open_extended */
4188 kern_return_t
4189 is_io_service_open_extended(
4190 	io_object_t _service,
4191 	task_t owningTask,
4192 	uint32_t connect_type,
4193 	NDR_record_t ndr,
4194 	io_buf_ptr_t properties,
4195 	mach_msg_type_number_t propertiesCnt,
4196 	kern_return_t * result,
4197 	io_object_t *connection )
4198 {
4199 	IOUserClient * client = NULL;
4200 	kern_return_t  err = KERN_SUCCESS;
4201 	IOReturn       res = kIOReturnSuccess;
4202 	OSDictionary * propertiesDict = NULL;
4203 	bool           crossEndian;
4204 	bool           disallowAccess;
4205 
4206 	CHECK( IOService, _service, service );
4207 
4208 	if (!owningTask) {
4209 		return kIOReturnBadArgument;
4210 	}
4211 	assert(owningTask == current_task());
4212 	if (owningTask != current_task()) {
4213 		return kIOReturnBadArgument;
4214 	}
4215 
4216 	do{
4217 		if (properties) {
4218 			return kIOReturnUnsupported;
4219 		}
4220 #if 0
4221 		{
4222 			OSObject *      obj;
4223 			vm_offset_t     data;
4224 			vm_map_offset_t map_data;
4225 
4226 			if (propertiesCnt > sizeof(io_struct_inband_t)) {
4227 				return kIOReturnMessageTooLarge;
4228 			}
4229 
4230 			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
4231 			res = err;
4232 			data = CAST_DOWN(vm_offset_t, map_data);
4233 			if (KERN_SUCCESS == err) {
4234 				// must return success after vm_map_copyout() succeeds
4235 				obj = OSUnserializeXML((const char *) data, propertiesCnt );
4236 				vm_deallocate( kernel_map, data, propertiesCnt );
4237 				propertiesDict = OSDynamicCast(OSDictionary, obj);
4238 				if (!propertiesDict) {
4239 					res = kIOReturnBadArgument;
4240 					if (obj) {
4241 						obj->release();
4242 					}
4243 				}
4244 			}
4245 			if (kIOReturnSuccess != res) {
4246 				break;
4247 			}
4248 		}
4249 #endif
4250 		crossEndian = (ndr.int_rep != NDR_record.int_rep);
4251 		if (crossEndian) {
4252 			if (!propertiesDict) {
4253 				propertiesDict = OSDictionary::withCapacity(4);
4254 			}
4255 			OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
4256 			if (data) {
4257 				if (propertiesDict) {
4258 					propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
4259 				}
4260 				data->release();
4261 			}
4262 		}
4263 
4264 		res = service->newUserClient( owningTask, (void *) owningTask,
4265 		    connect_type, propertiesDict, &client );
4266 
4267 		if (propertiesDict) {
4268 			propertiesDict->release();
4269 		}
4270 
4271 		if (res == kIOReturnSuccess) {
4272 			assert( OSDynamicCast(IOUserClient, client));
4273 			if (!client->reserved) {
4274 				if (!client->reserve()) {
4275 					client->clientClose();
4276 					OSSafeReleaseNULL(client);
4277 					res = kIOReturnNoMemory;
4278 				}
4279 			}
4280 		}
4281 
4282 		if (res == kIOReturnSuccess) {
4283 			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
4284 			if (client->sharedInstance) {
4285 				IOLockLock(gIOUserClientOwnersLock);
4286 			}
4287 			if (!client->lock) {
4288 				client->lock       = IORWLockAlloc();
4289 				client->filterLock = IOLockAlloc();
4290 
4291 				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
4292 				{
4293 					OSObject * obj;
4294 					extern const OSSymbol * gIOSurfaceIdentifier;
4295 					obj = client->getProperty(kIOUserClientDefaultLockingKey);
4296 					if (obj) {
4297 						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
4298 					} else {
4299 						const OSMetaClass * meta;
4300 						OSKext            * kext;
4301 						meta = client->getMetaClass();
4302 						kext = meta->getKext();
4303 						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
4304 							client->defaultLocking = true;
4305 							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
4306 						}
4307 					}
4308 				}
4309 			}
4310 			if (client->sharedInstance) {
4311 				IOLockUnlock(gIOUserClientOwnersLock);
4312 			}
4313 
4314 			disallowAccess = (crossEndian
4315 			    && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
4316 			    && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
4317 			if (disallowAccess) {
4318 				res = kIOReturnUnsupported;
4319 			}
4320 #if CONFIG_MACF
4321 			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
4322 				res = kIOReturnNotPermitted;
4323 			}
4324 #endif
4325 
4326 			if ((kIOReturnSuccess == res)
4327 			    && gIOUCFilterCallbacks
4328 			    && gIOUCFilterCallbacks->io_filter_resolver) {
4329 				io_filter_policy_t filterPolicy;
4330 				filterPolicy = client->filterForTask(owningTask, 0);
4331 				if (!filterPolicy) {
4332 					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
4333 					if (kIOReturnUnsupported == res) {
4334 						res = kIOReturnSuccess;
4335 					} else if (kIOReturnSuccess == res) {
4336 						client->filterForTask(owningTask, filterPolicy);
4337 					}
4338 				}
4339 			}
4340 
4341 			if (kIOReturnSuccess == res) {
4342 				res = client->registerOwner(owningTask);
4343 			}
4344 
4345 			if (kIOReturnSuccess != res) {
4346 				IOStatisticsClientCall();
4347 				client->clientClose();
4348 				client->release();
4349 				client = NULL;
4350 				break;
4351 			}
4352 			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
4353 			if (creatorName) {
4354 				client->setProperty(kIOUserClientCreatorKey, creatorName);
4355 				creatorName->release();
4356 			}
4357 			client->setTerminateDefer(service, false);
4358 		}
4359 	}while (false);
4360 
4361 	*connection = client;
4362 	*result = res;
4363 
4364 	return err;
4365 }
4366 
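/*
 * User-space sketch (not part of this file): IOServiceOpen() is the usual way
 * into the routine above; connect_type selects which IOUserClient subclass
 * newUserClient() creates. The matching class name and type 0 below are
 * placeholders:
 *
 *   io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault,
 *       IOServiceMatching("ExampleDriver"));
 *   io_connect_t connect = IO_OBJECT_NULL;
 *   kern_return_t kr = IOServiceOpen(service, mach_task_self(), 0, &connect);
 *   // ... issue IOConnectCall* requests ...
 *   IOServiceClose(connect);
 *   IOObjectRelease(service);
 */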
4367 /* Routine io_service_close */
4368 kern_return_t
4369 is_io_service_close(
4370 	io_object_t connection )
4371 {
4372 	OSSet * mappings;
4373 	if ((mappings = OSDynamicCast(OSSet, connection))) {
4374 		return kIOReturnSuccess;
4375 	}
4376 
4377 	CHECK( IOUserClient, connection, client );
4378 
4379 	IOStatisticsClientCall();
4380 
4381 	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
4382 		IORWLockWrite(client->lock);
4383 		client->clientClose();
4384 		IORWLockUnlock(client->lock);
4385 	} else {
4386 		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
4387 		    client->getRegistryEntryID(), client->getName());
4388 	}
4389 
4390 	return kIOReturnSuccess;
4391 }
4392 
4393 /* Routine io_connect_get_service */
4394 kern_return_t
4395 is_io_connect_get_service(
4396 	io_object_t connection,
4397 	io_object_t *service )
4398 {
4399 	IOService * theService;
4400 
4401 	CHECK( IOUserClient, connection, client );
4402 
4403 	theService = client->getService();
4404 	if (theService) {
4405 		theService->retain();
4406 	}
4407 
4408 	*service = theService;
4409 
4410 	return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4411 }
4412 
4413 /* Routine io_connect_set_notification_port */
4414 kern_return_t
4415 is_io_connect_set_notification_port(
4416 	io_object_t connection,
4417 	uint32_t notification_type,
4418 	mach_port_t port,
4419 	uint32_t reference)
4420 {
4421 	kern_return_t ret;
4422 	CHECK( IOUserClient, connection, client );
4423 
4424 	IOStatisticsClientCall();
4425 	IORWLockWrite(client->lock);
4426 	ret = client->registerNotificationPort( port, notification_type,
4427 	    (io_user_reference_t) reference );
4428 	IORWLockUnlock(client->lock);
4429 	return ret;
4430 }
4431 
4432 /* Routine io_connect_set_notification_port_64 */
4433 kern_return_t
4434 is_io_connect_set_notification_port_64(
4435 	io_object_t connection,
4436 	uint32_t notification_type,
4437 	mach_port_t port,
4438 	io_user_reference_t reference)
4439 {
4440 	kern_return_t ret;
4441 	CHECK( IOUserClient, connection, client );
4442 
4443 	IOStatisticsClientCall();
4444 	IORWLockWrite(client->lock);
4445 	ret = client->registerNotificationPort( port, notification_type,
4446 	    reference );
4447 	IORWLockUnlock(client->lock);
4448 	return ret;
4449 }
4450 
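/*
 * User-space sketch (not part of this file): IOConnectSetNotificationPort()
 * lands in one of the two registerNotificationPort() entry points above
 * (32-bit or 64-bit reference). Example, assuming "connect" came from
 * IOServiceOpen() and notification type 0 means something to the driver:
 *
 *   mach_port_t port = MACH_PORT_NULL;
 *   mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
 *   kern_return_t kr = IOConnectSetNotificationPort(connect, 0, port, 0);
 */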
4451 /* Routine io_connect_map_memory_into_task */
4452 kern_return_t
4453 is_io_connect_map_memory_into_task
4454 (
4455 	io_connect_t connection,
4456 	uint32_t memory_type,
4457 	task_t into_task,
4458 	mach_vm_address_t *address,
4459 	mach_vm_size_t *size,
4460 	uint32_t flags
4461 )
4462 {
4463 	IOReturn            err;
4464 	IOMemoryMap *       map;
4465 
4466 	CHECK( IOUserClient, connection, client );
4467 
4468 	if (!into_task) {
4469 		return kIOReturnBadArgument;
4470 	}
4471 
4472 	IOStatisticsClientCall();
4473 	if (client->defaultLocking) {
4474 		IORWLockWrite(client->lock);
4475 	}
4476 	map = client->mapClientMemory64( memory_type, into_task, flags, *address );
4477 	if (client->defaultLocking) {
4478 		IORWLockUnlock(client->lock);
4479 	}
4480 
4481 	if (map) {
4482 		*address = map->getAddress();
4483 		if (size) {
4484 			*size = map->getSize();
4485 		}
4486 
4487 		if (client->sharedInstance
4488 		    || (into_task != current_task())) {
4489 			// push a name out to the task owning the map,
4490 			// so we can clean up maps
4491 			mach_port_name_t name __unused =
4492 			    IOMachPort::makeSendRightForTask(
4493 				into_task, map, IKOT_IOKIT_OBJECT );
4494 			map->release();
4495 		} else {
4496 			// keep it with the user client
4497 			IOLockLock( gIOObjectPortLock);
4498 			if (NULL == client->mappings) {
4499 				client->mappings = OSSet::withCapacity(2);
4500 			}
4501 			if (client->mappings) {
4502 				client->mappings->setObject( map);
4503 			}
4504 			IOLockUnlock( gIOObjectPortLock);
4505 			map->release();
4506 		}
4507 		err = kIOReturnSuccess;
4508 	} else {
4509 		err = kIOReturnBadArgument;
4510 	}
4511 
4512 	return err;
4513 }
4514 
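/*
 * User-space sketch (not part of this file): IOConnectMapMemory64() funnels
 * into the routine above. Memory type 0 is a placeholder and must match what
 * the driver's clientMemoryForType() implementation expects:
 *
 *   mach_vm_address_t addr = 0;
 *   mach_vm_size_t    size = 0;
 *   kern_return_t kr = IOConnectMapMemory64(connect, 0, mach_task_self(),
 *       &addr, &size, kIOMapAnywhere);
 *   // on success addr/size describe the new mapping in this task
 */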
4515 /* Routine io_connect_map_memory */
4516 kern_return_t
4517 is_io_connect_map_memory(
4518 	io_object_t     connect,
4519 	uint32_t        type,
4520 	task_t          task,
4521 	uint32_t  *     mapAddr,
4522 	uint32_t  *     mapSize,
4523 	uint32_t        flags )
4524 {
4525 	IOReturn          err;
4526 	mach_vm_address_t address;
4527 	mach_vm_size_t    size;
4528 
4529 	address = SCALAR64(*mapAddr);
4530 	size    = SCALAR64(*mapSize);
4531 
4532 	err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4533 
4534 	*mapAddr = SCALAR32(address);
4535 	*mapSize = SCALAR32(size);
4536 
4537 	return err;
4538 }
4539 } /* extern "C" */
4540 
4541 IOMemoryMap *
4542 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4543 {
4544 	OSIterator *  iter;
4545 	IOMemoryMap * map = NULL;
4546 
4547 	IOLockLock(gIOObjectPortLock);
4548 
4549 	iter = OSCollectionIterator::withCollection(mappings);
4550 	if (iter) {
4551 		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4552 			if (mem == map->getMemoryDescriptor()) {
4553 				map->retain();
4554 				mappings->removeObject(map);
4555 				break;
4556 			}
4557 		}
4558 		iter->release();
4559 	}
4560 
4561 	IOLockUnlock(gIOObjectPortLock);
4562 
4563 	return map;
4564 }
4565 
4566 extern "C" {
4567 /* Routine io_connect_unmap_memory_from_task */
4568 kern_return_t
4569 is_io_connect_unmap_memory_from_task
4570 (
4571 	io_connect_t connection,
4572 	uint32_t memory_type,
4573 	task_t from_task,
4574 	mach_vm_address_t address)
4575 {
4576 	IOReturn            err;
4577 	IOOptionBits        options = 0;
4578 	IOMemoryDescriptor * memory = NULL;
4579 	IOMemoryMap *       map;
4580 
4581 	CHECK( IOUserClient, connection, client );
4582 
4583 	if (!from_task) {
4584 		return kIOReturnBadArgument;
4585 	}
4586 
4587 	IOStatisticsClientCall();
4588 	if (client->defaultLocking) {
4589 		IORWLockWrite(client->lock);
4590 	}
4591 	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
4592 	if (client->defaultLocking) {
4593 		IORWLockUnlock(client->lock);
4594 	}
4595 
4596 	if (memory && (kIOReturnSuccess == err)) {
4597 		options = (options & ~kIOMapUserOptionsMask)
4598 		    | kIOMapAnywhere | kIOMapReference;
4599 
4600 		map = memory->createMappingInTask( from_task, address, options );
4601 		memory->release();
4602 		if (map) {
4603 			IOLockLock( gIOObjectPortLock);
4604 			if (client->mappings) {
4605 				client->mappings->removeObject( map);
4606 			}
4607 			IOLockUnlock( gIOObjectPortLock);
4608 
4609 			mach_port_name_t name = 0;
4610 			bool is_shared_instance_or_not_from_current_task = from_task != current_task() || client->sharedInstance;
4611 			if (is_shared_instance_or_not_from_current_task) {
4612 				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
4613 				map->release();
4614 			}
4615 
4616 			if (name) {
4617 				map->userClientUnmap();
4618 				err = iokit_mod_send_right( from_task, name, -2 );
4619 				err = kIOReturnSuccess;
4620 			} else {
4621 				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
4622 			}
4623 			if (!is_shared_instance_or_not_from_current_task) {
4624 				map->release();
4625 			}
4626 		} else {
4627 			err = kIOReturnBadArgument;
4628 		}
4629 	}
4630 
4631 	return err;
4632 }
4633 
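/*
 * User-space sketch (not part of this file): IOConnectUnmapMemory64() drives
 * is_io_connect_unmap_memory_from_task() above; the kIOMapReference lookup
 * re-finds the existing mapping for the given type and address before it is
 * torn down. Continuing the mapping example:
 *
 *   kern_return_t kr = IOConnectUnmapMemory64(connect, 0, mach_task_self(),
 *       addr);
 */

/* Routine io_connect_unmap_memory */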
4634 kern_return_t
4635 is_io_connect_unmap_memory(
4636 	io_object_t     connect,
4637 	uint32_t        type,
4638 	task_t          task,
4639 	uint32_t        mapAddr )
4640 {
4641 	IOReturn            err;
4642 	mach_vm_address_t   address;
4643 
4644 	address = SCALAR64(mapAddr);
4645 
4646 	err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
4647 
4648 	return err;
4649 }
4650 
4651 
4652 /* Routine io_connect_add_client */
4653 kern_return_t
4654 is_io_connect_add_client(
4655 	io_object_t connection,
4656 	io_object_t connect_to)
4657 {
4658 	CHECK( IOUserClient, connection, client );
4659 	CHECK( IOUserClient, connect_to, to );
4660 
4661 	IOReturn ret;
4662 
4663 	IOStatisticsClientCall();
4664 	if (client->defaultLocking) {
4665 		IORWLockWrite(client->lock);
4666 	}
4667 	ret = client->connectClient( to );
4668 	if (client->defaultLocking) {
4669 		IORWLockUnlock(client->lock);
4670 	}
4671 	return ret;
4672 }
4673 
4674 
4675 /* Routine io_connect_set_properties */
4676 kern_return_t
4677 is_io_connect_set_properties(
4678 	io_object_t connection,
4679 	io_buf_ptr_t properties,
4680 	mach_msg_type_number_t propertiesCnt,
4681 	kern_return_t * result)
4682 {
4683 	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4684 }
4685 
4686 /* Routine io_connect_method_var_output */
4687 kern_return_t
4688 is_io_connect_method_var_output
4689 (
4690 	io_connect_t connection,
4691 	uint32_t selector,
4692 	io_scalar_inband64_t scalar_input,
4693 	mach_msg_type_number_t scalar_inputCnt,
4694 	io_struct_inband_t inband_input,
4695 	mach_msg_type_number_t inband_inputCnt,
4696 	mach_vm_address_t ool_input,
4697 	mach_vm_size_t ool_input_size,
4698 	io_struct_inband_t inband_output,
4699 	mach_msg_type_number_t *inband_outputCnt,
4700 	io_scalar_inband64_t scalar_output,
4701 	mach_msg_type_number_t *scalar_outputCnt,
4702 	io_buf_ptr_t *var_output,
4703 	mach_msg_type_number_t *var_outputCnt
4704 )
4705 {
4706 	CHECK( IOUserClient, connection, client );
4707 
4708 	IOExternalMethodArguments args;
4709 	IOReturn ret;
4710 	IOMemoryDescriptor * inputMD  = NULL;
4711 	OSObject *           structureVariableOutputData = NULL;
4712 
4713 	bzero(&args.__reserved[0], sizeof(args.__reserved));
4714 	args.__reservedA = 0;
4715 	args.version = kIOExternalMethodArgumentsCurrentVersion;
4716 
4717 	args.selector = selector;
4718 
4719 	args.asyncWakePort               = MACH_PORT_NULL;
4720 	args.asyncReference              = NULL;
4721 	args.asyncReferenceCount         = 0;
4722 	args.structureVariableOutputData = &structureVariableOutputData;
4723 
4724 	args.scalarInput = scalar_input;
4725 	args.scalarInputCount = scalar_inputCnt;
4726 	args.structureInput = inband_input;
4727 	args.structureInputSize = inband_inputCnt;
4728 
4729 	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4730 		return kIOReturnIPCError;
4731 	}
4732 
4733 	if (ool_input) {
4734 		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4735 		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
4736 		    current_task());
4737 	}
4738 
4739 	args.structureInputDescriptor = inputMD;
4740 
4741 	args.scalarOutput = scalar_output;
4742 	args.scalarOutputCount = *scalar_outputCnt;
4743 	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4744 	args.structureOutput = inband_output;
4745 	args.structureOutputSize = *inband_outputCnt;
4746 	args.structureOutputDescriptor = NULL;
4747 	args.structureOutputDescriptorSize = 0;
4748 
4749 	IOStatisticsClientCall();
4750 	ret = kIOReturnSuccess;
4751 
4752 	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
4753 	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
4754 		ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_method, selector);
4755 	}
4756 	if (kIOReturnSuccess == ret) {
4757 		if (client->defaultLocking) {
4758 			IORWLockRead(client->lock);
4759 		}
4760 		ret = client->externalMethod( selector, &args );
4761 		if (client->defaultLocking) {
4762 			IORWLockUnlock(client->lock);
4763 		}
4764 	}
4765 
4766 	*scalar_outputCnt = args.scalarOutputCount;
4767 	*inband_outputCnt = args.structureOutputSize;
4768 
4769 	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
4770 		OSSerialize * serialize;
4771 		OSData      * data;
4772 		unsigned int  len;
4773 
4774 		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
4775 			len = serialize->getLength();
4776 			*var_outputCnt = len;
4777 			ret = copyoutkdata(serialize->text(), len, var_output);
4778 		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
4779 			len = data->getLength();
4780 			*var_outputCnt = len;
4781 			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
4782 		} else {
4783 			ret = kIOReturnUnderrun;
4784 		}
4785 	}
4786 
4787 	if (inputMD) {
4788 		inputMD->release();
4789 	}
4790 	if (structureVariableOutputData) {
4791 		structureVariableOutputData->release();
4792 	}
4793 
4794 	return ret;
4795 }
4796 
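/*
 * Kernel-side sketch (hypothetical driver, names made up): a user client can
 * return variable-length results through this path by storing an OSData or
 * OSSerialize into the structureVariableOutputData slot from its
 * externalMethod() override; the routine above copies the bytes out with
 * copyoutkdata() and releases the object:
 *
 *   IOReturn ExampleUserClient::externalMethod(uint32_t selector,
 *       IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *       OSObject * target, void * reference)
 *   {
 *       if (args->structureVariableOutputData) {
 *           *args->structureVariableOutputData =
 *               OSData::withBytes(resultBytes, resultLength);
 *       }
 *       return kIOReturnSuccess;
 *   }
 */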
4797 /* Routine io_connect_method */
4798 kern_return_t
4799 is_io_connect_method
4800 (
4801 	io_connect_t connection,
4802 	uint32_t selector,
4803 	io_scalar_inband64_t scalar_input,
4804 	mach_msg_type_number_t scalar_inputCnt,
4805 	io_struct_inband_t inband_input,
4806 	mach_msg_type_number_t inband_inputCnt,
4807 	mach_vm_address_t ool_input,
4808 	mach_vm_size_t ool_input_size,
4809 	io_struct_inband_t inband_output,
4810 	mach_msg_type_number_t *inband_outputCnt,
4811 	io_scalar_inband64_t scalar_output,
4812 	mach_msg_type_number_t *scalar_outputCnt,
4813 	mach_vm_address_t ool_output,
4814 	mach_vm_size_t *ool_output_size
4815 )
4816 {
4817 	CHECK( IOUserClient, connection, client );
4818 
4819 	IOExternalMethodArguments args;
4820 	IOReturn ret;
4821 	IOMemoryDescriptor * inputMD  = NULL;
4822 	IOMemoryDescriptor * outputMD = NULL;
4823 
4824 	bzero(&args.__reserved[0], sizeof(args.__reserved));
4825 	args.__reservedA = 0;
4826 	args.version = kIOExternalMethodArgumentsCurrentVersion;
4827 
4828 	args.selector = selector;
4829 
4830 	args.asyncWakePort               = MACH_PORT_NULL;
4831 	args.asyncReference              = NULL;
4832 	args.asyncReferenceCount         = 0;
4833 	args.structureVariableOutputData = NULL;
4834 
4835 	args.scalarInput = scalar_input;
4836 	args.scalarInputCount = scalar_inputCnt;
4837 	args.structureInput = inband_input;
4838 	args.structureInputSize = inband_inputCnt;
4839 
4840 	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4841 		return kIOReturnIPCError;
4842 	}
4843 	if (ool_output) {
4844 		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
4845 			return kIOReturnIPCError;
4846 		}
4847 		if (*ool_output_size > UINT_MAX) {
4848 			return kIOReturnIPCError;
4849 		}
4850 	}
4851 
4852 	if (ool_input) {
4853 		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4854 		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
4855 		    current_task());
4856 	}
4857 
4858 	args.structureInputDescriptor = inputMD;
4859 
4860 	args.scalarOutput = scalar_output;
4861 	args.scalarOutputCount = *scalar_outputCnt;
4862 	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4863 	args.structureOutput = inband_output;
4864 	args.structureOutputSize = *inband_outputCnt;
4865 
4866 	if (ool_output && ool_output_size) {
4867 		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4868 		    kIODirectionIn, current_task());
4869 	}
4870 
4871 	args.structureOutputDescriptor = outputMD;
4872 	args.structureOutputDescriptorSize = ool_output_size
4873 	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
4874 	    : 0;
4875 
4876 	IOStatisticsClientCall();
4877 	ret = kIOReturnSuccess;
4878 	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
4879 	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
4880 		ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_method, selector);
4881 	}
4882 	if (kIOReturnSuccess == ret) {
4883 		if (client->defaultLocking) {
4884 			IORWLockRead(client->lock);
4885 		}
4886 		ret = client->externalMethod( selector, &args );
4887 		if (client->defaultLocking) {
4888 			IORWLockUnlock(client->lock);
4889 		}
4890 	}
4891 
4892 	*scalar_outputCnt = args.scalarOutputCount;
4893 	*inband_outputCnt = args.structureOutputSize;
4894 	*ool_output_size  = args.structureOutputDescriptorSize;
4895 
4896 	if (inputMD) {
4897 		inputMD->release();
4898 	}
4899 	if (outputMD) {
4900 		outputMD->release();
4901 	}
4902 
4903 	return ret;
4904 }
4905 
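/*
 * User-space sketch (not part of this file): IOConnectCallMethod() is the
 * general entry point for the routine above; IOKitLib chooses inband versus
 * out-of-line transport based on the struct sizes. Selector 0 and the counts
 * below are placeholders defined by the driver:
 *
 *   uint64_t in[2] = { 1, 2 };
 *   uint64_t out[4];
 *   uint32_t outCnt = 4;
 *   char     outStruct[64];
 *   size_t   outStructCnt = sizeof(outStruct);
 *   kern_return_t kr = IOConnectCallMethod(connect, 0,
 *       in, 2, NULL, 0,                          // scalar + struct input
 *       out, &outCnt, outStruct, &outStructCnt); // scalar + struct output
 */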
4906 /* Routine io_connect_async_method */
4907 kern_return_t
4908 is_io_connect_async_method
4909 (
4910 	io_connect_t connection,
4911 	mach_port_t wake_port,
4912 	io_async_ref64_t reference,
4913 	mach_msg_type_number_t referenceCnt,
4914 	uint32_t selector,
4915 	io_scalar_inband64_t scalar_input,
4916 	mach_msg_type_number_t scalar_inputCnt,
4917 	io_struct_inband_t inband_input,
4918 	mach_msg_type_number_t inband_inputCnt,
4919 	mach_vm_address_t ool_input,
4920 	mach_vm_size_t ool_input_size,
4921 	io_struct_inband_t inband_output,
4922 	mach_msg_type_number_t *inband_outputCnt,
4923 	io_scalar_inband64_t scalar_output,
4924 	mach_msg_type_number_t *scalar_outputCnt,
4925 	mach_vm_address_t ool_output,
4926 	mach_vm_size_t * ool_output_size
4927 )
4928 {
4929 	CHECK( IOUserClient, connection, client );
4930 
4931 	IOExternalMethodArguments args;
4932 	IOReturn ret;
4933 	IOMemoryDescriptor * inputMD  = NULL;
4934 	IOMemoryDescriptor * outputMD = NULL;
4935 
4936 	if (referenceCnt < 1) {
4937 		return kIOReturnBadArgument;
4938 	}
4939 
4940 	bzero(&args.__reserved[0], sizeof(args.__reserved));
4941 	args.__reservedA = 0;
4942 	args.version = kIOExternalMethodArgumentsCurrentVersion;
4943 
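	// Stash the wake port in reference[0] for the async completion path;
	// kIOUCAsync64Flag tags callers with a 64-bit address map so replies
	// are formatted for the right word size.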
4944 	reference[0]             = (io_user_reference_t) wake_port;
4945 	if (vm_map_is_64bit(get_task_map(current_task()))) {
4946 		reference[0]         |= kIOUCAsync64Flag;
4947 	}
4948 
4949 	args.selector = selector;
4950 
4951 	args.asyncWakePort       = wake_port;
4952 	args.asyncReference      = reference;
4953 	args.asyncReferenceCount = referenceCnt;
4954 
4955 	args.structureVariableOutputData = NULL;
4956 
4957 	args.scalarInput = scalar_input;
4958 	args.scalarInputCount = scalar_inputCnt;
4959 	args.structureInput = inband_input;
4960 	args.structureInputSize = inband_inputCnt;
4961 
4962 	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4963 		return kIOReturnIPCError;
4964 	}
4965 	if (ool_output) {
4966 		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
4967 			return kIOReturnIPCError;
4968 		}
4969 		if (*ool_output_size > UINT_MAX) {
4970 			return kIOReturnIPCError;
4971 		}
4972 	}
4973 
4974 	if (ool_input) {
4975 		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4976 		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
4977 		    current_task());
4978 	}
4979 
4980 	args.structureInputDescriptor = inputMD;
4981 
4982 	args.scalarOutput = scalar_output;
4983 	args.scalarOutputCount = *scalar_outputCnt;
4984 	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4985 	args.structureOutput = inband_output;
4986 	args.structureOutputSize = *inband_outputCnt;
4987 
4988 	if (ool_output) {
4989 		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4990 		    kIODirectionIn, current_task());
4991 	}
4992 
4993 	args.structureOutputDescriptor = outputMD;
4994 	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);
4995 
4996 	IOStatisticsClientCall();
4997 	ret = kIOReturnSuccess;
4998 	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
4999 	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
5000 		ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_async_method, selector);
5001 	}
5002 	if (kIOReturnSuccess == ret) {
5003 		if (client->defaultLocking) {
5004 			IORWLockRead(client->lock);
5005 		}
5006 		ret = client->externalMethod( selector, &args );
5007 		if (client->defaultLocking) {
5008 			IORWLockUnlock(client->lock);
5009 		}
5010 	}
5011 
5012 	*scalar_outputCnt = args.scalarOutputCount;
5013 	*inband_outputCnt = args.structureOutputSize;
5014 	*ool_output_size  = args.structureOutputDescriptorSize;
5015 
5016 	if (inputMD) {
5017 		inputMD->release();
5018 	}
5019 	if (outputMD) {
5020 		outputMD->release();
5021 	}
5022 
5023 	return ret;
5024 }
5025 
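/*
 * User-space sketch (not part of this file): IOConnectCallAsyncMethod()
 * reaches the routine above; the wake port is typically the Mach port of an
 * IONotificationPortRef so completions can be dispatched with
 * IODispatchCalloutFromMessage(). Selector and reference layout are
 * driver-defined placeholders here:
 *
 *   IONotificationPortRef np = IONotificationPortCreate(kIOMasterPortDefault);
 *   mach_port_t wakePort = IONotificationPortGetMachPort(np);
 *   uint64_t ref[8] = { 0 };   // async reference, interpreted by the driver
 *   uint64_t in[1]  = { 42 };
 *   uint32_t outCnt = 0;
 *   size_t   outStructCnt = 0;
 *   kern_return_t kr = IOConnectCallAsyncMethod(connect, 0, wakePort,
 *       ref, 2, in, 1, NULL, 0, NULL, &outCnt, NULL, &outStructCnt);
 */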
5026 /* Routine io_connect_method_scalarI_scalarO */
5027 kern_return_t
5028 is_io_connect_method_scalarI_scalarO(
5029 	io_object_t        connect,
5030 	uint32_t           index,
5031 	io_scalar_inband_t       input,
5032 	mach_msg_type_number_t   inputCount,
5033 	io_scalar_inband_t       output,
5034 	mach_msg_type_number_t * outputCount )
5035 {
5036 	IOReturn err;
5037 	uint32_t i;
5038 	io_scalar_inband64_t _input;
5039 	io_scalar_inband64_t _output;
5040 
5041 	mach_msg_type_number_t struct_outputCnt = 0;
5042 	mach_vm_size_t ool_output_size = 0;
5043 
5044 	bzero(&_output[0], sizeof(_output));
5045 	for (i = 0; i < inputCount; i++) {
5046 		_input[i] = SCALAR64(input[i]);
5047 	}
5048 
5049 	err = is_io_connect_method(connect, index,
5050 	    _input, inputCount,
5051 	    NULL, 0,
5052 	    0, 0,
5053 	    NULL, &struct_outputCnt,
5054 	    _output, outputCount,
5055 	    0, &ool_output_size);
5056 
5057 	for (i = 0; i < *outputCount; i++) {
5058 		output[i] = SCALAR32(_output[i]);
5059 	}
5060 
5061 	return err;
5062 }
5063 
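/*
 * The scalarI_scalarO entry above is the legacy 32-bit scalar interface: it
 * widens the inputs with SCALAR64(), forwards to is_io_connect_method(), and
 * truncates the outputs back with SCALAR32() for older callers. The shim
 * below plays the same role for drivers that still export IOExternalMethod
 * tables.
 */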
5064 kern_return_t
5065 shim_io_connect_method_scalarI_scalarO(
5066 	IOExternalMethod *      method,
5067 	IOService *             object,
5068 	const io_user_scalar_t * input,
5069 	mach_msg_type_number_t   inputCount,
5070 	io_user_scalar_t * output,
5071 	mach_msg_type_number_t * outputCount )
5072 {
5073 	IOMethod            func;
5074 	io_scalar_inband_t  _output;
5075 	IOReturn            err;
5076 	err = kIOReturnBadArgument;
5077 
5078 	bzero(&_output[0], sizeof(_output));
5079 	do {
5080 		if (inputCount != method->count0) {
5081 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5082 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5083 			continue;
5084 		}
5085 		if (*outputCount != method->count1) {
5086 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
5087 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5088 			continue;
5089 		}
5090 
5091 		func = method->func;
5092 
5093 		switch (inputCount) {
5094 		case 6:
5095 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5096 			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
5097 			break;
5098 		case 5:
5099 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5100 			    ARG32(input[3]), ARG32(input[4]),
5101 			    &_output[0] );
5102 			break;
5103 		case 4:
5104 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5105 			    ARG32(input[3]),
5106 			    &_output[0], &_output[1] );
5107 			break;
5108 		case 3:
5109 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5110 			    &_output[0], &_output[1], &_output[2] );
5111 			break;
5112 		case 2:
5113 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
5114 			    &_output[0], &_output[1], &_output[2],
5115 			    &_output[3] );
5116 			break;
5117 		case 1:
5118 			err = (object->*func)(  ARG32(input[0]),
5119 			    &_output[0], &_output[1], &_output[2],
5120 			    &_output[3], &_output[4] );
5121 			break;
5122 		case 0:
5123 			err = (object->*func)(  &_output[0], &_output[1], &_output[2],
5124 			    &_output[3], &_output[4], &_output[5] );
5125 			break;
5126 
5127 		default:
5128 			IOLog("%s: Bad method table\n", object->getName());
5129 		}
5130 	}while (false);
5131 
5132 	uint32_t i;
5133 	for (i = 0; i < *outputCount; i++) {
5134 		output[i] = SCALAR32(_output[i]);
5135 	}
5136 
5137 	return err;
5138 }
5139 
5140 /* Routine io_async_method_scalarI_scalarO */
5141 kern_return_t
5142 is_io_async_method_scalarI_scalarO(
5143 	io_object_t        connect,
5144 	mach_port_t wake_port,
5145 	io_async_ref_t reference,
5146 	mach_msg_type_number_t referenceCnt,
5147 	uint32_t           index,
5148 	io_scalar_inband_t       input,
5149 	mach_msg_type_number_t   inputCount,
5150 	io_scalar_inband_t       output,
5151 	mach_msg_type_number_t * outputCount )
5152 {
5153 	IOReturn err;
5154 	uint32_t i;
5155 	io_scalar_inband64_t _input;
5156 	io_scalar_inband64_t _output;
5157 	io_async_ref64_t _reference;
5158 
5159 	if (referenceCnt > ASYNC_REF64_COUNT) {
5160 		return kIOReturnBadArgument;
5161 	}
5162 	bzero(&_output[0], sizeof(_output));
5163 	for (i = 0; i < referenceCnt; i++) {
5164 		_reference[i] = REF64(reference[i]);
5165 	}
5166 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5167 
5168 	mach_msg_type_number_t struct_outputCnt = 0;
5169 	mach_vm_size_t ool_output_size = 0;
5170 
5171 	for (i = 0; i < inputCount; i++) {
5172 		_input[i] = SCALAR64(input[i]);
5173 	}
5174 
5175 	err = is_io_connect_async_method(connect,
5176 	    wake_port, _reference, referenceCnt,
5177 	    index,
5178 	    _input, inputCount,
5179 	    NULL, 0,
5180 	    0, 0,
5181 	    NULL, &struct_outputCnt,
5182 	    _output, outputCount,
5183 	    0, &ool_output_size);
5184 
5185 	for (i = 0; i < *outputCount; i++) {
5186 		output[i] = SCALAR32(_output[i]);
5187 	}
5188 
5189 	return err;
5190 }
5191 /* Routine io_async_method_scalarI_structureO */
5192 kern_return_t
5193 is_io_async_method_scalarI_structureO(
5194 	io_object_t     connect,
5195 	mach_port_t wake_port,
5196 	io_async_ref_t reference,
5197 	mach_msg_type_number_t referenceCnt,
5198 	uint32_t        index,
5199 	io_scalar_inband_t input,
5200 	mach_msg_type_number_t  inputCount,
5201 	io_struct_inband_t              output,
5202 	mach_msg_type_number_t *        outputCount )
5203 {
5204 	uint32_t i;
5205 	io_scalar_inband64_t _input;
5206 	io_async_ref64_t _reference;
5207 
5208 	if (referenceCnt > ASYNC_REF64_COUNT) {
5209 		return kIOReturnBadArgument;
5210 	}
5211 	for (i = 0; i < referenceCnt; i++) {
5212 		_reference[i] = REF64(reference[i]);
5213 	}
5214 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5215 
5216 	mach_msg_type_number_t scalar_outputCnt = 0;
5217 	mach_vm_size_t ool_output_size = 0;
5218 
5219 	for (i = 0; i < inputCount; i++) {
5220 		_input[i] = SCALAR64(input[i]);
5221 	}
5222 
5223 	return is_io_connect_async_method(connect,
5224 	           wake_port, _reference, referenceCnt,
5225 	           index,
5226 	           _input, inputCount,
5227 	           NULL, 0,
5228 	           0, 0,
5229 	           output, outputCount,
5230 	           NULL, &scalar_outputCnt,
5231 	           0, &ool_output_size);
5232 }
5233 
5234 /* Routine io_async_method_scalarI_structureI */
5235 kern_return_t
5236 is_io_async_method_scalarI_structureI(
5237 	io_connect_t            connect,
5238 	mach_port_t wake_port,
5239 	io_async_ref_t reference,
5240 	mach_msg_type_number_t referenceCnt,
5241 	uint32_t                index,
5242 	io_scalar_inband_t      input,
5243 	mach_msg_type_number_t  inputCount,
5244 	io_struct_inband_t      inputStruct,
5245 	mach_msg_type_number_t  inputStructCount )
5246 {
5247 	uint32_t i;
5248 	io_scalar_inband64_t _input;
5249 	io_async_ref64_t _reference;
5250 
5251 	if (referenceCnt > ASYNC_REF64_COUNT) {
5252 		return kIOReturnBadArgument;
5253 	}
5254 	for (i = 0; i < referenceCnt; i++) {
5255 		_reference[i] = REF64(reference[i]);
5256 	}
5257 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5258 
5259 	mach_msg_type_number_t scalar_outputCnt = 0;
5260 	mach_msg_type_number_t inband_outputCnt = 0;
5261 	mach_vm_size_t ool_output_size = 0;
5262 
5263 	for (i = 0; i < inputCount; i++) {
5264 		_input[i] = SCALAR64(input[i]);
5265 	}
5266 
5267 	return is_io_connect_async_method(connect,
5268 	           wake_port, _reference, referenceCnt,
5269 	           index,
5270 	           _input, inputCount,
5271 	           inputStruct, inputStructCount,
5272 	           0, 0,
5273 	           NULL, &inband_outputCnt,
5274 	           NULL, &scalar_outputCnt,
5275 	           0, &ool_output_size);
5276 }
5277 
5278 /* Routine io_async_method_structureI_structureO */
5279 kern_return_t
5280 is_io_async_method_structureI_structureO(
5281 	io_object_t     connect,
5282 	mach_port_t wake_port,
5283 	io_async_ref_t reference,
5284 	mach_msg_type_number_t referenceCnt,
5285 	uint32_t        index,
5286 	io_struct_inband_t              input,
5287 	mach_msg_type_number_t  inputCount,
5288 	io_struct_inband_t              output,
5289 	mach_msg_type_number_t *        outputCount )
5290 {
5291 	uint32_t i;
5292 	mach_msg_type_number_t scalar_outputCnt = 0;
5293 	mach_vm_size_t ool_output_size = 0;
5294 	io_async_ref64_t _reference;
5295 
5296 	if (referenceCnt > ASYNC_REF64_COUNT) {
5297 		return kIOReturnBadArgument;
5298 	}
5299 	for (i = 0; i < referenceCnt; i++) {
5300 		_reference[i] = REF64(reference[i]);
5301 	}
5302 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5303 
5304 	return is_io_connect_async_method(connect,
5305 	           wake_port, _reference, referenceCnt,
5306 	           index,
5307 	           NULL, 0,
5308 	           input, inputCount,
5309 	           0, 0,
5310 	           output, outputCount,
5311 	           NULL, &scalar_outputCnt,
5312 	           0, &ool_output_size);
5313 }
5314 
5315 
5316 kern_return_t
5317 shim_io_async_method_scalarI_scalarO(
5318 	IOExternalAsyncMethod * method,
5319 	IOService *             object,
5320 	mach_port_t             asyncWakePort,
5321 	io_user_reference_t *   asyncReference,
5322 	uint32_t                asyncReferenceCount,
5323 	const io_user_scalar_t * input,
5324 	mach_msg_type_number_t   inputCount,
5325 	io_user_scalar_t * output,
5326 	mach_msg_type_number_t * outputCount )
5327 {
5328 	IOAsyncMethod       func;
5329 	uint32_t            i;
5330 	io_scalar_inband_t  _output;
5331 	IOReturn            err;
5332 	io_async_ref_t      reference;
5333 
5334 	bzero(&_output[0], sizeof(_output));
5335 	for (i = 0; i < asyncReferenceCount; i++) {
5336 		reference[i] = REF32(asyncReference[i]);
5337 	}
5338 
5339 	err = kIOReturnBadArgument;
5340 
5341 	do {
5342 		if (inputCount != method->count0) {
5343 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5344 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5345 			continue;
5346 		}
5347 		if (*outputCount != method->count1) {
5348 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
5349 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5350 			continue;
5351 		}
5352 
5353 		func = method->func;
5354 
5355 		switch (inputCount) {
5356 		case 6:
5357 			err = (object->*func)(  reference,
5358 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5359 			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
5360 			break;
5361 		case 5:
5362 			err = (object->*func)(  reference,
5363 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5364 			    ARG32(input[3]), ARG32(input[4]),
5365 			    &_output[0] );
5366 			break;
5367 		case 4:
5368 			err = (object->*func)(  reference,
5369 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5370 			    ARG32(input[3]),
5371 			    &_output[0], &_output[1] );
5372 			break;
5373 		case 3:
5374 			err = (object->*func)(  reference,
5375 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5376 			    &_output[0], &_output[1], &_output[2] );
5377 			break;
5378 		case 2:
5379 			err = (object->*func)(  reference,
5380 			    ARG32(input[0]), ARG32(input[1]),
5381 			    &_output[0], &_output[1], &_output[2],
5382 			    &_output[3] );
5383 			break;
5384 		case 1:
5385 			err = (object->*func)(  reference,
5386 			    ARG32(input[0]),
5387 			    &_output[0], &_output[1], &_output[2],
5388 			    &_output[3], &_output[4] );
5389 			break;
5390 		case 0:
5391 			err = (object->*func)(  reference,
5392 			    &_output[0], &_output[1], &_output[2],
5393 			    &_output[3], &_output[4], &_output[5] );
5394 			break;
5395 
5396 		default:
5397 			IOLog("%s: Bad method table\n", object->getName());
5398 		}
5399 	}while (false);
5400 
5401 	for (i = 0; i < *outputCount; i++) {
5402 		output[i] = SCALAR32(_output[i]);
5403 	}
5404 
5405 	return err;
5406 }
5407 
5408 
5409 /* Routine io_connect_method_scalarI_structureO */
5410 kern_return_t
5411 is_io_connect_method_scalarI_structureO(
5412 	io_object_t     connect,
5413 	uint32_t        index,
5414 	io_scalar_inband_t input,
5415 	mach_msg_type_number_t  inputCount,
5416 	io_struct_inband_t              output,
5417 	mach_msg_type_number_t *        outputCount )
5418 {
5419 	uint32_t i;
5420 	io_scalar_inband64_t _input;
5421 
5422 	mach_msg_type_number_t scalar_outputCnt = 0;
5423 	mach_vm_size_t ool_output_size = 0;
5424 
5425 	for (i = 0; i < inputCount; i++) {
5426 		_input[i] = SCALAR64(input[i]);
5427 	}
5428 
5429 	return is_io_connect_method(connect, index,
5430 	           _input, inputCount,
5431 	           NULL, 0,
5432 	           0, 0,
5433 	           output, outputCount,
5434 	           NULL, &scalar_outputCnt,
5435 	           0, &ool_output_size);
5436 }
5437 
5438 kern_return_t
5439 shim_io_connect_method_scalarI_structureO(
5440 
5441 	IOExternalMethod *      method,
5442 	IOService *             object,
5443 	const io_user_scalar_t * input,
5444 	mach_msg_type_number_t  inputCount,
5445 	io_struct_inband_t              output,
5446 	IOByteCount *   outputCount )
5447 {
5448 	IOMethod            func;
5449 	IOReturn            err;
5450 
5451 	err = kIOReturnBadArgument;
5452 
5453 	do {
5454 		if (inputCount != method->count0) {
5455 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5456 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5457 			continue;
5458 		}
5459 		if ((kIOUCVariableStructureSize != method->count1)
5460 		    && (*outputCount != method->count1)) {
5461 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5462 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5463 			continue;
5464 		}
5465 
5466 		func = method->func;
5467 
5468 		switch (inputCount) {
5469 		case 5:
5470 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5471 			    ARG32(input[3]), ARG32(input[4]),
5472 			    output );
5473 			break;
5474 		case 4:
5475 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5476 			    ARG32(input[3]),
5477 			    output, (void *)outputCount );
5478 			break;
5479 		case 3:
5480 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5481 			    output, (void *)outputCount, NULL );
5482 			break;
5483 		case 2:
5484 			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
5485 			    output, (void *)outputCount, NULL, NULL );
5486 			break;
5487 		case 1:
5488 			err = (object->*func)(  ARG32(input[0]),
5489 			    output, (void *)outputCount, NULL, NULL, NULL );
5490 			break;
5491 		case 0:
5492 			err = (object->*func)(  output, (void *)outputCount, NULL, NULL, NULL, NULL );
5493 			break;
5494 
5495 		default:
5496 			IOLog("%s: Bad method table\n", object->getName());
5497 		}
5498 	}while (false);
5499 
5500 	return err;
5501 }
5502 
5503 
5504 kern_return_t
5505 shim_io_async_method_scalarI_structureO(
5506 	IOExternalAsyncMethod * method,
5507 	IOService *             object,
5508 	mach_port_t             asyncWakePort,
5509 	io_user_reference_t *   asyncReference,
5510 	uint32_t                asyncReferenceCount,
5511 	const io_user_scalar_t * input,
5512 	mach_msg_type_number_t  inputCount,
5513 	io_struct_inband_t              output,
5514 	mach_msg_type_number_t *        outputCount )
5515 {
5516 	IOAsyncMethod       func;
5517 	uint32_t            i;
5518 	IOReturn            err;
5519 	io_async_ref_t      reference;
5520 
5521 	for (i = 0; i < asyncReferenceCount; i++) {
5522 		reference[i] = REF32(asyncReference[i]);
5523 	}
5524 
5525 	err = kIOReturnBadArgument;
5526 	do {
5527 		if (inputCount != method->count0) {
5528 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5529 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5530 			continue;
5531 		}
5532 		if ((kIOUCVariableStructureSize != method->count1)
5533 		    && (*outputCount != method->count1)) {
5534 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5535 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5536 			continue;
5537 		}
5538 
5539 		func = method->func;
5540 
5541 		switch (inputCount) {
5542 		case 5:
5543 			err = (object->*func)(  reference,
5544 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5545 			    ARG32(input[3]), ARG32(input[4]),
5546 			    output );
5547 			break;
5548 		case 4:
5549 			err = (object->*func)(  reference,
5550 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5551 			    ARG32(input[3]),
5552 			    output, (void *)outputCount );
5553 			break;
5554 		case 3:
5555 			err = (object->*func)(  reference,
5556 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5557 			    output, (void *)outputCount, NULL );
5558 			break;
5559 		case 2:
5560 			err = (object->*func)(  reference,
5561 			    ARG32(input[0]), ARG32(input[1]),
5562 			    output, (void *)outputCount, NULL, NULL );
5563 			break;
5564 		case 1:
5565 			err = (object->*func)(  reference,
5566 			    ARG32(input[0]),
5567 			    output, (void *)outputCount, NULL, NULL, NULL );
5568 			break;
5569 		case 0:
5570 			err = (object->*func)(  reference,
5571 			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
5572 			break;
5573 
5574 		default:
5575 			IOLog("%s: Bad method table\n", object->getName());
5576 		}
5577 	}while (false);
5578 
5579 	return err;
5580 }
5581 
5582 /* Routine io_connect_method_scalarI_structureI */
5583 kern_return_t
5584 is_io_connect_method_scalarI_structureI(
5585 	io_connect_t            connect,
5586 	uint32_t                index,
5587 	io_scalar_inband_t      input,
5588 	mach_msg_type_number_t  inputCount,
5589 	io_struct_inband_t      inputStruct,
5590 	mach_msg_type_number_t  inputStructCount )
5591 {
5592 	uint32_t i;
5593 	io_scalar_inband64_t _input;
5594 
5595 	mach_msg_type_number_t scalar_outputCnt = 0;
5596 	mach_msg_type_number_t inband_outputCnt = 0;
5597 	mach_vm_size_t ool_output_size = 0;
5598 
5599 	for (i = 0; i < inputCount; i++) {
5600 		_input[i] = SCALAR64(input[i]);
5601 	}
5602 
5603 	return is_io_connect_method(connect, index,
5604 	           _input, inputCount,
5605 	           inputStruct, inputStructCount,
5606 	           0, 0,
5607 	           NULL, &inband_outputCnt,
5608 	           NULL, &scalar_outputCnt,
5609 	           0, &ool_output_size);
5610 }
5611 
5612 kern_return_t
5613 shim_io_connect_method_scalarI_structureI(
5614 	IOExternalMethod *  method,
5615 	IOService *         object,
5616 	const io_user_scalar_t * input,
5617 	mach_msg_type_number_t  inputCount,
5618 	io_struct_inband_t              inputStruct,
5619 	mach_msg_type_number_t  inputStructCount )
5620 {
5621 	IOMethod            func;
5622 	IOReturn            err = kIOReturnBadArgument;
5623 
5624 	do{
5625 		if (inputCount != method->count0) {
5626 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5627 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5628 			continue;
5629 		}
5630 		if ((kIOUCVariableStructureSize != method->count1)
5631 		    && (inputStructCount != method->count1)) {
5632 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5633 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5634 			continue;
5635 		}
5636 
5637 		func = method->func;
5638 
5639 		switch (inputCount) {
5640 		case 5:
5641 			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5642 			    ARG32(input[3]), ARG32(input[4]),
5643 			    inputStruct );
5644 			break;
5645 		case 4:
5646 			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *)  input[2],
5647 			    ARG32(input[3]),
5648 			    inputStruct, (void *)(uintptr_t)inputStructCount );
5649 			break;
5650 		case 3:
5651 			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5652 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5653 			    NULL );
5654 			break;
5655 		case 2:
5656 			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5657 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5658 			    NULL, NULL );
5659 			break;
5660 		case 1:
5661 			err = (object->*func)( ARG32(input[0]),
5662 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5663 			    NULL, NULL, NULL );
5664 			break;
5665 		case 0:
5666 			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5667 			    NULL, NULL, NULL, NULL );
5668 			break;
5669 
5670 		default:
5671 			IOLog("%s: Bad method table\n", object->getName());
5672 		}
5673 	}while (false);
5674 
5675 	return err;
5676 }
5677 
5678 kern_return_t
5679 shim_io_async_method_scalarI_structureI(
5680 	IOExternalAsyncMethod * method,
5681 	IOService *             object,
5682 	mach_port_t             asyncWakePort,
5683 	io_user_reference_t *   asyncReference,
5684 	uint32_t                asyncReferenceCount,
5685 	const io_user_scalar_t * input,
5686 	mach_msg_type_number_t  inputCount,
5687 	io_struct_inband_t              inputStruct,
5688 	mach_msg_type_number_t  inputStructCount )
5689 {
5690 	IOAsyncMethod       func;
5691 	uint32_t            i;
5692 	IOReturn            err = kIOReturnBadArgument;
5693 	io_async_ref_t      reference;
5694 
5695 	for (i = 0; i < asyncReferenceCount; i++) {
5696 		reference[i] = REF32(asyncReference[i]);
5697 	}
5698 
5699 	do{
5700 		if (inputCount != method->count0) {
5701 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5702 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5703 			continue;
5704 		}
5705 		if ((kIOUCVariableStructureSize != method->count1)
5706 		    && (inputStructCount != method->count1)) {
5707 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5708 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5709 			continue;
5710 		}
5711 
5712 		func = method->func;
5713 
5714 		switch (inputCount) {
5715 		case 5:
5716 			err = (object->*func)(  reference,
5717 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5718 			    ARG32(input[3]), ARG32(input[4]),
5719 			    inputStruct );
5720 			break;
5721 		case 4:
5722 			err = (object->*func)(  reference,
5723 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5724 			    ARG32(input[3]),
5725 			    inputStruct, (void *)(uintptr_t)inputStructCount );
5726 			break;
5727 		case 3:
5728 			err = (object->*func)(  reference,
5729 			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5730 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5731 			    NULL );
5732 			break;
5733 		case 2:
5734 			err = (object->*func)(  reference,
5735 			    ARG32(input[0]), ARG32(input[1]),
5736 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5737 			    NULL, NULL );
5738 			break;
5739 		case 1:
5740 			err = (object->*func)(  reference,
5741 			    ARG32(input[0]),
5742 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5743 			    NULL, NULL, NULL );
5744 			break;
5745 		case 0:
5746 			err = (object->*func)(  reference,
5747 			    inputStruct, (void *)(uintptr_t)inputStructCount,
5748 			    NULL, NULL, NULL, NULL );
5749 			break;
5750 
5751 		default:
5752 			IOLog("%s: Bad method table\n", object->getName());
5753 		}
5754 	}while (false);
5755 
5756 	return err;
5757 }
5758 
5759 /* Routine io_connect_method_structureI_structureO */
5760 kern_return_t
5761 is_io_connect_method_structureI_structureO(
5762 	io_object_t     connect,
5763 	uint32_t        index,
5764 	io_struct_inband_t              input,
5765 	mach_msg_type_number_t  inputCount,
5766 	io_struct_inband_t              output,
5767 	mach_msg_type_number_t *        outputCount )
5768 {
5769 	mach_msg_type_number_t scalar_outputCnt = 0;
5770 	mach_vm_size_t ool_output_size = 0;
5771 
5772 	return is_io_connect_method(connect, index,
5773 	           NULL, 0,
5774 	           input, inputCount,
5775 	           0, 0,
5776 	           output, outputCount,
5777 	           NULL, &scalar_outputCnt,
5778 	           0, &ool_output_size);
5779 }
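/*
 * This legacy MIG entry point simply forwards to is_io_connect_method() with
 * empty scalar arrays. From userspace a struct-in/struct-out call is normally
 * made through IOKitLib's IOConnectCallStructMethod(); a minimal sketch,
 * assuming an open connection `connect` and a driver-defined selector
 * (MyInStruct, MyOutStruct and kMySelector are hypothetical names):
 *
 *     MyInStruct    in  = { 0 };
 *     MyOutStruct   out = { 0 };
 *     size_t        outSize = sizeof(out);
 *     kern_return_t kr = IOConnectCallStructMethod(connect, kMySelector,
 *         &in, sizeof(in), &out, &outSize);
 */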
5780 
5781 kern_return_t
5782 shim_io_connect_method_structureI_structureO(
5783 	IOExternalMethod *  method,
5784 	IOService *         object,
5785 	io_struct_inband_t              input,
5786 	mach_msg_type_number_t  inputCount,
5787 	io_struct_inband_t              output,
5788 	IOByteCount *   outputCount )
5789 {
5790 	IOMethod            func;
5791 	IOReturn            err = kIOReturnBadArgument;
5792 
5793 	do{
5794 		if ((kIOUCVariableStructureSize != method->count0)
5795 		    && (inputCount != method->count0)) {
5796 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5797 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5798 			continue;
5799 		}
5800 		if ((kIOUCVariableStructureSize != method->count1)
5801 		    && (*outputCount != method->count1)) {
5802 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5803 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5804 			continue;
5805 		}
5806 
5807 		func = method->func;
5808 
5809 		if (method->count1) {
5810 			if (method->count0) {
5811 				err = (object->*func)( input, output,
5812 				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5813 			} else {
5814 				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5815 			}
5816 		} else {
5817 			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5818 		}
5819 	}while (false);
5820 
5821 
5822 	return err;
5823 }
5824 
5825 kern_return_t
5826 shim_io_async_method_structureI_structureO(
5827 	IOExternalAsyncMethod * method,
5828 	IOService *             object,
5829 	mach_port_t           asyncWakePort,
5830 	io_user_reference_t * asyncReference,
5831 	uint32_t              asyncReferenceCount,
5832 	io_struct_inband_t              input,
5833 	mach_msg_type_number_t  inputCount,
5834 	io_struct_inband_t              output,
5835 	mach_msg_type_number_t *        outputCount )
5836 {
5837 	IOAsyncMethod       func;
5838 	uint32_t            i;
5839 	IOReturn            err;
5840 	io_async_ref_t      reference;
5841 
5842 	for (i = 0; i < asyncReferenceCount; i++) {
5843 		reference[i] = REF32(asyncReference[i]);
5844 	}
5845 
5846 	err = kIOReturnBadArgument;
5847 	do{
5848 		if ((kIOUCVariableStructureSize != method->count0)
5849 		    && (inputCount != method->count0)) {
5850 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5851 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5852 			continue;
5853 		}
5854 		if ((kIOUCVariableStructureSize != method->count1)
5855 		    && (*outputCount != method->count1)) {
5856 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5857 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5858 			continue;
5859 		}
5860 
5861 		func = method->func;
5862 
5863 		if (method->count1) {
5864 			if (method->count0) {
5865 				err = (object->*func)( reference,
5866 				    input, output,
5867 				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5868 			} else {
5869 				err = (object->*func)( reference,
5870 				    output, outputCount, NULL, NULL, NULL, NULL );
5871 			}
5872 		} else {
5873 			err = (object->*func)( reference,
5874 			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5875 		}
5876 	}while (false);
5877 
5878 	return err;
5879 }
5880 
5881 /* Routine io_catalog_send_data */
5882 kern_return_t
5883 is_io_catalog_send_data(
5884 	mach_port_t             master_port,
5885 	uint32_t                flag,
5886 	io_buf_ptr_t            inData,
5887 	mach_msg_type_number_t  inDataCount,
5888 	kern_return_t *         result)
5889 {
5890 #if NO_KEXTD
5891 	return kIOReturnNotPrivileged;
5892 #else /* NO_KEXTD */
5893 	OSObject * obj = NULL;
5894 	vm_offset_t data;
5895 	kern_return_t kr = kIOReturnError;
5896 
5897 	//printf("io_catalog_send_data called. flag: %d\n", flag);
5898 
5899 	if (master_port != master_device_port) {
5900 		return kIOReturnNotPrivileged;
5901 	}
5902 
5903 	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
5904 	    flag != kIOCatalogKextdActive &&
5905 	    flag != kIOCatalogKextdFinishedLaunching) &&
5906 	    (!inData || !inDataCount)) {
5907 		return kIOReturnBadArgument;
5908 	}
5909 
5910 	if (!IOTaskHasEntitlement(current_task(), kIOCatalogManagementEntitlement)) {
5911 		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
5912 		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
5913 		OSSafeReleaseNULL(taskName);
5914 		// For now, fake success to not break applications relying on this function succeeding.
5915 		// See <rdar://problem/32554970> for more details.
5916 		return kIOReturnSuccess;
5917 	}
5918 
5919 	if (inData) {
5920 		vm_map_offset_t map_data;
5921 
5922 		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
5923 			return kIOReturnMessageTooLarge;
5924 		}
5925 
5926 		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
5927 		data = CAST_DOWN(vm_offset_t, map_data);
5928 
5929 		if (kr != KERN_SUCCESS) {
5930 			return kr;
5931 		}
5932 
5933 		// must return KERN_SUCCESS once vm_map_copyout() succeeds (the copy has been consumed); report errors via *result
5934 
5935 		if (inDataCount) {
5936 			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
5937 			vm_deallocate( kernel_map, data, inDataCount );
5938 			if (!obj) {
5939 				*result = kIOReturnNoMemory;
5940 				return KERN_SUCCESS;
5941 			}
5942 		}
5943 	}
5944 
5945 	switch (flag) {
5946 	case kIOCatalogResetDrivers:
5947 	case kIOCatalogResetDriversNoMatch: {
5948 		OSArray * array;
5949 
5950 		array = OSDynamicCast(OSArray, obj);
5951 		if (array) {
5952 			if (!gIOCatalogue->resetAndAddDrivers(array,
5953 			    flag == kIOCatalogResetDrivers)) {
5954 				kr = kIOReturnError;
5955 			}
5956 		} else {
5957 			kr = kIOReturnBadArgument;
5958 		}
5959 	}
5960 	break;
5961 
5962 	case kIOCatalogAddDrivers:
5963 	case kIOCatalogAddDriversNoMatch: {
5964 		OSArray * array;
5965 
5966 		array = OSDynamicCast(OSArray, obj);
5967 		if (array) {
5968 			if (!gIOCatalogue->addDrivers( array,
5969 			    flag == kIOCatalogAddDrivers)) {
5970 				kr = kIOReturnError;
5971 			}
5972 		} else {
5973 			kr = kIOReturnBadArgument;
5974 		}
5975 	}
5976 	break;
5977 
5978 	case kIOCatalogRemoveDrivers:
5979 	case kIOCatalogRemoveDriversNoMatch: {
5980 		OSDictionary * dict;
5981 
5982 		dict = OSDynamicCast(OSDictionary, obj);
5983 		if (dict) {
5984 			if (!gIOCatalogue->removeDrivers( dict,
5985 			    flag == kIOCatalogRemoveDrivers )) {
5986 				kr = kIOReturnError;
5987 			}
5988 		} else {
5989 			kr = kIOReturnBadArgument;
5990 		}
5991 	}
5992 	break;
5993 
5994 	case kIOCatalogStartMatching__Removed:
5995 	case kIOCatalogRemoveKernelLinker__Removed:
5996 	case kIOCatalogKextdActive:
5997 	case kIOCatalogKextdFinishedLaunching:
5998 		kr = KERN_NOT_SUPPORTED;
5999 		break;
6000 
6001 	default:
6002 		kr = kIOReturnBadArgument;
6003 		break;
6004 	}
6005 
6006 	if (obj) {
6007 		obj->release();
6008 	}
6009 
6010 	*result = kr;
6011 	return KERN_SUCCESS;
6012 #endif /* NO_KEXTD */
6013 }
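/*
 * Userspace reaches this routine through IOKitLib's IOCatalogueSendData(),
 * passing a serialized (XML) payload. A minimal sketch, assuming the caller
 * holds kIOCatalogManagementEntitlement and that `xml`/`xmlLen` hold a
 * serialized OSArray of driver personalities (hypothetical variables):
 *
 *     kern_return_t kr = IOCatalogueSendData(kIOMasterPortDefault,
 *         kIOCatalogAddDrivers, xml, xmlLen);
 *
 * Unentitled callers are logged and receive kIOReturnSuccess without any
 * catalogue change (see the entitlement check above).
 */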
6014 
6015 /* Routine io_catalog_terminate */
6016 kern_return_t
6017 is_io_catalog_terminate(
6018 	mach_port_t master_port,
6019 	uint32_t flag,
6020 	io_name_t name )
6021 {
6022 	kern_return_t          kr;
6023 
6024 	if (master_port != master_device_port) {
6025 		return kIOReturnNotPrivileged;
6026 	}
6027 
6028 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6029 	    kIOClientPrivilegeAdministrator );
6030 	if (kIOReturnSuccess != kr) {
6031 		return kr;
6032 	}
6033 
6034 	switch (flag) {
6035 #if !defined(SECURE_KERNEL)
6036 	case kIOCatalogServiceTerminate:
6037 		kr = gIOCatalogue->terminateDrivers(NULL, name);
6038 		break;
6039 
6040 	case kIOCatalogModuleUnload:
6041 	case kIOCatalogModuleTerminate:
6042 		kr = gIOCatalogue->terminateDriversForModule(name,
6043 		    flag == kIOCatalogModuleUnload);
6044 		break;
6045 #endif
6046 
6047 	default:
6048 		kr = kIOReturnBadArgument;
6049 		break;
6050 	}
6051 
6052 	return kr;
6053 }
6054 
6055 /* Routine io_catalog_get_data */
6056 kern_return_t
6057 is_io_catalog_get_data(
6058 	mach_port_t             master_port,
6059 	uint32_t                flag,
6060 	io_buf_ptr_t            *outData,
6061 	mach_msg_type_number_t  *outDataCount)
6062 {
6063 	kern_return_t kr = kIOReturnSuccess;
6064 	OSSerialize * s;
6065 
6066 	if (master_port != master_device_port) {
6067 		return kIOReturnNotPrivileged;
6068 	}
6069 
6070 	//printf("io_catalog_get_data called. flag: %d\n", flag);
6071 
6072 	s = OSSerialize::withCapacity(4096);
6073 	if (!s) {
6074 		return kIOReturnNoMemory;
6075 	}
6076 
6077 	kr = gIOCatalogue->serializeData(flag, s);
6078 
6079 	if (kr == kIOReturnSuccess) {
6080 		vm_offset_t data;
6081 		vm_map_copy_t copy;
6082 		unsigned int size;
6083 
6084 		size = s->getLength();
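		// Copy the serialized catalogue text into a fresh kernel allocation and
		// wrap it in a vm_map_copy_t so MIG can return it out-of-line; the copy
		// object (and the backing pages) are handed off to the reply message.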
6085 		kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
6086 		if (kr == kIOReturnSuccess) {
6087 			bcopy(s->text(), (void *)data, size);
6088 			kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
6089 			    size, true, &copy);
6090 			*outData = (char *)copy;
6091 			*outDataCount = size;
6092 		}
6093 	}
6094 
6095 	s->release();
6096 
6097 	return kr;
6098 }
6099 
6100 /* Routine io_catalog_get_gen_count */
6101 kern_return_t
6102 is_io_catalog_get_gen_count(
6103 	mach_port_t             master_port,
6104 	uint32_t                *genCount)
6105 {
6106 	if (master_port != master_device_port) {
6107 		return kIOReturnNotPrivileged;
6108 	}
6109 
6110 	//printf("io_catalog_get_gen_count called.\n");
6111 
6112 	if (!genCount) {
6113 		return kIOReturnBadArgument;
6114 	}
6115 
6116 	*genCount = gIOCatalogue->getGenerationCount();
6117 
6118 	return kIOReturnSuccess;
6119 }
6120 
6121 /* Routine io_catalog_module_loaded.
6122  * Invoked from IOKitLib's IOCatalogueModuleLoaded(); it does not appear to have any remaining callers.
6123  */
6124 kern_return_t
6125 is_io_catalog_module_loaded(
6126 	mach_port_t             master_port,
6127 	io_name_t               name)
6128 {
6129 	if (master_port != master_device_port) {
6130 		return kIOReturnNotPrivileged;
6131 	}
6132 
6133 	//printf("io_catalog_module_loaded called. name %s\n", name);
6134 
6135 	if (!name) {
6136 		return kIOReturnBadArgument;
6137 	}
6138 
6139 	gIOCatalogue->moduleHasLoaded(name);
6140 
6141 	return kIOReturnSuccess;
6142 }
6143 
6144 kern_return_t
6145 is_io_catalog_reset(
6146 	mach_port_t             master_port,
6147 	uint32_t                flag)
6148 {
6149 	if (master_port != master_device_port) {
6150 		return kIOReturnNotPrivileged;
6151 	}
6152 
6153 	switch (flag) {
6154 	case kIOCatalogResetDefault:
6155 		gIOCatalogue->reset();
6156 		break;
6157 
6158 	default:
6159 		return kIOReturnBadArgument;
6160 	}
6161 
6162 	return kIOReturnSuccess;
6163 }
6164 
6165 kern_return_t
6166 iokit_user_client_trap(struct iokit_user_client_trap_args *args)
6167 {
6168 	kern_return_t  result = kIOReturnBadArgument;
6169 	IOUserClient * userClient;
6170 	OSObject     * object;
6171 	uintptr_t      ref;
6172 
6173 	ref = (uintptr_t) args->userClientRef;
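	// Bit 32 set in the user client reference marks a DriverKit (uext) object;
	// such calls are routed to IOUserServerUEXTTrap() rather than through the
	// IOExternalTrap table below.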
6174 	if ((1ULL << 32) & ref) {
6175 		object = iokit_lookup_uext_ref_current_task((mach_port_name_t) ref);
6176 		if (object) {
6177 			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
6178 		}
6179 		OSSafeReleaseNULL(object);
6180 	} else if ((userClient = OSDynamicCast(IOUserClient, iokit_lookup_connect_ref_current_task((mach_port_name_t) ref)))) {
6181 		IOExternalTrap *trap = NULL;
6182 		IOService *target = NULL;
6183 
6184 		result = kIOReturnSuccess;
6185 		io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
6186 		if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
6187 			result = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_trap, args->index);
6188 		}
6189 		if (kIOReturnSuccess == result) {
6190 			trap = userClient->getTargetAndTrapForIndex(&target, args->index);
6191 		}
6192 		if (trap && target) {
6193 			IOTrap func;
6194 
6195 			func = trap->func;
6196 
6197 			if (func) {
6198 				result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
6199 			}
6200 		}
6201 
6202 		iokit_remove_connect_reference(userClient);
6203 	}
6204 
6205 	return result;
6206 }
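/*
 * From userspace this trap is normally reached through IOKitLib's
 * IOConnectTrap0()..IOConnectTrap6() wrappers rather than by a Mach message.
 * A minimal sketch, assuming an open connection `connect` whose user client
 * publishes a trap at index kMyTrapIndex (a hypothetical name):
 *
 *     kern_return_t kr = IOConnectTrap6(connect, kMyTrapIndex,
 *         p1, p2, p3, p4, p5, p6);
 */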
6207 
6208 /* Routine io_device_tree_entry_exists_with_name */
6209 kern_return_t
6210 is_io_device_tree_entry_exists_with_name(
6211 	mach_port_t master_port,
6212 	io_name_t name,
6213 	boolean_t *exists )
6214 {
6215 	OSCollectionIterator *iter;
6216 
6217 	if (master_port != master_device_port) {
6218 		return kIOReturnNotPrivileged;
6219 	}
6220 
6221 	iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6222 	*exists = iter && iter->getNextObject();
6223 	OSSafeReleaseNULL(iter);
6224 
6225 	return kIOReturnSuccess;
6226 }
6227 } /* extern "C" */
6228 
6229 IOReturn
6230 IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
6231     IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
6232 {
6233 	IOReturn    err;
6234 	IOService * object;
6235 	IOByteCount structureOutputSize;
6236 
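	// When the subclass supplies an IOExternalMethodDispatch, validate the
	// declared scalar/structure counts and invoke it directly; otherwise fall
	// back to the legacy IOExternalMethod / IOExternalAsyncMethod shims below.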
6237 	if (dispatch) {
6238 		uint32_t count;
6239 		count = dispatch->checkScalarInputCount;
6240 		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6241 			return kIOReturnBadArgument;
6242 		}
6243 
6244 		count = dispatch->checkStructureInputSize;
6245 		if ((kIOUCVariableStructureSize != count)
6246 		    && (count != ((args->structureInputDescriptor)
6247 		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6248 			return kIOReturnBadArgument;
6249 		}
6250 
6251 		count = dispatch->checkScalarOutputCount;
6252 		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6253 			return kIOReturnBadArgument;
6254 		}
6255 
6256 		count = dispatch->checkStructureOutputSize;
6257 		if ((kIOUCVariableStructureSize != count)
6258 		    && (count != ((args->structureOutputDescriptor)
6259 		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6260 			return kIOReturnBadArgument;
6261 		}
6262 
6263 		if (dispatch->function) {
6264 			err = (*dispatch->function)(target, reference, args);
6265 		} else {
6266 			err = kIOReturnNoCompletion; /* implementor can dispatch */
6267 		}
6268 		return err;
6269 	}
6270 
6271 
6272 	// pre-Leopard APIs don't do out-of-line (OOL) structs
6273 	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
6274 		err = kIOReturnIPCError;
6275 		return err;
6276 	}
6277 
6278 	structureOutputSize = args->structureOutputSize;
6279 
6280 	if (args->asyncWakePort) {
6281 		IOExternalAsyncMethod * method;
6282 		object = NULL;
6283 		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
6284 			return kIOReturnUnsupported;
6285 		}
6286 
6287 		if (kIOUCForegroundOnly & method->flags) {
6288 			if (task_is_gpu_denied(current_task())) {
6289 				return kIOReturnNotPermitted;
6290 			}
6291 		}
6292 
6293 		switch (method->flags & kIOUCTypeMask) {
6294 		case kIOUCScalarIStructI:
6295 			err = shim_io_async_method_scalarI_structureI( method, object,
6296 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6297 			    args->scalarInput, args->scalarInputCount,
6298 			    (char *)args->structureInput, args->structureInputSize );
6299 			break;
6300 
6301 		case kIOUCScalarIScalarO:
6302 			err = shim_io_async_method_scalarI_scalarO( method, object,
6303 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6304 			    args->scalarInput, args->scalarInputCount,
6305 			    args->scalarOutput, &args->scalarOutputCount );
6306 			break;
6307 
6308 		case kIOUCScalarIStructO:
6309 			err = shim_io_async_method_scalarI_structureO( method, object,
6310 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6311 			    args->scalarInput, args->scalarInputCount,
6312 			    (char *) args->structureOutput, &args->structureOutputSize );
6313 			break;
6314 
6315 
6316 		case kIOUCStructIStructO:
6317 			err = shim_io_async_method_structureI_structureO( method, object,
6318 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6319 			    (char *)args->structureInput, args->structureInputSize,
6320 			    (char *) args->structureOutput, &args->structureOutputSize );
6321 			break;
6322 
6323 		default:
6324 			err = kIOReturnBadArgument;
6325 			break;
6326 		}
6327 	} else {
6328 		IOExternalMethod *      method;
6329 		object = NULL;
6330 		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
6331 			return kIOReturnUnsupported;
6332 		}
6333 
6334 		if (kIOUCForegroundOnly & method->flags) {
6335 			if (task_is_gpu_denied(current_task())) {
6336 				return kIOReturnNotPermitted;
6337 			}
6338 		}
6339 
6340 		switch (method->flags & kIOUCTypeMask) {
6341 		case kIOUCScalarIStructI:
6342 			err = shim_io_connect_method_scalarI_structureI( method, object,
6343 			    args->scalarInput, args->scalarInputCount,
6344 			    (char *) args->structureInput, args->structureInputSize );
6345 			break;
6346 
6347 		case kIOUCScalarIScalarO:
6348 			err = shim_io_connect_method_scalarI_scalarO( method, object,
6349 			    args->scalarInput, args->scalarInputCount,
6350 			    args->scalarOutput, &args->scalarOutputCount );
6351 			break;
6352 
6353 		case kIOUCScalarIStructO:
6354 			err = shim_io_connect_method_scalarI_structureO( method, object,
6355 			    args->scalarInput, args->scalarInputCount,
6356 			    (char *) args->structureOutput, &structureOutputSize );
6357 			break;
6358 
6359 
6360 		case kIOUCStructIStructO:
6361 			err = shim_io_connect_method_structureI_structureO( method, object,
6362 			    (char *) args->structureInput, args->structureInputSize,
6363 			    (char *) args->structureOutput, &structureOutputSize );
6364 			break;
6365 
6366 		default:
6367 			err = kIOReturnBadArgument;
6368 			break;
6369 		}
6370 	}
6371 
6372 	if (structureOutputSize > UINT_MAX) {
6373 		structureOutputSize = 0;
6374 		err = kIOReturnBadArgument;
6375 	}
6376 
6377 	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);
6378 
6379 	return err;
6380 }
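/*
 * A typical IOUserClient subclass routes selectors through a static table of
 * IOExternalMethodDispatch entries and then defers to this implementation for
 * the argument checks above. A sketch (MyUserClient, sMethods and
 * kMyMethodCount are hypothetical names):
 *
 *     IOReturn
 *     MyUserClient::externalMethod(uint32_t selector,
 *         IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *         OSObject * target, void * reference)
 *     {
 *         if (selector < kMyMethodCount) {
 *             dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *             if (!target) {
 *                 target = this;
 *             }
 *         }
 *         return IOUserClient::externalMethod(selector, args, dispatch,
 *                    target, reference);
 *     }
 */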
6381 
6382 IOReturn
6383 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6384 {
6385 	if (size < sizeof(*callbacks)) {
6386 		return kIOReturnBadArgument;
6387 	}
6388 	if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6389 		return kIOReturnBusy;
6390 	}
6391 	return kIOReturnSuccess;
6392 }
6393 
6394 #if __LP64__
6395 OSMetaClassDefineReservedUnused(IOUserClient, 0);
6396 OSMetaClassDefineReservedUnused(IOUserClient, 1);
6397 #else
6398 OSMetaClassDefineReservedUsed(IOUserClient, 0);
6399 OSMetaClassDefineReservedUsed(IOUserClient, 1);
6400 #endif
6401 OSMetaClassDefineReservedUnused(IOUserClient, 2);
6402 OSMetaClassDefineReservedUnused(IOUserClient, 3);
6403 OSMetaClassDefineReservedUnused(IOUserClient, 4);
6404 OSMetaClassDefineReservedUnused(IOUserClient, 5);
6405 OSMetaClassDefineReservedUnused(IOUserClient, 6);
6406 OSMetaClassDefineReservedUnused(IOUserClient, 7);
6407 OSMetaClassDefineReservedUnused(IOUserClient, 8);
6408 OSMetaClassDefineReservedUnused(IOUserClient, 9);
6409 OSMetaClassDefineReservedUnused(IOUserClient, 10);
6410 OSMetaClassDefineReservedUnused(IOUserClient, 11);
6411 OSMetaClassDefineReservedUnused(IOUserClient, 12);
6412 OSMetaClassDefineReservedUnused(IOUserClient, 13);
6413 OSMetaClassDefineReservedUnused(IOUserClient, 14);
6414 OSMetaClassDefineReservedUnused(IOUserClient, 15);
6415