1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52 #include <vm/vm_kern_xnu.h>
53
54 #include <mach/sdt.h>
55 #include <os/hash.h>
56
57 #include <libkern/amfi/amfi.h>
58
59 #if CONFIG_MACF
60
61 extern "C" {
62 #include <security/mac_framework.h>
63 };
64 #include <sys/kauth.h>
65
66 #define IOMACF_LOG 0
67
68 #endif /* CONFIG_MACF */
69
70 #include <IOKit/assert.h>
71
72 #include "IOServicePrivate.h"
73 #include "IOKitKernelInternal.h"
74
75 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
76 #define SCALAR32(x) ((uint32_t )x)
77 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
78 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
79 #define REF32(x) ((int)(x))
80
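// Flag bits kept in the low bits of asyncRef[kIOAsyncReservedIndex] alongside
// the wake port value; kIOUCAsync64Flag marks a 64-bit client (set by
// setAsyncReference64() when given a task), and kIOUCAsync0Flags masks both bits.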
81 enum{
82 kIOUCAsync0Flags = 3ULL,
83 kIOUCAsync64Flag = 1ULL,
84 kIOUCAsyncErrorLoggedFlag = 2ULL
85 };
86
87 #if IOKITSTATS
88
89 #define IOStatisticsRegisterCounter() \
90 do { \
91 reserved->counter = IOStatistics::registerUserClient(this); \
92 } while (0)
93
94 #define IOStatisticsUnregisterCounter() \
95 do { \
96 if (reserved) \
97 IOStatistics::unregisterUserClient(reserved->counter); \
98 } while (0)
99
100 #define IOStatisticsClientCall() \
101 do { \
102 IOStatistics::countUserClientCall(client); \
103 } while (0)
104
105 #else
106
107 #define IOStatisticsRegisterCounter()
108 #define IOStatisticsUnregisterCounter()
109 #define IOStatisticsClientCall()
110
111 #endif /* IOKITSTATS */
112
113 #if DEVELOPMENT || DEBUG
114
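// FAKE_STACK_FRAME temporarily overwrites the saved return address of the
// current frame with the supplied pointer (a metaclass at the call sites below)
// so backtraces taken during client callouts identify the caller;
// FAKE_STACK_FRAME_END restores the original return address.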
115 #define FAKE_STACK_FRAME(a) \
116 const void ** __frameptr; \
117 const void * __retaddr; \
118 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
119 __retaddr = __frameptr[1]; \
120 __frameptr[1] = (a);
121
122 #define FAKE_STACK_FRAME_END() \
123 __frameptr[1] = __retaddr;
124
125 #else /* DEVELOPMENT || DEBUG */
126
127 #define FAKE_STACK_FRAME(a)
128 #define FAKE_STACK_FRAME_END()
129
130 #endif /* DEVELOPMENT || DEBUG */
131
132 #define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t))
133 #define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
134
135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
136
137 extern "C" {
138 #include <mach/mach_traps.h>
139 #include <vm/vm_map_xnu.h>
140 } /* extern "C" */
141
142 struct IOMachPortHashList;
143
144 static_assert(IKOT_MAX_TYPE <= 255);
145
146 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
147
148 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
149 class IOMachPort : public OSObject
150 {
151 OSDeclareDefaultStructors(IOMachPort);
152 public:
153 mach_port_mscount_t mscount;
154 IOLock lock;
155 SLIST_ENTRY(IOMachPort) link;
156 ipc_port_t port;
157 OSObject* XNU_PTRAUTH_SIGNED_PTR("IOMachPort.object") object;
158
159 static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);
160
161 static IOMachPortHashList* bucketForObject(OSObject *obj,
162 ipc_kobject_type_t type);
163
164 static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);
165
166 static bool noMoreSendersForObject( OSObject * obj,
167 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
168 static void releasePortForObject( OSObject * obj,
169 ipc_kobject_type_t type );
170
171 static mach_port_name_t makeSendRightForTask( task_t task,
172 io_object_t obj, ipc_kobject_type_t type );
173
174 virtual void free() APPLE_KEXT_OVERRIDE;
175 };
176
177 #define super OSObject
178 OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)
179
180 static IOLock * gIOObjectPortLock;
181 IOLock * gIOUserServerLock;
182
183 SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;
184
185 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
186
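// Chained hash table mapping OSObjects to their IOMachPort wrappers: an array
// of singly-linked buckets indexed by a hash of the object pointer (see
// IOMachPort::bucketForObject). All bucket manipulation happens under
// gIOObjectPortLock.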
187 SLIST_HEAD(IOMachPortHashList, IOMachPort);
188
189 #if defined(XNU_TARGET_OS_OSX)
190 #define PORT_HASH_SIZE 4096
191 #else /* !defined(XNU_TARGET_OS_OSX) */
192 #define PORT_HASH_SIZE 256
193 #endif /* !defined(XNU_TARGET_OS_OSX) */
194
195 IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
196
197 void
198 IOMachPortInitialize(void)
199 {
200 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
201 SLIST_INIT(&gIOMachPortHash[i]);
202 }
203 }
204
205 IOMachPortHashList*
206 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
207 {
208 return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
209 }
210
211 IOMachPort*
212 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
213 {
214 IOMachPort *machPort;
215
216 SLIST_FOREACH(machPort, bucket, link) {
217 if (machPort->object == obj && iokit_port_type(machPort->port) == type) {
218 return machPort;
219 }
220 }
221 return NULL;
222 }
223
224 IOMachPort*
225 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
226 {
227 IOMachPort *machPort = NULL;
228
229 machPort = new IOMachPort;
230 if (__improbable(machPort && !machPort->init())) {
231 OSSafeReleaseNULL(machPort);
232 return NULL;
233 }
234
235 machPort->object = obj;
236 machPort->port = iokit_alloc_object_port(machPort, type);
237 IOLockInlineInit(&machPort->lock);
238
239 obj->taggedRetain(OSTypeID(OSCollection));
240 machPort->mscount++;
241
242 return machPort;
243 }
244
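// Called via iokit_client_died() when a no-senders notification arrives for an
// object's port. The port is only torn down if no new send rights were created
// since the notification was generated; otherwise the updated make-send count
// is returned so the caller can retry.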
245 bool
246 IOMachPort::noMoreSendersForObject( OSObject * obj,
247 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
248 {
249 IOMachPort *machPort = NULL;
250 IOUserClient *uc;
251 OSAction *action;
252 bool destroyed = true;
253
254 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
255
256 obj->retain();
257
258 lck_mtx_lock(gIOObjectPortLock);
259
260 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
261
262 if (machPort) {
263 destroyed = (machPort->mscount <= *mscount);
264 if (!destroyed) {
265 *mscount = machPort->mscount;
266 lck_mtx_unlock(gIOObjectPortLock);
267 } else {
268 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
269 uc->noMoreSenders();
270 }
271 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
272
273 IOLockLock(&machPort->lock);
274 iokit_remove_object_port(machPort->port, type);
275 machPort->object = NULL;
276 IOLockUnlock(&machPort->lock);
277
278 lck_mtx_unlock(gIOObjectPortLock);
279
280 OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
281
282 obj->taggedRelease(OSTypeID(OSCollection));
283 }
284 } else {
285 lck_mtx_unlock(gIOObjectPortLock);
286 }
287
288 if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
289 action->Aborted();
290 }
291
292 if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
293 // Leak object
294 obj->retain();
295 }
296
297 obj->release();
298
299 return destroyed;
300 }
301
302 void
303 IOMachPort::releasePortForObject( OSObject * obj,
304 ipc_kobject_type_t type )
305 {
306 IOMachPort *machPort;
307 IOService *service;
308 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
309
310 assert(IKOT_IOKIT_CONNECT != type);
311
312 lck_mtx_lock(gIOObjectPortLock);
313
314 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
315
316 if (machPort
317 && (type == IKOT_IOKIT_OBJECT)
318 && (service = OSDynamicCast(IOService, obj))
319 && !service->machPortHoldDestroy()) {
320 obj->retain();
321 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
322
323 IOLockLock(&machPort->lock);
324 iokit_remove_object_port(machPort->port, type);
325 machPort->object = NULL;
326 IOLockUnlock(&machPort->lock);
327
328 lck_mtx_unlock(gIOObjectPortLock);
329
330 OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
331
332 obj->taggedRelease(OSTypeID(OSCollection));
333 obj->release();
334 } else {
335 lck_mtx_unlock(gIOObjectPortLock);
336 }
337 }
338
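// Tears down the external port references for an object. If an IOUserClient
// still has outstanding memory mappings, its connect port is re-targeted at
// the mappings object instead of being destroyed.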
339 void
340 IOUserClient::destroyUserReferences( OSObject * obj )
341 {
342 IOMachPort *machPort;
343 bool destroyPort;
344
345 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
346
347 // panther, 3160200
348 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
349
350 obj->retain();
351 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
352 IOMachPortHashList *mappingBucket = NULL;
353
354 lck_mtx_lock(gIOObjectPortLock);
355
356 IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
357 if (uc && uc->mappings) {
358 mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
359 }
360
361 machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);
362
363 if (machPort == NULL) {
364 lck_mtx_unlock(gIOObjectPortLock);
365 goto end;
366 }
367
368 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
369 obj->taggedRelease(OSTypeID(OSCollection));
370
371 destroyPort = true;
372 if (uc) {
373 uc->noMoreSenders();
374 if (uc->mappings) {
375 uc->mappings->taggedRetain(OSTypeID(OSCollection));
376 SLIST_INSERT_HEAD(mappingBucket, machPort, link);
377
378 IOLockLock(&machPort->lock);
379 machPort->object = uc->mappings;
380 IOLockUnlock(&machPort->lock);
381
382 lck_mtx_unlock(gIOObjectPortLock);
383
384 OSSafeReleaseNULL(uc->mappings);
385 destroyPort = false;
386 }
387 }
388
389 if (destroyPort) {
390 IOLockLock(&machPort->lock);
391 iokit_remove_object_port(machPort->port, IKOT_IOKIT_CONNECT);
392 machPort->object = NULL;
393 IOLockUnlock(&machPort->lock);
394
395 lck_mtx_unlock(gIOObjectPortLock);
396 OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
397 }
398
399 end:
400 OSSafeReleaseNULL(obj);
401 }
402
403 mach_port_name_t
404 IOMachPort::makeSendRightForTask( task_t task,
405 io_object_t obj, ipc_kobject_type_t type )
406 {
407 return iokit_make_send_right( task, obj, type );
408 }
409
410 void
411 IOMachPort::free( void )
412 {
413 if (port) {
414 iokit_destroy_object_port(port, iokit_port_type(port));
415 }
416 IOLockInlineDestroy(&lock);
417 super::free();
418 }
419
420 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
421
422 static bool
423 IOTaskRegistryCompatibility(task_t task)
424 {
425 return false;
426 }
427
428 static void
429 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
430 {
431 matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
432 if (!IOTaskRegistryCompatibility(task)) {
433 return;
434 }
435 matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
436 }
437
438 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
439
440 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
441
442 IOUserIterator *
443 IOUserIterator::withIterator(OSIterator * iter)
444 {
445 IOUserIterator * me;
446
447 if (!iter) {
448 return NULL;
449 }
450
451 me = new IOUserIterator;
452 if (me && !me->init()) {
453 me->release();
454 me = NULL;
455 }
456 if (!me) {
457 iter->release();
458 return me;
459 }
460 me->userIteratorObject = iter;
461
462 return me;
463 }
464
465 bool
466 IOUserIterator::init( void )
467 {
468 if (!OSObject::init()) {
469 return false;
470 }
471
472 IOLockInlineInit(&lock);
473 return true;
474 }
475
476 void
477 IOUserIterator::free()
478 {
479 if (userIteratorObject) {
480 userIteratorObject->release();
481 }
482 IOLockInlineDestroy(&lock);
483 OSObject::free();
484 }
485
486 void
487 IOUserIterator::reset()
488 {
489 IOLockLock(&lock);
490 assert(OSDynamicCast(OSIterator, userIteratorObject));
491 ((OSIterator *)userIteratorObject)->reset();
492 IOLockUnlock(&lock);
493 }
494
495 bool
496 IOUserIterator::isValid()
497 {
498 bool ret;
499
500 IOLockLock(&lock);
501 assert(OSDynamicCast(OSIterator, userIteratorObject));
502 ret = ((OSIterator *)userIteratorObject)->isValid();
503 IOLockUnlock(&lock);
504
505 return ret;
506 }
507
508 OSObject *
509 IOUserIterator::getNextObject()
510 {
511 assert(false);
512 return NULL;
513 }
514
515 OSObject *
516 IOUserIterator::copyNextObject()
517 {
518 OSObject * ret = NULL;
519
520 IOLockLock(&lock);
521 if (userIteratorObject) {
522 ret = ((OSIterator *)userIteratorObject)->getNextObject();
523 if (ret) {
524 ret->retain();
525 }
526 }
527 IOLockUnlock(&lock);
528
529 return ret;
530 }
531
532 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
533 extern "C" {
534 // functions called from osfmk/device/iokit_rpc.c
535
536 void
537 iokit_port_object_description(io_object_t obj, kobject_description_t desc)
538 {
539 IORegistryEntry * regEntry;
540 IOUserNotification * __unused noti;
541 _IOServiceNotifier * __unused serviceNoti;
542 OSSerialize * __unused s;
543 OSDictionary * __unused matching = NULL;
544
545 if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
546 snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
547 #if DEVELOPMENT || DEBUG
548 } else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
549 // serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
550 IOLockLock(gIOObjectPortLock);
551 serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
552 if (serviceNoti && (matching = serviceNoti->matching)) {
553 matching->retain();
554 }
555 IOLockUnlock(gIOObjectPortLock);
556
557 if (matching) {
558 s = OSSerialize::withCapacity((unsigned int) page_size);
559 if (s && matching->serialize(s)) {
560 snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
561 }
562 OSSafeReleaseNULL(s);
563 OSSafeReleaseNULL(matching);
564 }
565 #endif /* DEVELOPMENT || DEBUG */
566 } else {
567 snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
568 }
569 }
570
571 // FIXME: Implementations of these functions are hidden from the static analyzer.
572 // As for now, the analyzer doesn't consistently support wrapper functions
573 // for retain and release.
574 #ifndef __clang_analyzer__
575 void
576 iokit_add_reference( io_object_t obj, natural_t type )
577 {
578 if (!obj) {
579 return;
580 }
581 obj->retain();
582 }
583
584 void
585 iokit_remove_reference( io_object_t obj )
586 {
587 if (obj) {
588 obj->release();
589 }
590 }
591 #endif // __clang_analyzer__
592
593 void
594 iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
595 {
596 if (!obj) {
597 return;
598 }
599 obj->release();
600 }
601
602 enum {
603 kIPCLockNone = 0,
604 kIPCLockRead = 1,
605 kIPCLockWrite = 2
606 };
607
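// ipcEnter()/ipcExit() bracket user client calls arriving over IPC. __ipc
// counts in-flight calls; when the last one drains on an inactive client whose
// finalization was deferred (__ipcFinal), the finalize is scheduled here.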
608 void
609 IOUserClient::ipcEnter(int locking)
610 {
611 switch (locking) {
612 case kIPCLockWrite:
613 IORWLockWrite(&lock);
614 break;
615 case kIPCLockRead:
616 IORWLockRead(&lock);
617 break;
618 case kIPCLockNone:
619 break;
620 default:
621 panic("ipcEnter");
622 }
623
624 OSIncrementAtomic(&__ipc);
625 }
626
627 void
628 IOUserClient::ipcExit(int locking)
629 {
630 bool finalize = false;
631
632 assert(__ipc);
633 if (1 == OSDecrementAtomic(&__ipc) && isInactive()) {
634 IOLockLock(gIOObjectPortLock);
635 if ((finalize = __ipcFinal)) {
636 __ipcFinal = false;
637 }
638 IOLockUnlock(gIOObjectPortLock);
639 if (finalize) {
640 scheduleFinalize(true);
641 }
642 }
643 switch (locking) {
644 case kIPCLockWrite:
645 case kIPCLockRead:
646 IORWLockUnlock(&lock);
647 break;
648 case kIPCLockNone:
649 break;
650 default:
651 panic("ipcExit");
652 }
653 }
654
655 void
656 iokit_kobject_retain(io_kobject_t machPort)
657 {
658 assert(OSDynamicCast(IOMachPort, machPort));
659 machPort->retain();
660 }
661
662 io_object_t
663 iokit_copy_object_for_consumed_kobject(LIBKERN_CONSUMED io_kobject_t machPort, natural_t type)
664 {
665 io_object_t result;
666
667 assert(OSDynamicCast(IOMachPort, machPort));
668
669 IOLockLock(&machPort->lock);
670 result = machPort->object;
671 if (result) {
672 iokit_add_reference(result, type);
673 }
674 IOLockUnlock(&machPort->lock);
675 machPort->release();
676 return result;
677 }
678
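// Returns false if the client still has IPC calls in flight; in that case
// __ipcFinal is set so that ipcExit() performs the deferred finalize.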
679 bool
680 IOUserClient::finalizeUserReferences(OSObject * obj)
681 {
682 IOUserClient * uc;
683 bool ok = true;
684
685 if ((uc = OSDynamicCast(IOUserClient, obj))) {
686 IOLockLock(gIOObjectPortLock);
687 if ((uc->__ipcFinal = (0 != uc->__ipc))) {
688 ok = false;
689 }
690 IOLockUnlock(gIOObjectPortLock);
691 }
692 return ok;
693 }
694
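// Looks up, or lazily creates, the IOMachPort for an object and returns its
// ipc_port with an extra reference, bumping the make-send count so that
// no-senders handling in noMoreSendersForObject() stays in sync.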
695 ipc_port_t
696 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type, ipc_kobject_t * kobj )
697 {
698 IOMachPort *machPort = NULL;
699 ipc_port_t port = NULL;
700
701 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
702
703 lck_mtx_lock(gIOObjectPortLock);
704
705 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
706
707 if (__improbable(machPort == NULL)) {
708 machPort = IOMachPort::withObjectAndType(obj, type);
709 if (__improbable(machPort == NULL)) {
710 goto end;
711 }
712 SLIST_INSERT_HEAD(bucket, machPort, link);
713 } else {
714 machPort->mscount++;
715 }
716
717 iokit_retain_port(machPort->port);
718 port = machPort->port;
719
720 end:
721 if (kobj) {
722 *kobj = machPort;
723 }
724 lck_mtx_unlock(gIOObjectPortLock);
725
726 return port;
727 }
728
729 kern_return_t
730 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
731 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
732 {
733 IOUserClient * client;
734 IOMemoryMap * map;
735 IOUserNotification * notify;
736 IOUserServerCheckInToken * token;
737
738 if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
739 return kIOReturnNotReady;
740 }
741
742 switch (type) {
743 case IKOT_IOKIT_CONNECT:
744 if ((client = OSDynamicCast( IOUserClient, obj ))) {
745 IOStatisticsClientCall();
746 IORWLockWrite(&client->lock);
747 client->clientDied();
748 IORWLockUnlock(&client->lock);
749 }
750 break;
751 case IKOT_IOKIT_OBJECT:
752 if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
753 map->taskDied();
754 } else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
755 notify->setNotification( NULL );
756 }
757 break;
758 case IKOT_IOKIT_IDENT:
759 if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
760 token->cancel();
761 }
762 break;
763 }
764
765 return kIOReturnSuccess;
766 }
767 }; /* extern "C" */
768
769 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
770
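// IOServiceUserNotification queues matched services and pings the client's
// notification port with a single message when the queue transitions from
// empty to non-empty (the 'armed' flag); the client then drains the queue via
// copyNextObject().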
771 class IOServiceUserNotification : public IOUserNotification
772 {
773 OSDeclareDefaultStructors(IOServiceUserNotification);
774
775 struct PingMsgKdata {
776 mach_msg_header_t msgHdr;
777 };
778 struct PingMsgUdata {
779 OSNotificationHeader64 notifyHeader;
780 };
781
782 enum { kMaxOutstanding = 1024 };
783
784 ipc_port_t remotePort;
785 void *msgReference;
786 mach_msg_size_t msgReferenceSize;
787 natural_t msgType;
788 OSArray * newSet;
789 bool armed;
790 bool ipcLogged;
791
792 public:
793
794 virtual bool init( mach_port_t port, natural_t type,
795 void * reference, vm_size_t referenceSize,
796 bool clientIs64 );
797 virtual void free() APPLE_KEXT_OVERRIDE;
798 void invalidatePort(void);
799
800 static bool _handler( void * target,
801 void * ref, IOService * newService, IONotifier * notifier );
802 virtual bool handler( void * ref, IOService * newService );
803
804 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
805 virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
806 };
807
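// IOServiceMessageUserNotification delivers general-interest messages to the
// client's port as a complex Mach message carrying the provider as a port
// descriptor plus an IOServiceInterestContent64 payload.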
808 class IOServiceMessageUserNotification : public IOUserNotification
809 {
810 OSDeclareDefaultStructors(IOServiceMessageUserNotification);
811
812 struct PingMsgKdata {
813 mach_msg_header_t msgHdr;
814 mach_msg_body_t msgBody;
815 mach_msg_port_descriptor_t ports[1];
816 };
817 struct PingMsgUdata {
818 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
819 };
820
821 ipc_port_t remotePort;
822 void *msgReference;
823 mach_msg_size_t msgReferenceSize;
824 mach_msg_size_t msgExtraSize;
825 natural_t msgType;
826 uint8_t clientIs64;
827 int owningPID;
828 bool ipcLogged;
829
830 public:
831
832 virtual bool init( mach_port_t port, natural_t type,
833 void * reference, vm_size_t referenceSize,
834 bool clientIs64 );
835
836 virtual void free() APPLE_KEXT_OVERRIDE;
837 void invalidatePort(void);
838
839 static IOReturn _handler( void * target, void * ref,
840 UInt32 messageType, IOService * provider,
841 void * messageArgument, vm_size_t argSize );
842 virtual IOReturn handler( void * ref,
843 UInt32 messageType, IOService * provider,
844 void * messageArgument, vm_size_t argSize );
845
846 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
847 virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
848 };
849
850 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
851
852 #undef super
853 #define super IOUserIterator
854 OSDefineMetaClass( IOUserNotification, IOUserIterator );
855 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
856
857 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
858
859 void
860 IOUserNotification::free( void )
861 {
862 #if DEVELOPMENT || DEBUG
863 IOLockLock( gIOObjectPortLock);
864
865 assert(userIteratorObject == NULL);
866
867 IOLockUnlock( gIOObjectPortLock);
868 #endif /* DEVELOPMENT || DEBUG */
869
870 super::free();
871 }
872
873
874 void
875 IOUserNotification::setNotification( IONotifier * notify )
876 {
877 OSObject * previousNotify;
878
879 /*
880 * We must retain this object here before proceeding.
881 * Two threads may race in setNotification(). If one thread sets a new notifier while the
882 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
883 * before the first thread calls retain(). Without the retain here, this thread interleaving
884 * would cause the object to get released and freed before it is retained by the first thread,
885 * which is a UaF.
886 */
887 retain();
888
889 IOLockLock( gIOObjectPortLock);
890
891 previousNotify = userIteratorObject;
892 userIteratorObject = notify;
893
894 IOLockUnlock( gIOObjectPortLock);
895
896 if (previousNotify) {
897 assert(OSDynamicCast(IONotifier, previousNotify));
898 ((IONotifier *)previousNotify)->remove();
899
900 if (notify == NULL) {
901 release();
902 }
903 } else if (notify) {
904 // new IONotifier, retain the object. release() will happen in setNotification(NULL)
905 retain();
906 }
907
908 release(); // paired with retain() at beginning of this method
909 }
910
911 void
912 IOUserNotification::reset()
913 {
914 // ?
915 }
916
917 bool
918 IOUserNotification::isValid()
919 {
920 return true;
921 }
922
923 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
924
925 #undef super
926 #define super IOUserNotification
927 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
928
929 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
930
931 bool
932 IOServiceUserNotification::init( mach_port_t port, natural_t type,
933 void * reference, vm_size_t referenceSize,
934 bool clientIs64 )
935 {
936 if (!super::init()) {
937 return false;
938 }
939
940 newSet = OSArray::withCapacity( 1 );
941 if (!newSet) {
942 return false;
943 }
944
945 if (referenceSize > sizeof(OSAsyncReference64)) {
946 return false;
947 }
948
949 msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
950 msgReference = IOMallocZeroData(msgReferenceSize);
951 if (!msgReference) {
952 return false;
953 }
954
955 remotePort = port;
956 msgType = type;
957 bcopy( reference, msgReference, referenceSize );
958
959 return true;
960 }
961
962 void
963 IOServiceUserNotification::invalidatePort(void)
964 {
965 remotePort = MACH_PORT_NULL;
966 }
967
968 void
969 IOServiceUserNotification::free( void )
970 {
971 if (remotePort) {
972 iokit_release_port_send(remotePort);
973 }
974 IOFreeData(msgReference, msgReferenceSize);
975 OSSafeReleaseNULL(newSet);
976
977 super::free();
978 }
979
980 bool
981 IOServiceUserNotification::_handler( void * target,
982 void * ref, IOService * newService, IONotifier * notifier )
983 {
984 IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
985 bool ret;
986
987 targetObj->retain();
988 ret = targetObj->handler( ref, newService );
989 targetObj->release();
990 return ret;
991 }
992
993 bool
994 IOServiceUserNotification::handler( void * ref,
995 IOService * newService )
996 {
997 unsigned int count;
998 kern_return_t kr;
999 ipc_port_t port = NULL;
1000 bool sendPing = false;
1001 mach_msg_size_t msgSize, payloadSize;
1002
1003 IOTakeLock( &lock );
1004
1005 count = newSet->getCount();
1006 if (count < kMaxOutstanding) {
1007 newSet->setObject( newService );
1008 if ((sendPing = (armed && (0 == count)))) {
1009 armed = false;
1010 }
1011 }
1012
1013 IOUnlock( &lock );
1014
1015 if (kIOServiceTerminatedNotificationType == msgType) {
1016 lck_mtx_lock(gIOObjectPortLock);
1017 newService->setMachPortHoldDestroy(true);
1018 lck_mtx_unlock(gIOObjectPortLock);
1019 }
1020
1021 if (sendPing) {
1022 port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );
1023
1024 payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
1025 msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);
1026
1027 kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
1028 MACH_SEND_KERNEL_IMPORTANCE, MACH_MSG_TIMEOUT_NONE, NULL,
1029 ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
1030 PingMsgUdata *udata = (PingMsgUdata *)payload;
1031
1032 hdr->msgh_remote_port = remotePort;
1033 hdr->msgh_local_port = port;
1034 hdr->msgh_bits = MACH_MSGH_BITS(
1035 MACH_MSG_TYPE_COPY_SEND /*remote*/,
1036 MACH_MSG_TYPE_MAKE_SEND /*local*/);
1037 hdr->msgh_size = msgSize;
1038 hdr->msgh_id = kOSNotificationMessageID;
1039
1040 assert(descs == NULL);
1041 /* End of kernel processed data */
1042
1043 udata->notifyHeader.size = 0;
1044 udata->notifyHeader.type = msgType;
1045
1046 assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
1047 bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
1048 });
1049
1050 if (port) {
1051 iokit_release_port( port );
1052 }
1053
1054 if ((KERN_SUCCESS != kr) && !ipcLogged) {
1055 ipcLogged = true;
1056 IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
1057 }
1058 }
1059
1060 return true;
1061 }
1062 OSObject *
1063 IOServiceUserNotification::getNextObject()
1064 {
1065 assert(false);
1066 return NULL;
1067 }
1068
1069 OSObject *
1070 IOServiceUserNotification::copyNextObject()
1071 {
1072 unsigned int count;
1073 OSObject * result;
1074
1075 IOLockLock(&lock);
1076
1077 count = newSet->getCount();
1078 if (count) {
1079 result = newSet->getObject( count - 1 );
1080 result->retain();
1081 newSet->removeObject( count - 1);
1082 } else {
1083 result = NULL;
1084 armed = true;
1085 }
1086
1087 IOLockUnlock(&lock);
1088
1089 return result;
1090 }
1091
1092 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1093
1094 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1095
1096 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1097
1098 bool
1099 IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
1100 void * reference, vm_size_t referenceSize, bool client64 )
1101 {
1102 if (!super::init()) {
1103 return false;
1104 }
1105
1106 if (referenceSize > sizeof(OSAsyncReference64)) {
1107 return false;
1108 }
1109
1110 clientIs64 = client64;
1111
1112 owningPID = proc_selfpid();
1113
1114 msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
1115 msgReference = IOMallocZeroData(msgReferenceSize);
1116 if (!msgReference) {
1117 return false;
1118 }
1119
1120 remotePort = port;
1121 msgType = type;
1122 bcopy( reference, msgReference, referenceSize );
1123
1124 return true;
1125 }
1126
1127 void
1128 IOServiceMessageUserNotification::invalidatePort(void)
1129 {
1130 remotePort = MACH_PORT_NULL;
1131 }
1132
1133 void
1134 IOServiceMessageUserNotification::free( void )
1135 {
1136 if (remotePort) {
1137 iokit_release_port_send(remotePort);
1138 }
1139 IOFreeData(msgReference, msgReferenceSize);
1140
1141 super::free();
1142 }
1143
1144 IOReturn
1145 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1146 UInt32 messageType, IOService * provider,
1147 void * argument, vm_size_t argSize )
1148 {
1149 IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1150 IOReturn ret;
1151
1152 targetObj->retain();
1153 ret = targetObj->handler(
1154 ref, messageType, provider, argument, argSize);
1155 targetObj->release();
1156 return ret;
1157 }
1158
1159 IOReturn
1160 IOServiceMessageUserNotification::handler( void * ref,
1161 UInt32 messageType, IOService * provider,
1162 void * messageArgument, vm_size_t callerArgSize )
1163 {
1164 kern_return_t kr;
1165 vm_size_t argSize;
1166 mach_msg_size_t thisMsgSize;
1167 ipc_port_t thisPort, providerPort;
1168
1169 if (kIOMessageCopyClientID == messageType) {
1170 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
1171 return kIOReturnSuccess;
1172 }
1173
1174 if (callerArgSize == 0) {
1175 if (clientIs64) {
1176 argSize = sizeof(io_user_reference_t);
1177 } else {
1178 argSize = sizeof(uint32_t);
1179 }
1180 } else {
1181 if (callerArgSize > kIOUserNotifyMaxMessageSize) {
1182 callerArgSize = kIOUserNotifyMaxMessageSize;
1183 }
1184 argSize = callerArgSize;
1185 }
1186
1187 // adjust message size for ipc restrictions
1188 natural_t type = msgType;
1189 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1190 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1191 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1192
1193 mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
1194 mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
1195 sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);
1196
1197 if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
1198 return kIOReturnBadArgument;
1199 }
1200 mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);
1201
1202 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT, NULL );
1203 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );
1204
1205 kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
1206 MACH_SEND_KERNEL_IMPORTANCE, MACH_MSG_TIMEOUT_NONE, NULL,
1207 ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
1208 mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
1209 PingMsgUdata *udata = (PingMsgUdata *)payload;
1210 IOServiceInterestContent64 * data;
1211 mach_msg_size_t dataOffset;
1212
1213 hdr->msgh_remote_port = remotePort;
1214 hdr->msgh_local_port = thisPort;
1215 hdr->msgh_bits = MACH_MSGH_BITS_COMPLEX
1216 | MACH_MSGH_BITS(
1217 MACH_MSG_TYPE_COPY_SEND /*remote*/,
1218 MACH_MSG_TYPE_MAKE_SEND /*local*/);
1219 hdr->msgh_size = thisMsgSize;
1220 hdr->msgh_id = kOSNotificationMessageID;
1221
1222 /* body.msgh_descriptor_count is set automatically after the closure */
1223
1224 port_desc[0].name = providerPort;
1225 port_desc[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
1226 port_desc[0].type = MACH_MSG_PORT_DESCRIPTOR;
1227 /* End of kernel processed data */
1228
1229 udata->notifyHeader.size = extraSize;
1230 udata->notifyHeader.type = type;
1231 bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
1232
1233 /* data is after msgReference */
1234 dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
1235 data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
1236 data->messageType = messageType;
1237
1238 if (callerArgSize == 0) {
1239 assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
1240 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1241 if (!clientIs64) {
1242 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1243 }
1244 } else {
1245 assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
1246 bcopy(messageArgument, data->messageArgument, callerArgSize);
1247 }
1248 });
1249
1250 if (thisPort) {
1251 iokit_release_port( thisPort );
1252 }
1253 if (providerPort) {
1254 iokit_release_port( providerPort );
1255 }
1256
1257 if (kr == MACH_SEND_NO_BUFFER) {
1258 return kIOReturnNoMemory;
1259 }
1260
1261 if ((KERN_SUCCESS != kr) && !ipcLogged) {
1262 ipcLogged = true;
1263 IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
1264 }
1265
1266 return kIOReturnSuccess;
1267 }
1268
1269 OSObject *
1270 IOServiceMessageUserNotification::getNextObject()
1271 {
1272 return NULL;
1273 }
1274
1275 OSObject *
1276 IOServiceMessageUserNotification::copyNextObject()
1277 {
1278 return NULL;
1279 }
1280
1281 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1282
1283 #undef super
1284 #define super IOService
1285 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1286
1287 IOLock * gIOUserClientOwnersLock;
1288
1289 static_assert(offsetof(IOUserClient, __opaque_end) -
1290 offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
1291 "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1292
1293 void
1294 IOUserClient::initialize( void )
1295 {
1296 gIOObjectPortLock = IOLockAlloc();
1297 gIOUserClientOwnersLock = IOLockAlloc();
1298 gIOUserServerLock = IOLockAlloc();
1299 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1300
1301 #if IOTRACKING
1302 IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1303 IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1304 IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1305 IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1306 IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1307 #endif /* IOTRACKING */
1308 }
1309
1310 void
1311 #if __LP64__
1312 __attribute__((__noreturn__))
1313 #endif
1314 IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1315 mach_port_t wakePort,
1316 void *callback, void *refcon)
1317 {
1318 #if __LP64__
1319 panic("setAsyncReference not valid for 64b");
1320 #else
1321 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1322 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1323 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1324 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1325 #endif
1326 }
1327
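// An OSAsyncReference64 packs the wake port (with the flag bits defined above)
// into kIOAsyncReservedIndex, followed by the user-space callback address and
// refcon used when the async completion is delivered.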
1328 void
1329 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1330 mach_port_t wakePort,
1331 mach_vm_address_t callback, io_user_reference_t refcon)
1332 {
1333 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1334 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1335 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1336 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1337 }
1338
1339 void
1340 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1341 mach_port_t wakePort,
1342 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1343 {
1344 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1345 if (vm_map_is_64bit(get_task_map(task))) {
1346 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1347 }
1348 }
1349
1350 static OSDictionary *
1351 CopyConsoleUser(UInt32 uid)
1352 {
1353 OSArray * array;
1354 OSDictionary * user = NULL;
1355
1356 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1357 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1358 for (unsigned int idx = 0;
1359 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1360 idx++) {
1361 OSNumber * num;
1362
1363 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1364 && (uid == num->unsigned32BitValue())) {
1365 user->retain();
1366 break;
1367 }
1368 }
1369 }
1370 OSSafeReleaseNULL(ioProperty);
1371 return user;
1372 }
1373
1374 static OSDictionary *
1375 CopyUserOnConsole(void)
1376 {
1377 OSArray * array;
1378 OSDictionary * user = NULL;
1379
1380 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1381 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1382 for (unsigned int idx = 0;
1383 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1384 idx++) {
1385 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1386 user->retain();
1387 break;
1388 }
1389 }
1390 }
1391 OSSafeReleaseNULL(ioProperty);
1392 return user;
1393 }
1394
1395 IOReturn
1396 IOUserClient::clientHasAuthorization( task_t task,
1397 IOService * service )
1398 {
1399 proc_t p;
1400
1401 p = (proc_t) get_bsdtask_info(task);
1402 if (p) {
1403 uint64_t authorizationID;
1404
1405 authorizationID = proc_uniqueid(p);
1406 if (authorizationID) {
1407 if (service->getAuthorizationID() == authorizationID) {
1408 return kIOReturnSuccess;
1409 }
1410 }
1411 }
1412
1413 return kIOReturnNotPermitted;
1414 }
1415
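// Privilege checks resolve either against the task's security token (val[0]
// is the effective uid, so kIOClientPrivilegeAdministrator requires root) or
// against the console user sessions published under gIOConsoleUsersKey at the
// registry root.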
1416 IOReturn
1417 IOUserClient::clientHasPrivilege( void * securityToken,
1418 const char * privilegeName )
1419 {
1420 kern_return_t kr;
1421 security_token_t token;
1422 mach_msg_type_number_t count;
1423 task_t task;
1424 OSDictionary * user;
1425 bool secureConsole;
1426
1427
1428 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1429 sizeof(kIOClientPrivilegeForeground))) {
1430 if (task_is_gpu_denied(current_task())) {
1431 return kIOReturnNotPrivileged;
1432 } else {
1433 return kIOReturnSuccess;
1434 }
1435 }
1436
1437 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1438 sizeof(kIOClientPrivilegeConsoleSession))) {
1439 kauth_cred_t cred;
1440 proc_t p;
1441
1442 task = (task_t) securityToken;
1443 if (!task) {
1444 task = current_task();
1445 }
1446 p = (proc_t) get_bsdtask_info(task);
1447 kr = kIOReturnNotPrivileged;
1448
1449 if (p && (cred = kauth_cred_proc_ref(p))) {
1450 user = CopyUserOnConsole();
1451 if (user) {
1452 OSNumber * num;
1453 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1454 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
1455 kr = kIOReturnSuccess;
1456 }
1457 user->release();
1458 }
1459 kauth_cred_unref(&cred);
1460 }
1461 return kr;
1462 }
1463
1464 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1465 sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
1466 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1467 } else {
1468 task = (task_t)securityToken;
1469 }
1470
1471 count = TASK_SECURITY_TOKEN_COUNT;
1472 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1473
1474 if (KERN_SUCCESS != kr) {
1475 } else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1476 sizeof(kIOClientPrivilegeAdministrator))) {
1477 if (0 != token.val[0]) {
1478 kr = kIOReturnNotPrivileged;
1479 }
1480 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1481 sizeof(kIOClientPrivilegeLocalUser))) {
1482 user = CopyConsoleUser(token.val[0]);
1483 if (user) {
1484 user->release();
1485 } else {
1486 kr = kIOReturnNotPrivileged;
1487 }
1488 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1489 sizeof(kIOClientPrivilegeConsoleUser))) {
1490 user = CopyConsoleUser(token.val[0]);
1491 if (user) {
1492 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
1493 kr = kIOReturnNotPrivileged;
1494 } else if (secureConsole) {
1495 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1496 if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
1497 kr = kIOReturnNotPrivileged;
1498 }
1499 }
1500 user->release();
1501 } else {
1502 kr = kIOReturnNotPrivileged;
1503 }
1504 } else {
1505 kr = kIOReturnUnsupported;
1506 }
1507
1508 return kr;
1509 }
1510
1511 OSDictionary *
1512 IOUserClient::copyClientEntitlements(task_t task)
1513 {
1514 proc_t p = NULL;
1515 pid_t pid = 0;
1516 OSDictionary *entitlements = NULL;
1517
1518 p = (proc_t)get_bsdtask_info(task);
1519 if (p == NULL) {
1520 return NULL;
1521 }
1522 pid = proc_pid(p);
1523
1524 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1525 if (entitlements) {
1526 return entitlements;
1527 }
1528 }
1529
1530 // If the above fails, that's it
1531 return NULL;
1532 }
1533
1534 OSDictionary *
1535 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1536 {
1537 OSDictionary *entitlements = NULL;
1538
1539 if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1540 return NULL;
1541 }
1542 return entitlements;
1543 }
1544
1545 OSObject *
1546 IOUserClient::copyClientEntitlement( task_t task,
1547 const char * entitlement )
1548 {
1549 void *entitlement_object = NULL;
1550
1551 if (task == NULL) {
1552 task = current_task();
1553 }
1554
1555 /* Validate input arguments */
1556 if (task == kernel_task || entitlement == NULL) {
1557 return NULL;
1558 }
1559 proc_t proc = (proc_t)get_bsdtask_info(task);
1560
1561 kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
1562 proc,
1563 entitlement,
1564 &entitlement_object);
1565
1566 if (ret != KERN_SUCCESS) {
1567 return NULL;
1568 }
1569 assert(entitlement_object != NULL);
1570
1571 return (OSObject*)entitlement_object;
1572 }
1573
1574 OSObject *
1575 IOUserClient::copyClientEntitlementVnode(
1576 struct vnode *vnode,
1577 off_t offset,
1578 const char *entitlement)
1579 {
1580 OSDictionary *entitlements;
1581 OSObject *value;
1582
1583 entitlements = copyClientEntitlementsVnode(vnode, offset);
1584 if (entitlements == NULL) {
1585 return NULL;
1586 }
1587
1588 /* Fetch the entitlement value from the dictionary. */
1589 value = entitlements->getObject(entitlement);
1590 if (value != NULL) {
1591 value->retain();
1592 }
1593
1594 entitlements->release();
1595 return value;
1596 }
1597
1598 bool
1599 IOUserClient::init()
1600 {
1601 if (getPropertyTable() || super::init()) {
1602 return reserve();
1603 }
1604
1605 return false;
1606 }
1607
1608 bool
1609 IOUserClient::init(OSDictionary * dictionary)
1610 {
1611 if (getPropertyTable() || super::init(dictionary)) {
1612 return reserve();
1613 }
1614
1615 return false;
1616 }
1617
1618 bool
1619 IOUserClient::initWithTask(task_t owningTask,
1620 void * securityID,
1621 UInt32 type )
1622 {
1623 if (getPropertyTable() || super::init()) {
1624 return reserve();
1625 }
1626
1627 return false;
1628 }
1629
1630 bool
1631 IOUserClient::initWithTask(task_t owningTask,
1632 void * securityID,
1633 UInt32 type,
1634 OSDictionary * properties )
1635 {
1636 bool ok;
1637
1638 ok = super::init( properties );
1639 ok &= initWithTask( owningTask, securityID, type );
1640
1641 return ok;
1642 }
1643
1644 bool
1645 IOUserClient::reserve()
1646 {
1647 if (!reserved) {
1648 reserved = IOMallocType(ExpansionData);
1649 }
1650 setTerminateDefer(NULL, true);
1651 IOStatisticsRegisterCounter();
1652 IORWLockInlineInit(&lock);
1653 IOLockInlineInit(&filterLock);
1654
1655 return true;
1656 }
1657
1658 struct IOUserClientOwner {
1659 task_t task;
1660 queue_chain_t taskLink;
1661 IOUserClient * uc;
1662 queue_chain_t ucLink;
1663 };
1664
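// Records 'task' as an owner of this user client, linking the owner record
// into both the client's owner queue and the task's queue so either side can
// find the other at teardown or on app-suspension changes.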
1665 IOReturn
1666 IOUserClient::registerOwner(task_t task)
1667 {
1668 IOUserClientOwner * owner;
1669 IOReturn ret;
1670 bool newOwner;
1671
1672 IOLockLock(gIOUserClientOwnersLock);
1673
1674 newOwner = true;
1675 ret = kIOReturnSuccess;
1676
1677 if (!owners.next) {
1678 queue_init(&owners);
1679 } else {
1680 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1681 {
1682 if (task != owner->task) {
1683 continue;
1684 }
1685 newOwner = false;
1686 break;
1687 }
1688 }
1689 if (newOwner) {
1690 owner = IOMallocType(IOUserClientOwner);
1691
1692 owner->task = task;
1693 owner->uc = this;
1694 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1695 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1696 if (messageAppSuspended) {
1697 task_set_message_app_suspended(task, true);
1698 }
1699 }
1700
1701 IOLockUnlock(gIOUserClientOwnersLock);
1702
1703 return ret;
1704 }
1705
1706 void
1707 IOUserClient::noMoreSenders(void)
1708 {
1709 IOUserClientOwner * owner;
1710 IOUserClientOwner * iter;
1711 queue_head_t * taskque;
1712 bool hasMessageAppSuspended;
1713
1714 IOLockLock(gIOUserClientOwnersLock);
1715
1716 if (owners.next) {
1717 while (!queue_empty(&owners)) {
1718 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1719 taskque = task_io_user_clients(owner->task);
1720 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1721 hasMessageAppSuspended = false;
1722 queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1723 hasMessageAppSuspended = iter->uc->messageAppSuspended;
1724 if (hasMessageAppSuspended) {
1725 break;
1726 }
1727 }
1728 task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
1729 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1730 IOFreeType(owner, IOUserClientOwner);
1731 }
1732 owners.next = owners.prev = NULL;
1733 }
1734
1735 IOLockUnlock(gIOUserClientOwnersLock);
1736 }
1737
1738
1739 extern "C" void
1740 iokit_task_app_suspended_changed(task_t task)
1741 {
1742 queue_head_t * taskque;
1743 IOUserClientOwner * owner;
1744 OSSet * set;
1745
1746 IOLockLock(gIOUserClientOwnersLock);
1747
1748 taskque = task_io_user_clients(task);
1749 set = NULL;
1750 queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
1751 if (!owner->uc->messageAppSuspended) {
1752 continue;
1753 }
1754 if (!set) {
1755 set = OSSet::withCapacity(4);
1756 if (!set) {
1757 break;
1758 }
1759 }
1760 set->setObject(owner->uc);
1761 }
1762
1763 IOLockUnlock(gIOUserClientOwnersLock);
1764
1765 if (set) {
1766 set->iterateObjects(^bool (OSObject * obj) {
1767 IOUserClient * uc;
1768
1769 uc = (typeof(uc))obj;
1770 #if 0
1771 {
1772 OSString * str;
1773 str = IOCopyLogNameForPID(task_pid(task));
1774 IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
1775 uc->getName(), task_is_app_suspended(task));
1776 OSSafeReleaseNULL(str);
1777 }
1778 #endif
1779 uc->message(kIOMessageTaskAppSuspendedChange, NULL);
1780
1781 return false;
1782 });
1783 set->release();
1784 }
1785 }
1786
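// Task termination is two-phased: phase 1 lets IOUserServers owned by a dying
// driver task know their client died; phase 2 severs the remaining
// IOUserClientOwner links and calls clientDied() on clients left with no
// owners.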
1787 static kern_return_t
1788 iokit_task_terminate_phase1(task_t task)
1789 {
1790 queue_head_t * taskque;
1791 IOUserClientOwner * iter;
1792 OSSet * userServers = NULL;
1793
1794 if (!task_is_driver(task)) {
1795 return KERN_SUCCESS;
1796 }
1797 userServers = OSSet::withCapacity(1);
1798
1799 IOLockLock(gIOUserClientOwnersLock);
1800
1801 taskque = task_io_user_clients(task);
1802 queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1803 userServers->setObject(iter->uc);
1804 }
1805 IOLockUnlock(gIOUserClientOwnersLock);
1806
1807 if (userServers) {
1808 IOUserServer * userServer;
1809 while ((userServer = OSRequiredCast(IOUserServer, userServers->getAnyObject()))) {
1810 userServer->clientDied();
1811 userServers->removeObject(userServer);
1812 }
1813 userServers->release();
1814 }
1815 return KERN_SUCCESS;
1816 }
1817
1818 static kern_return_t
1819 iokit_task_terminate_phase2(task_t task)
1820 {
1821 queue_head_t * taskque;
1822 IOUserClientOwner * owner;
1823 IOUserClient * dead;
1824 IOUserClient * uc;
1825
1826 IOLockLock(gIOUserClientOwnersLock);
1827 taskque = task_io_user_clients(task);
1828 dead = NULL;
1829 while (!queue_empty(taskque)) {
1830 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1831 uc = owner->uc;
1832 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1833 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1834 if (queue_empty(&uc->owners)) {
1835 uc->retain();
1836 IOLog("destroying out of band connect for %s\n", uc->getName());
1837 // now using the uc queue head as a singly linked queue,
1838 // leaving .next as NULL to mark it empty
1839 uc->owners.next = NULL;
1840 uc->owners.prev = (queue_entry_t) dead;
1841 dead = uc;
1842 }
1843 IOFreeType(owner, IOUserClientOwner);
1844 }
1845 IOLockUnlock(gIOUserClientOwnersLock);
1846
1847 while (dead) {
1848 uc = dead;
1849 dead = (IOUserClient *)(void *) dead->owners.prev;
1850 uc->owners.prev = NULL;
1851 if (uc->sharedInstance || !uc->closed) {
1852 uc->clientDied();
1853 }
1854 uc->release();
1855 }
1856
1857 return KERN_SUCCESS;
1858 }
1859
1860 extern "C" kern_return_t
1861 iokit_task_terminate(task_t task, int phase)
1862 {
1863 switch (phase) {
1864 case 1:
1865 return iokit_task_terminate_phase1(task);
1866 case 2:
1867 return iokit_task_terminate_phase2(task);
1868 default:
1869 panic("iokit_task_terminate phase %d", phase);
1870 }
1871 }
1872
1873 struct IOUCFilterPolicy {
1874 task_t task;
1875 io_filter_policy_t filterPolicy;
1876 IOUCFilterPolicy * next;
1877 };
1878
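// Per-task filter policies are kept in a singly-linked list hanging off
// reserved->filterPolicies, guarded by filterLock; an entry is added the first
// time a policy is supplied for a task and reused on later lookups.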
1879 io_filter_policy_t
1880 IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
1881 {
1882 IOUCFilterPolicy * elem;
1883 io_filter_policy_t filterPolicy;
1884
1885 filterPolicy = 0;
1886 IOLockLock(&filterLock);
1887
1888 for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
1889 }
1890
1891 if (elem) {
1892 if (addFilterPolicy) {
1893 assert(addFilterPolicy == elem->filterPolicy);
1894 }
1895 filterPolicy = elem->filterPolicy;
1896 } else if (addFilterPolicy) {
1897 elem = IOMallocType(IOUCFilterPolicy);
1898 elem->task = task;
1899 elem->filterPolicy = addFilterPolicy;
1900 elem->next = reserved->filterPolicies;
1901 reserved->filterPolicies = elem;
1902 filterPolicy = addFilterPolicy;
1903 }
1904
1905 IOLockUnlock(&filterLock);
1906 return filterPolicy;
1907 }
1908
1909 void
1910 IOUserClient::free()
1911 {
1912 if (mappings) {
1913 mappings->release();
1914 }
1915
1916 IOStatisticsUnregisterCounter();
1917
1918 assert(!owners.next);
1919 assert(!owners.prev);
1920
1921 if (reserved) {
1922 IOUCFilterPolicy * elem;
1923 IOUCFilterPolicy * nextElem;
1924 for (elem = reserved->filterPolicies; elem; elem = nextElem) {
1925 nextElem = elem->next;
1926 if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
1927 gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
1928 }
1929 IOFreeType(elem, IOUCFilterPolicy);
1930 }
1931 IOFreeType(reserved, ExpansionData);
1932 IORWLockInlineDestroy(&lock);
1933 IOLockInlineDestroy(&filterLock);
1934 }
1935
1936 super::free();
1937 }
1938
1939 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1940
1941 OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1942
1943
1944 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1945
1946 IOReturn
1947 IOUserClient::clientDied( void )
1948 {
1949 IOReturn ret = kIOReturnNotReady;
1950
1951 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1952 ret = clientClose();
1953 }
1954
1955 return ret;
1956 }
1957
1958 IOReturn
1959 IOUserClient::clientClose( void )
1960 {
1961 return kIOReturnUnsupported;
1962 }
1963
1964 IOService *
1965 IOUserClient::getService( void )
1966 {
1967 return NULL;
1968 }
1969
1970 IOReturn
1971 IOUserClient::registerNotificationPort(
1972 mach_port_t /* port */,
1973 UInt32 /* type */,
1974 UInt32 /* refCon */)
1975 {
1976 return kIOReturnUnsupported;
1977 }
1978
1979 IOReturn
1980 IOUserClient::registerNotificationPort(
1981 mach_port_t port,
1982 UInt32 type,
1983 io_user_reference_t refCon)
1984 {
1985 return registerNotificationPort(port, type, (UInt32) refCon);
1986 }
1987
1988 IOReturn
1989 IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1990 semaphore_t * semaphore )
1991 {
1992 return kIOReturnUnsupported;
1993 }
1994
1995 IOReturn
1996 IOUserClient::connectClient( IOUserClient * /* client */ )
1997 {
1998 return kIOReturnUnsupported;
1999 }
2000
2001 IOReturn
2002 IOUserClient::clientMemoryForType( UInt32 type,
2003 IOOptionBits * options,
2004 IOMemoryDescriptor ** memory )
2005 {
2006 return kIOReturnUnsupported;
2007 }
2008
2009 IOReturn
2010 IOUserClient::clientMemoryForType( UInt32 type,
2011 IOOptionBits * options,
2012 OSSharedPtr<IOMemoryDescriptor>& memory )
2013 {
2014 IOMemoryDescriptor* memoryRaw = nullptr;
2015 IOReturn result = clientMemoryForType(type, options, &memoryRaw);
2016 memory.reset(memoryRaw, OSNoRetain);
2017 return result;
2018 }
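/*
 * Illustrative sketch (added, not part of the original source): a user
 * client subclass typically overrides the raw-pointer clientMemoryForType()
 * and returns a retained descriptor; the OSSharedPtr overload above then
 * adopts that reference with OSNoRetain. The class and member names below
 * are hypothetical.
 *
 *   IOReturn
 *   MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits * options,
 *       IOMemoryDescriptor ** memory)
 *   {
 *       if (type != 0) {
 *           return kIOReturnBadArgument;
 *       }
 *       // fSharedBuffer is an IOBufferMemoryDescriptor created earlier;
 *       // the caller (mapClientMemory64 below) releases the reference
 *       // added here once the mapping has been created.
 *       fSharedBuffer->retain();
 *       *memory  = fSharedBuffer;
 *       *options = kIOMapReadOnly;
 *       return kIOReturnSuccess;
 *   }
 */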
2019
2020 #if !__LP64__
2021 IOMemoryMap *
2022 IOUserClient::mapClientMemory(
2023 IOOptionBits type,
2024 task_t task,
2025 IOOptionBits mapFlags,
2026 IOVirtualAddress atAddress )
2027 {
2028 return NULL;
2029 }
2030 #endif
2031
2032 IOMemoryMap *
2033 IOUserClient::mapClientMemory64(
2034 IOOptionBits type,
2035 task_t task,
2036 IOOptionBits mapFlags,
2037 mach_vm_address_t atAddress )
2038 {
2039 IOReturn err;
2040 IOOptionBits options = 0;
2041 IOMemoryDescriptor * memory = NULL;
2042 IOMemoryMap * map = NULL;
2043
2044 err = clientMemoryForType((UInt32) type, &options, &memory );
2045
2046 if (memory && (kIOReturnSuccess == err)) {
2047 FAKE_STACK_FRAME(getMetaClass());
2048
2049 options = (options & ~kIOMapUserOptionsMask)
2050 | (mapFlags & kIOMapUserOptionsMask);
2051 map = memory->createMappingInTask( task, atAddress, options );
2052 memory->release();
2053
2054 FAKE_STACK_FRAME_END();
2055 }
2056
2057 return map;
2058 }
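/*
 * Usage note (added commentary, not in the original source): this kernel
 * path is normally reached from user space through IOConnectMapMemory64(),
 * which supplies the memory type and receives the task-relative address and
 * size. A hedged sketch of that user-space call (the wrapper name and
 * argument order are assumptions about IOKitLib, not taken from this file):
 *
 *   mach_vm_address_t addr = 0;
 *   mach_vm_size_t    size = 0;
 *   kern_return_t kr = IOConnectMapMemory64(connect, 0, mach_task_self(),
 *       &addr, &size, kIOMapAnywhere);
 */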
2059
2060 IOReturn
2061 IOUserClient::exportObjectToClient(task_t task,
2062 OSObject *obj, io_object_t *clientObj)
2063 {
2064 mach_port_name_t name;
2065
2066 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
2067
2068 *clientObj = (io_object_t)(uintptr_t) name;
2069
2070 if (obj) {
2071 obj->release();
2072 }
2073
2074 return kIOReturnSuccess;
2075 }
2076
2077 IOReturn
2078 IOUserClient::copyPortNameForObjectInTask(task_t task,
2079 OSObject *obj, mach_port_name_t * port_name)
2080 {
2081 mach_port_name_t name;
2082
2083 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
2084
2085 *(mach_port_name_t *) port_name = name;
2086
2087 return kIOReturnSuccess;
2088 }
2089
2090 IOReturn
2091 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2092 OSObject **obj)
2093 {
2094 OSObject * object;
2095
2096 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2097
2098 *obj = object;
2099
2100 return object ? kIOReturnSuccess : kIOReturnIPCError;
2101 }
2102
2103 IOReturn
2104 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2105 OSSharedPtr<OSObject>& obj)
2106 {
2107 OSObject* objRaw = NULL;
2108 IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2109 obj.reset(objRaw, OSNoRetain);
2110 return result;
2111 }
2112
2113 IOReturn
2114 IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
2115 {
2116 return iokit_mod_send_right(task, port_name, delta);
2117 }
2118
2119 IOExternalMethod *
2120 IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
2121 {
2122 return NULL;
2123 }
2124
2125 IOExternalAsyncMethod *
2126 IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
2127 {
2128 return NULL;
2129 }
2130
2131 IOExternalTrap *
2132 IOUserClient::
2133 getExternalTrapForIndex(UInt32 index)
2134 {
2135 return NULL;
2136 }
2137
2138 #pragma clang diagnostic push
2139 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2140
2141 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2142 // functions can break clients of kexts implementing getExternalMethodForIndex()
2143 IOExternalMethod *
2144 IOUserClient::
2145 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2146 {
2147 IOExternalMethod *method = getExternalMethodForIndex(index);
2148
2149 if (method) {
2150 *targetP = (IOService *) method->object;
2151 }
2152
2153 return method;
2154 }
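/*
 * Illustrative sketch (added, not part of the original source): a legacy
 * kext that still uses this deprecated dispatch path typically returns
 * entries from a static IOExternalMethod table in its
 * getExternalMethodForIndex() override. The class, table, and selector
 * names below are hypothetical.
 *
 *   static IOExternalMethod sMethods[] = {
 *       // object is filled in at runtime; one scalar in, one scalar out
 *       { NULL, (IOMethod) &MyUserClient::doSomething,
 *         kIOUCScalarIScalarO, 1, 1 },
 *   };
 *
 *   IOExternalMethod *
 *   MyUserClient::getExternalMethodForIndex(UInt32 index)
 *   {
 *       if (index >= (sizeof(sMethods) / sizeof(sMethods[0]))) {
 *           return NULL;
 *       }
 *       sMethods[index].object = this;   // read by getTargetAndMethodForIndex()
 *       return &sMethods[index];
 *   }
 */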
2155
2156 IOExternalMethod *
2157 IOUserClient::
2158 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2159 {
2160 IOService* targetPRaw = NULL;
2161 IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2162 targetP.reset(targetPRaw, OSRetain);
2163 return result;
2164 }
2165
2166 IOExternalAsyncMethod *
2167 IOUserClient::
2168 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2169 {
2170 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2171
2172 if (method) {
2173 *targetP = (IOService *) method->object;
2174 }
2175
2176 return method;
2177 }
2178
2179 IOExternalAsyncMethod *
2180 IOUserClient::
2181 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2182 {
2183 IOService* targetPRaw = NULL;
2184 IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2185 targetP.reset(targetPRaw, OSRetain);
2186 return result;
2187 }
2188
2189 IOExternalTrap *
2190 IOUserClient::
2191 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2192 {
2193 IOExternalTrap *trap = getExternalTrapForIndex(index);
2194
2195 if (trap) {
2196 *targetP = trap->object;
2197 }
2198
2199 return trap;
2200 }
2201 #pragma clang diagnostic pop
2202
2203 IOReturn
2204 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2205 {
2206 mach_port_t port;
2207 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2208
2209 if (MACH_PORT_NULL != port) {
2210 iokit_release_port_send(port);
2211 }
2212
2213 return kIOReturnSuccess;
2214 }
2215
2216 IOReturn
2217 IOUserClient::releaseNotificationPort(mach_port_t port)
2218 {
2219 if (MACH_PORT_NULL != port) {
2220 iokit_release_port_send(port);
2221 }
2222
2223 return kIOReturnSuccess;
2224 }
2225
2226 IOReturn
2227 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2228 IOReturn result, void *args[], UInt32 numArgs)
2229 {
2230 OSAsyncReference64 reference64;
2231 OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
2232 unsigned int idx;
2233
2234 if (numArgs > kMaxAsyncArgs) {
2235 return kIOReturnMessageTooLarge;
2236 }
2237
2238 for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2239 reference64[idx] = REF64(reference[idx]);
2240 }
2241
2242 for (idx = 0; idx < numArgs; idx++) {
2243 args64[idx] = REF64(args[idx]);
2244 }
2245
2246 return sendAsyncResult64(reference64, result, args64.data(), numArgs);
2247 }
2248
2249 IOReturn
2250 IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
2251 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2252 {
2253 return _sendAsyncResult64(reference, result, args, numArgs, options);
2254 }
2255
2256 IOReturn
2257 IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
2258 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
2259 {
2260 return _sendAsyncResult64(reference, result, args, numArgs, 0);
2261 }
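/*
 * Usage sketch (added commentary, not in the original source): a driver
 * that captured an OSAsyncReference64 from an async external method can
 * complete it later with sendAsyncResult64(). The variable names below are
 * hypothetical.
 *
 *   io_user_reference_t args[1];
 *   args[0] = (io_user_reference_t) bytesTransferred;
 *   sendAsyncResult64(savedAsyncRef, kIOReturnSuccess, args, 1);
 *
 * sendAsyncResult64WithOptions() additionally accepts
 * kIOUserNotifyOptionCanDrop, which sends with a zero timeout so that a
 * full reply port drops the message instead of failing the send (see
 * _sendAsyncResult64 below).
 */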
2262
2263 IOReturn
2264 IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
2265 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2266 {
2267 struct ReplyMsg {
2268 mach_msg_header_t msgHdr;
2269 union{
2270 struct{
2271 OSNotificationHeader notifyHdr;
2272 IOAsyncCompletionContent asyncContent;
2273 uint32_t args[kMaxAsyncArgs];
2274 } msg32;
2275 struct{
2276 OSNotificationHeader64 notifyHdr;
2277 IOAsyncCompletionContent asyncContent;
2278 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
2279 } msg64;
2280 } m;
2281 };
2282 ReplyMsg replyMsg;
2283 mach_port_t replyPort;
2284 kern_return_t kr;
2285
2286 // If no reply port, do nothing.
2287 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2288 if (replyPort == MACH_PORT_NULL) {
2289 return kIOReturnSuccess;
2290 }
2291
2292 if (numArgs > kMaxAsyncArgs) {
2293 return kIOReturnMessageTooLarge;
2294 }
2295
2296 bzero(&replyMsg, sizeof(replyMsg));
2297 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
2298 0 /*local*/);
2299 replyMsg.msgHdr.msgh_remote_port = replyPort;
2300 replyMsg.msgHdr.msgh_local_port = NULL;
2301 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
2302 if (kIOUCAsync64Flag & reference[0]) {
2303 replyMsg.msgHdr.msgh_size =
2304 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
2305 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
2306 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2307 + numArgs * sizeof(io_user_reference_t);
2308 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
2309 /* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
2310 bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));
2311
2312 replyMsg.m.msg64.asyncContent.result = result;
2313 if (numArgs) {
2314 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
2315 }
2316 } else {
2317 unsigned int idx;
2318
2319 replyMsg.msgHdr.msgh_size =
2320 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
2321 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
2322
2323 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2324 + numArgs * sizeof(uint32_t);
2325 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
2326
2327 /* Skip reference[0] which is left as 0 from the earlier bzero */
2328 for (idx = 1; idx < kOSAsyncRefCount; idx++) {
2329 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
2330 }
2331
2332 replyMsg.m.msg32.asyncContent.result = result;
2333
2334 for (idx = 0; idx < numArgs; idx++) {
2335 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
2336 }
2337 }
2338
2339 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
2340 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
2341 replyMsg.msgHdr.msgh_size, MACH64_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
2342 } else {
2343 /* Fail on full queue. */
2344 kr = mach_msg_send_from_kernel(&replyMsg.msgHdr,
2345 replyMsg.msgHdr.msgh_size);
2346 }
2347 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
2348 reference[0] |= kIOUCAsyncErrorLoggedFlag;
2349 IOLog("%s: mach_msg_send_from_kernel(0x%x)\n", __PRETTY_FUNCTION__, kr );
2350 }
2351 return kr;
2352 }
2353
2354
2355 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2356
2357 extern "C" {
2358 #define CHECK(cls, obj, out) \
2359 cls * out; \
2360 if( !(out = OSDynamicCast( cls, obj))) \
2361 return( kIOReturnBadArgument )
2362
2363 #define CHECKLOCKED(cls, obj, out) \
2364 IOUserIterator * oIter; \
2365 cls * out; \
2366 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
2367 return (kIOReturnBadArgument); \
2368 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
2369 return (kIOReturnBadArgument)
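/*
 * Added note (not in the original source): CHECK() both declares the
 * downcast local and bails out with kIOReturnBadArgument when the MIG port
 * argument is not of the expected class. For example,
 * CHECK(IOService, _service, service) expands roughly to:
 *
 *   IOService * service;
 *   if (!(service = OSDynamicCast(IOService, _service)))
 *       return kIOReturnBadArgument;
 *
 * CHECKLOCKED() does the same through an IOUserIterator wrapper, leaving
 * both "oIter" and the unwrapped object in scope so callers can take
 * oIter->lock around iterator mutation.
 */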
2370
2371 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2372
2373 // Create a vm_map_copy_t or kalloc'ed data for memory
2374 // to be copied out. ipc will free after the copyout.
2375
2376 static kern_return_t
2377 copyoutkdata( const void * data, vm_size_t len,
2378 io_buf_ptr_t * buf )
2379 {
2380 kern_return_t err;
2381 vm_map_copy_t copy;
2382
2383 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2384 false /* src_destroy */, &copy);
2385
2386 assert( err == KERN_SUCCESS );
2387 if (err == KERN_SUCCESS) {
2388 *buf = (char *) copy;
2389 }
2390
2391 return err;
2392 }
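/*
 * Usage sketch (added commentary, not in the original source): the common
 * pattern in this file is to serialize an object and hand its text to
 * copyoutkdata(); MIG/IPC then owns and frees the vm_map_copy_t after the
 * copyout (see is_io_registry_entry_get_property below).
 *
 *   OSSerialize * s = OSSerialize::withCapacity(4096);
 *   if (s && obj->serialize(s)) {
 *       *propertiesCnt = s->getLength();
 *       err = copyoutkdata(s->text(), s->getLength(), properties);
 *   }
 */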
2393
2394 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2395
2396 /* Routine io_server_version */
2397 kern_return_t
2398 is_io_server_version(
2399 mach_port_t main_port,
2400 uint64_t *version)
2401 {
2402 *version = IOKIT_SERVER_VERSION;
2403 return kIOReturnSuccess;
2404 }
2405
2406 /* Routine io_object_get_class */
2407 kern_return_t
2408 is_io_object_get_class(
2409 io_object_t object,
2410 io_name_t className )
2411 {
2412 const OSMetaClass* my_obj = NULL;
2413
2414 if (!object) {
2415 return kIOReturnBadArgument;
2416 }
2417
2418 my_obj = object->getMetaClass();
2419 if (!my_obj) {
2420 return kIOReturnNotFound;
2421 }
2422
2423 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2424
2425 return kIOReturnSuccess;
2426 }
2427
2428 /* Routine io_object_get_superclass */
2429 kern_return_t
2430 is_io_object_get_superclass(
2431 mach_port_t main_port,
2432 io_name_t obj_name,
2433 io_name_t class_name)
2434 {
2435 IOReturn ret;
2436 const OSMetaClass * meta;
2437 const OSMetaClass * super;
2438 const OSSymbol * name;
2439 const char * cstr;
2440
2441 if (!obj_name || !class_name) {
2442 return kIOReturnBadArgument;
2443 }
2444 if (main_port != main_device_port) {
2445 return kIOReturnNotPrivileged;
2446 }
2447
2448 ret = kIOReturnNotFound;
2449 meta = NULL;
2450 do{
2451 name = OSSymbol::withCString(obj_name);
2452 if (!name) {
2453 break;
2454 }
2455 meta = OSMetaClass::copyMetaClassWithName(name);
2456 if (!meta) {
2457 break;
2458 }
2459 super = meta->getSuperClass();
2460 if (!super) {
2461 break;
2462 }
2463 cstr = super->getClassName();
2464 if (!cstr) {
2465 break;
2466 }
2467 strlcpy(class_name, cstr, sizeof(io_name_t));
2468 ret = kIOReturnSuccess;
2469 }while (false);
2470
2471 OSSafeReleaseNULL(name);
2472 if (meta) {
2473 meta->releaseMetaClass();
2474 }
2475
2476 return ret;
2477 }
2478
2479 /* Routine io_object_get_bundle_identifier */
2480 kern_return_t
2481 is_io_object_get_bundle_identifier(
2482 mach_port_t main_port,
2483 io_name_t obj_name,
2484 io_name_t bundle_name)
2485 {
2486 IOReturn ret;
2487 const OSMetaClass * meta;
2488 const OSSymbol * name;
2489 const OSSymbol * identifier;
2490 const char * cstr;
2491
2492 if (!obj_name || !bundle_name) {
2493 return kIOReturnBadArgument;
2494 }
2495 if (main_port != main_device_port) {
2496 return kIOReturnNotPrivileged;
2497 }
2498
2499 ret = kIOReturnNotFound;
2500 meta = NULL;
2501 do{
2502 name = OSSymbol::withCString(obj_name);
2503 if (!name) {
2504 break;
2505 }
2506 meta = OSMetaClass::copyMetaClassWithName(name);
2507 if (!meta) {
2508 break;
2509 }
2510 identifier = meta->getKmodName();
2511 if (!identifier) {
2512 break;
2513 }
2514 cstr = identifier->getCStringNoCopy();
2515 if (!cstr) {
2516 break;
2517 }
2518 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2519 ret = kIOReturnSuccess;
2520 }while (false);
2521
2522 OSSafeReleaseNULL(name);
2523 if (meta) {
2524 meta->releaseMetaClass();
2525 }
2526
2527 return ret;
2528 }
2529
2530 /* Routine io_object_conforms_to */
2531 kern_return_t
2532 is_io_object_conforms_to(
2533 io_object_t object,
2534 io_name_t className,
2535 boolean_t *conforms )
2536 {
2537 if (!object) {
2538 return kIOReturnBadArgument;
2539 }
2540
2541 *conforms = (NULL != object->metaCast( className ));
2542
2543 return kIOReturnSuccess;
2544 }
2545
2546 /* Routine io_object_get_retain_count */
2547 kern_return_t
2548 is_io_object_get_retain_count(
2549 io_object_t object,
2550 uint32_t *retainCount )
2551 {
2552 if (!object) {
2553 return kIOReturnBadArgument;
2554 }
2555
2556 *retainCount = object->getRetainCount();
2557 return kIOReturnSuccess;
2558 }
2559
2560 /* Routine io_iterator_next */
2561 kern_return_t
2562 is_io_iterator_next(
2563 io_object_t iterator,
2564 io_object_t *object )
2565 {
2566 IOReturn ret;
2567 OSObject * obj;
2568 OSIterator * iter;
2569 IOUserIterator * uiter;
2570
2571 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2572 obj = uiter->copyNextObject();
2573 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2574 obj = iter->getNextObject();
2575 if (obj) {
2576 obj->retain();
2577 }
2578 } else {
2579 return kIOReturnBadArgument;
2580 }
2581
2582 if (obj) {
2583 *object = obj;
2584 ret = kIOReturnSuccess;
2585 } else {
2586 ret = kIOReturnNoDevice;
2587 }
2588
2589 return ret;
2590 }
2591
2592 /* Routine io_iterator_reset */
2593 kern_return_t
2594 is_io_iterator_reset(
2595 io_object_t iterator )
2596 {
2597 CHECK( OSIterator, iterator, iter );
2598
2599 iter->reset();
2600
2601 return kIOReturnSuccess;
2602 }
2603
2604 /* Routine io_iterator_is_valid */
2605 kern_return_t
2606 is_io_iterator_is_valid(
2607 io_object_t iterator,
2608 boolean_t *is_valid )
2609 {
2610 CHECK( OSIterator, iterator, iter );
2611
2612 *is_valid = iter->isValid();
2613
2614 return kIOReturnSuccess;
2615 }
2616
2617 static kern_return_t
2618 internal_io_service_match_property_table(
2619 io_service_t _service,
2620 const char * matching,
2621 mach_msg_type_number_t matching_size,
2622 boolean_t *matches)
2623 {
2624 CHECK( IOService, _service, service );
2625
2626 kern_return_t kr;
2627 OSObject * obj;
2628 OSDictionary * dict;
2629
2630 assert(matching_size);
2631
2632
2633 obj = OSUnserializeXML(matching, matching_size);
2634
2635 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2636 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2637 *matches = service->passiveMatch( dict );
2638 kr = kIOReturnSuccess;
2639 } else {
2640 kr = kIOReturnBadArgument;
2641 }
2642
2643 if (obj) {
2644 obj->release();
2645 }
2646
2647 return kr;
2648 }
2649
2650 /* Routine io_service_match_property_table */
2651 kern_return_t
2652 is_io_service_match_property_table(
2653 io_service_t service,
2654 io_string_t matching,
2655 boolean_t *matches )
2656 {
2657 return kIOReturnUnsupported;
2658 }
2659
2660
2661 /* Routine io_service_match_property_table_ool */
2662 kern_return_t
2663 is_io_service_match_property_table_ool(
2664 io_object_t service,
2665 io_buf_ptr_t matching,
2666 mach_msg_type_number_t matchingCnt,
2667 kern_return_t *result,
2668 boolean_t *matches )
2669 {
2670 kern_return_t kr;
2671 vm_offset_t data;
2672 vm_map_offset_t map_data;
2673
2674 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2675 data = CAST_DOWN(vm_offset_t, map_data);
2676
2677 if (KERN_SUCCESS == kr) {
2678 // must return success after vm_map_copyout() succeeds
2679 *result = internal_io_service_match_property_table(service,
2680 (const char *)data, matchingCnt, matches );
2681 vm_deallocate( kernel_map, data, matchingCnt );
2682 }
2683
2684 return kr;
2685 }
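/*
 * Added note (not in the original source): for the *_ool variants the MIG
 * return value only reports whether the out-of-line buffer was consumed.
 * Once vm_map_copyout() has succeeded, the routine must return KERN_SUCCESS
 * so MIG does not attempt to destroy the already-consumed copy object; the
 * real matching status travels back to the caller in *result instead.
 */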
2686
2687 /* Routine io_service_match_property_table_bin */
2688 kern_return_t
2689 is_io_service_match_property_table_bin(
2690 io_object_t service,
2691 io_struct_inband_t matching,
2692 mach_msg_type_number_t matchingCnt,
2693 boolean_t *matches)
2694 {
2695 return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
2696 }
2697
2698 static kern_return_t
2699 internal_io_service_get_matching_services(
2700 mach_port_t main_port,
2701 const char * matching,
2702 mach_msg_type_number_t matching_size,
2703 io_iterator_t *existing )
2704 {
2705 kern_return_t kr;
2706 OSObject * obj;
2707 OSDictionary * dict;
2708
2709 if (main_port != main_device_port) {
2710 return kIOReturnNotPrivileged;
2711 }
2712
2713 assert(matching_size);
2714 obj = OSUnserializeXML(matching, matching_size);
2715
2716 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2717 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2718 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2719 kr = kIOReturnSuccess;
2720 } else {
2721 kr = kIOReturnBadArgument;
2722 }
2723
2724 if (obj) {
2725 obj->release();
2726 }
2727
2728 return kr;
2729 }
2730
2731 /* Routine io_service_get_matching_services */
2732 kern_return_t
2733 is_io_service_get_matching_services(
2734 mach_port_t main_port,
2735 io_string_t matching,
2736 io_iterator_t *existing )
2737 {
2738 return kIOReturnUnsupported;
2739 }
2740
2741 /* Routine io_service_get_matching_services_ool */
2742 kern_return_t
2743 is_io_service_get_matching_services_ool(
2744 mach_port_t main_port,
2745 io_buf_ptr_t matching,
2746 mach_msg_type_number_t matchingCnt,
2747 kern_return_t *result,
2748 io_object_t *existing )
2749 {
2750 kern_return_t kr;
2751 vm_offset_t data;
2752 vm_map_offset_t map_data;
2753
2754 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2755 data = CAST_DOWN(vm_offset_t, map_data);
2756
2757 if (KERN_SUCCESS == kr) {
2758 // must return success after vm_map_copyout() succeeds
2759 // and mig will copy out objects on success
2760 *existing = NULL;
2761 *result = internal_io_service_get_matching_services(main_port,
2762 (const char *) data, matchingCnt, existing);
2763 vm_deallocate( kernel_map, data, matchingCnt );
2764 }
2765
2766 return kr;
2767 }
2768
2769 /* Routine io_service_get_matching_services_bin */
2770 kern_return_t
2771 is_io_service_get_matching_services_bin(
2772 mach_port_t main_port,
2773 io_struct_inband_t matching,
2774 mach_msg_type_number_t matchingCnt,
2775 io_object_t *existing)
2776 {
2777 return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
2778 }
2779
2780
2781 static kern_return_t
2782 internal_io_service_get_matching_service(
2783 mach_port_t main_port,
2784 const char * matching,
2785 mach_msg_type_number_t matching_size,
2786 io_service_t *service )
2787 {
2788 kern_return_t kr;
2789 OSObject * obj;
2790 OSDictionary * dict;
2791
2792 if (main_port != main_device_port) {
2793 return kIOReturnNotPrivileged;
2794 }
2795
2796 assert(matching_size);
2797 obj = OSUnserializeXML(matching, matching_size);
2798
2799 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2800 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2801 *service = IOService::copyMatchingService( dict );
2802 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2803 } else {
2804 kr = kIOReturnBadArgument;
2805 }
2806
2807 if (obj) {
2808 obj->release();
2809 }
2810
2811 return kr;
2812 }
2813
2814 /* Routine io_service_get_matching_service */
2815 kern_return_t
2816 is_io_service_get_matching_service(
2817 mach_port_t main_port,
2818 io_string_t matching,
2819 io_service_t *service )
2820 {
2821 return kIOReturnUnsupported;
2822 }
2823
2824 /* Routine io_service_get_matching_services_ool */
2825 kern_return_t
2826 is_io_service_get_matching_service_ool(
2827 mach_port_t main_port,
2828 io_buf_ptr_t matching,
2829 mach_msg_type_number_t matchingCnt,
2830 kern_return_t *result,
2831 io_object_t *service )
2832 {
2833 kern_return_t kr;
2834 vm_offset_t data;
2835 vm_map_offset_t map_data;
2836
2837 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2838 data = CAST_DOWN(vm_offset_t, map_data);
2839
2840 if (KERN_SUCCESS == kr) {
2841 // must return success after vm_map_copyout() succeeds
2842 // and mig will copy out objects on success
2843 *service = NULL;
2844 *result = internal_io_service_get_matching_service(main_port,
2845 (const char *) data, matchingCnt, service );
2846 vm_deallocate( kernel_map, data, matchingCnt );
2847 }
2848
2849 return kr;
2850 }
2851
2852 /* Routine io_service_get_matching_service_bin */
2853 kern_return_t
2854 is_io_service_get_matching_service_bin(
2855 mach_port_t main_port,
2856 io_struct_inband_t matching,
2857 mach_msg_type_number_t matchingCnt,
2858 io_object_t *service)
2859 {
2860 return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
2861 }
2862
2863 static kern_return_t
2864 internal_io_service_add_notification(
2865 mach_port_t main_port,
2866 io_name_t notification_type,
2867 const char * matching,
2868 size_t matching_size,
2869 mach_port_t port,
2870 void * reference,
2871 vm_size_t referenceSize,
2872 bool client64,
2873 io_object_t * notification )
2874 {
2875 IOServiceUserNotification * userNotify = NULL;
2876 IONotifier * notify = NULL;
2877 const OSSymbol * sym;
2878 OSObject * obj;
2879 OSDictionary * dict;
2880 IOReturn err;
2881 natural_t userMsgType;
2882
2883 if (main_port != main_device_port) {
2884 return kIOReturnNotPrivileged;
2885 }
2886
2887 do {
2888 err = kIOReturnNoResources;
2889
2890 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2891 return kIOReturnMessageTooLarge;
2892 }
2893
2894 if (!(sym = OSSymbol::withCString( notification_type ))) {
2895 err = kIOReturnNoResources;
2896 }
2897
2898 assert(matching_size);
2899 obj = OSUnserializeXML(matching, matching_size);
2900 dict = OSDynamicCast(OSDictionary, obj);
2901 if (!dict) {
2902 err = kIOReturnBadArgument;
2903 continue;
2904 }
2905 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2906
2907 if ((sym == gIOPublishNotification)
2908 || (sym == gIOFirstPublishNotification)) {
2909 userMsgType = kIOServicePublishNotificationType;
2910 } else if ((sym == gIOMatchedNotification)
2911 || (sym == gIOFirstMatchNotification)) {
2912 userMsgType = kIOServiceMatchedNotificationType;
2913 } else if ((sym == gIOTerminatedNotification)
2914 || (sym == gIOWillTerminateNotification)) {
2915 userMsgType = kIOServiceTerminatedNotificationType;
2916 } else {
2917 userMsgType = kLastIOKitNotificationType;
2918 }
2919
2920 userNotify = new IOServiceUserNotification;
2921
2922 if (userNotify && !userNotify->init( port, userMsgType,
2923 reference, referenceSize, client64)) {
2924 userNotify->release();
2925 userNotify = NULL;
2926 }
2927 if (!userNotify) {
2928 continue;
2929 }
2930
2931 notify = IOService::addMatchingNotification( sym, dict,
2932 &userNotify->_handler, userNotify );
2933 if (notify) {
2934 *notification = userNotify;
2935 userNotify->setNotification( notify );
2936 err = kIOReturnSuccess;
2937 } else {
2938 err = kIOReturnUnsupported;
2939 }
2940 } while (false);
2941
2942 if ((kIOReturnSuccess != err) && userNotify) {
2943 userNotify->setNotification(NULL);
2944 userNotify->invalidatePort();
2945 userNotify->release();
2946 userNotify = NULL;
2947 }
2948
2949 if (sym) {
2950 sym->release();
2951 }
2952 if (obj) {
2953 obj->release();
2954 }
2955
2956 return err;
2957 }
2958
2959
2960 /* Routine io_service_add_notification */
2961 kern_return_t
2962 is_io_service_add_notification(
2963 mach_port_t main_port,
2964 io_name_t notification_type,
2965 io_string_t matching,
2966 mach_port_t port,
2967 io_async_ref_t reference,
2968 mach_msg_type_number_t referenceCnt,
2969 io_object_t * notification )
2970 {
2971 return kIOReturnUnsupported;
2972 }
2973
2974 /* Routine io_service_add_notification_64 */
2975 kern_return_t
2976 is_io_service_add_notification_64(
2977 mach_port_t main_port,
2978 io_name_t notification_type,
2979 io_string_t matching,
2980 mach_port_t wake_port,
2981 io_async_ref64_t reference,
2982 mach_msg_type_number_t referenceCnt,
2983 io_object_t *notification )
2984 {
2985 return kIOReturnUnsupported;
2986 }
2987
2988 /* Routine io_service_add_notification_bin */
2989 kern_return_t
2990 is_io_service_add_notification_bin
2991 (
2992 mach_port_t main_port,
2993 io_name_t notification_type,
2994 io_struct_inband_t matching,
2995 mach_msg_type_number_t matchingCnt,
2996 mach_port_t wake_port,
2997 io_async_ref_t reference,
2998 mach_msg_type_number_t referenceCnt,
2999 io_object_t *notification)
3000 {
3001 io_async_ref_t zreference;
3002
3003 if (referenceCnt > ASYNC_REF_COUNT) {
3004 return kIOReturnBadArgument;
3005 }
3006 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3007 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3008
3009 return internal_io_service_add_notification(main_port, notification_type,
3010 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3011 false, notification);
3012 }
3013
3014 /* Routine io_service_add_notification_bin_64 */
3015 kern_return_t
3016 is_io_service_add_notification_bin_64
3017 (
3018 mach_port_t main_port,
3019 io_name_t notification_type,
3020 io_struct_inband_t matching,
3021 mach_msg_type_number_t matchingCnt,
3022 mach_port_t wake_port,
3023 io_async_ref64_t reference,
3024 mach_msg_type_number_t referenceCnt,
3025 io_object_t *notification)
3026 {
3027 io_async_ref64_t zreference;
3028
3029 if (referenceCnt > ASYNC_REF64_COUNT) {
3030 return kIOReturnBadArgument;
3031 }
3032 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3033 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3034
3035 return internal_io_service_add_notification(main_port, notification_type,
3036 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3037 true, notification);
3038 }
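/*
 * Added note (not in the original source): the zreference staging above is
 * the standard pattern for variable-length async references arriving from
 * MIG: validate referenceCnt against the fixed maximum, copy only the
 * caller-supplied words, and zero the tail so the kernel never reads
 * uninitialized reference slots. The same pattern repeats in the _ool and
 * interest-notification entry points below.
 */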
3039
3040 static kern_return_t
3041 internal_io_service_add_notification_ool(
3042 mach_port_t main_port,
3043 io_name_t notification_type,
3044 io_buf_ptr_t matching,
3045 mach_msg_type_number_t matchingCnt,
3046 mach_port_t wake_port,
3047 void * reference,
3048 vm_size_t referenceSize,
3049 bool client64,
3050 kern_return_t *result,
3051 io_object_t *notification )
3052 {
3053 kern_return_t kr;
3054 vm_offset_t data;
3055 vm_map_offset_t map_data;
3056
3057 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
3058 data = CAST_DOWN(vm_offset_t, map_data);
3059
3060 if (KERN_SUCCESS == kr) {
3061 // must return success after vm_map_copyout() succeeds
3062 // and mig will copy out objects on success
3063 *notification = NULL;
3064 *result = internal_io_service_add_notification( main_port, notification_type,
3065 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
3066 vm_deallocate( kernel_map, data, matchingCnt );
3067 }
3068
3069 return kr;
3070 }
3071
3072 /* Routine io_service_add_notification_ool */
3073 kern_return_t
3074 is_io_service_add_notification_ool(
3075 mach_port_t main_port,
3076 io_name_t notification_type,
3077 io_buf_ptr_t matching,
3078 mach_msg_type_number_t matchingCnt,
3079 mach_port_t wake_port,
3080 io_async_ref_t reference,
3081 mach_msg_type_number_t referenceCnt,
3082 kern_return_t *result,
3083 io_object_t *notification )
3084 {
3085 io_async_ref_t zreference;
3086
3087 if (referenceCnt > ASYNC_REF_COUNT) {
3088 return kIOReturnBadArgument;
3089 }
3090 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3091 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3092
3093 return internal_io_service_add_notification_ool(main_port, notification_type,
3094 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3095 false, result, notification);
3096 }
3097
3098 /* Routine io_service_add_notification_ool_64 */
3099 kern_return_t
3100 is_io_service_add_notification_ool_64(
3101 mach_port_t main_port,
3102 io_name_t notification_type,
3103 io_buf_ptr_t matching,
3104 mach_msg_type_number_t matchingCnt,
3105 mach_port_t wake_port,
3106 io_async_ref64_t reference,
3107 mach_msg_type_number_t referenceCnt,
3108 kern_return_t *result,
3109 io_object_t *notification )
3110 {
3111 io_async_ref64_t zreference;
3112
3113 if (referenceCnt > ASYNC_REF64_COUNT) {
3114 return kIOReturnBadArgument;
3115 }
3116 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3117 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3118
3119 return internal_io_service_add_notification_ool(main_port, notification_type,
3120 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3121 true, result, notification);
3122 }
3123
3124 /* Routine io_service_add_notification_old */
3125 kern_return_t
3126 is_io_service_add_notification_old(
3127 mach_port_t main_port,
3128 io_name_t notification_type,
3129 io_string_t matching,
3130 mach_port_t port,
3131 // for binary compatibility reasons, this must be natural_t for ILP32
3132 natural_t ref,
3133 io_object_t * notification )
3134 {
3135 return is_io_service_add_notification( main_port, notification_type,
3136 matching, port, &ref, 1, notification );
3137 }
3138
3139
3140 static kern_return_t
3141 internal_io_service_add_interest_notification(
3142 io_object_t _service,
3143 io_name_t type_of_interest,
3144 mach_port_t port,
3145 void * reference,
3146 vm_size_t referenceSize,
3147 bool client64,
3148 io_object_t * notification )
3149 {
3150 IOServiceMessageUserNotification * userNotify = NULL;
3151 IONotifier * notify = NULL;
3152 const OSSymbol * sym;
3153 IOReturn err;
3154
3155 CHECK( IOService, _service, service );
3156
3157 err = kIOReturnNoResources;
3158 if ((sym = OSSymbol::withCString( type_of_interest ))) {
3159 do {
3160 userNotify = new IOServiceMessageUserNotification;
3161
3162 if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
3163 reference, referenceSize, client64 )) {
3164 userNotify->release();
3165 userNotify = NULL;
3166 }
3167 if (!userNotify) {
3168 continue;
3169 }
3170
3171 notify = service->registerInterest( sym,
3172 &userNotify->_handler, userNotify );
3173 if (notify) {
3174 *notification = userNotify;
3175 userNotify->setNotification( notify );
3176 err = kIOReturnSuccess;
3177 } else {
3178 err = kIOReturnUnsupported;
3179 }
3180 } while (false);
3181
3182 sym->release();
3183 }
3184
3185 if ((kIOReturnSuccess != err) && userNotify) {
3186 userNotify->setNotification(NULL);
3187 userNotify->invalidatePort();
3188 userNotify->release();
3189 userNotify = NULL;
3190 }
3191
3192 return err;
3193 }
3194
3195 /* Routine io_service_add_message_notification */
3196 kern_return_t
3197 is_io_service_add_interest_notification(
3198 io_object_t service,
3199 io_name_t type_of_interest,
3200 mach_port_t port,
3201 io_async_ref_t reference,
3202 mach_msg_type_number_t referenceCnt,
3203 io_object_t * notification )
3204 {
3205 io_async_ref_t zreference;
3206
3207 if (referenceCnt > ASYNC_REF_COUNT) {
3208 return kIOReturnBadArgument;
3209 }
3210 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3211 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3212
3213 return internal_io_service_add_interest_notification(service, type_of_interest,
3214 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3215 }
3216
3217 /* Routine io_service_add_interest_notification_64 */
3218 kern_return_t
3219 is_io_service_add_interest_notification_64(
3220 io_object_t service,
3221 io_name_t type_of_interest,
3222 mach_port_t wake_port,
3223 io_async_ref64_t reference,
3224 mach_msg_type_number_t referenceCnt,
3225 io_object_t *notification )
3226 {
3227 io_async_ref64_t zreference;
3228
3229 if (referenceCnt > ASYNC_REF64_COUNT) {
3230 return kIOReturnBadArgument;
3231 }
3232 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3233 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3234
3235 return internal_io_service_add_interest_notification(service, type_of_interest,
3236 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3237 }
3238
3239
3240 /* Routine io_service_acknowledge_notification */
3241 kern_return_t
3242 is_io_service_acknowledge_notification(
3243 io_object_t _service,
3244 natural_t notify_ref,
3245 natural_t response )
3246 {
3247 CHECK( IOService, _service, service );
3248
3249 return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3250 (IOOptionBits) response );
3251 }
3252
3253 /* Routine io_connect_get_semaphore */
3254 kern_return_t
3255 is_io_connect_get_notification_semaphore(
3256 io_connect_t connection,
3257 natural_t notification_type,
3258 semaphore_t *semaphore )
3259 {
3260 IOReturn ret;
3261 CHECK( IOUserClient, connection, client );
3262
3263 IOStatisticsClientCall();
3264 client->ipcEnter(kIPCLockWrite);
3265 ret = client->getNotificationSemaphore((UInt32) notification_type,
3266 semaphore );
3267 client->ipcExit(kIPCLockWrite);
3268
3269 return ret;
3270 }
3271
3272 /* Routine io_registry_get_root_entry */
3273 kern_return_t
3274 is_io_registry_get_root_entry(
3275 mach_port_t main_port,
3276 io_object_t *root )
3277 {
3278 IORegistryEntry * entry;
3279
3280 if (main_port != main_device_port) {
3281 return kIOReturnNotPrivileged;
3282 }
3283
3284 entry = IORegistryEntry::getRegistryRoot();
3285 if (entry) {
3286 entry->retain();
3287 }
3288 *root = entry;
3289
3290 return kIOReturnSuccess;
3291 }
3292
3293 /* Routine io_registry_create_iterator */
3294 kern_return_t
3295 is_io_registry_create_iterator(
3296 mach_port_t main_port,
3297 io_name_t plane,
3298 uint32_t options,
3299 io_object_t *iterator )
3300 {
3301 if (main_port != main_device_port) {
3302 return kIOReturnNotPrivileged;
3303 }
3304
3305 *iterator = IOUserIterator::withIterator(
3306 IORegistryIterator::iterateOver(
3307 IORegistryEntry::getPlane( plane ), options ));
3308
3309 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3310 }
3311
3312 /* Routine io_registry_entry_create_iterator */
3313 kern_return_t
3314 is_io_registry_entry_create_iterator(
3315 io_object_t registry_entry,
3316 io_name_t plane,
3317 uint32_t options,
3318 io_object_t *iterator )
3319 {
3320 CHECK( IORegistryEntry, registry_entry, entry );
3321
3322 *iterator = IOUserIterator::withIterator(
3323 IORegistryIterator::iterateOver( entry,
3324 IORegistryEntry::getPlane( plane ), options ));
3325
3326 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3327 }
3328
3329 /* Routine io_registry_iterator_enter */
3330 kern_return_t
3331 is_io_registry_iterator_enter_entry(
3332 io_object_t iterator )
3333 {
3334 CHECKLOCKED( IORegistryIterator, iterator, iter );
3335
3336 IOLockLock(&oIter->lock);
3337 iter->enterEntry();
3338 IOLockUnlock(&oIter->lock);
3339
3340 return kIOReturnSuccess;
3341 }
3342
3343 /* Routine io_registry_iterator_exit */
3344 kern_return_t
3345 is_io_registry_iterator_exit_entry(
3346 io_object_t iterator )
3347 {
3348 bool didIt;
3349
3350 CHECKLOCKED( IORegistryIterator, iterator, iter );
3351
3352 IOLockLock(&oIter->lock);
3353 didIt = iter->exitEntry();
3354 IOLockUnlock(&oIter->lock);
3355
3356 return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
3357 }
3358
3359 /* Routine io_registry_entry_from_path */
3360 kern_return_t
3361 is_io_registry_entry_from_path(
3362 mach_port_t main_port,
3363 io_string_t path,
3364 io_object_t *registry_entry )
3365 {
3366 IORegistryEntry * entry;
3367
3368 if (main_port != main_device_port) {
3369 return kIOReturnNotPrivileged;
3370 }
3371
3372 entry = IORegistryEntry::fromPath( path );
3373
3374 if (!entry && IOTaskRegistryCompatibility(current_task())) {
3375 OSDictionary * matching;
3376 const OSObject * objects[2] = { kOSBooleanTrue, NULL };
3377 const OSSymbol * keys[2] = { gIOCompatibilityMatchKey, gIOPathMatchKey };
3378
3379 objects[1] = OSString::withCStringNoCopy(path);
3380 matching = OSDictionary::withObjects(objects, keys, 2, 2);
3381 if (matching) {
3382 entry = IOService::copyMatchingService(matching);
3383 }
3384 OSSafeReleaseNULL(matching);
3385 OSSafeReleaseNULL(objects[1]);
3386 }
3387
3388 *registry_entry = entry;
3389
3390 return kIOReturnSuccess;
3391 }
3392
3393
3394 /* Routine io_registry_entry_from_path */
3395 kern_return_t
3396 is_io_registry_entry_from_path_ool(
3397 mach_port_t main_port,
3398 io_string_inband_t path,
3399 io_buf_ptr_t path_ool,
3400 mach_msg_type_number_t path_oolCnt,
3401 kern_return_t *result,
3402 io_object_t *registry_entry)
3403 {
3404 IORegistryEntry * entry;
3405 vm_map_offset_t map_data;
3406 const char * cpath;
3407 IOReturn res;
3408 kern_return_t err;
3409
3410 if (main_port != main_device_port) {
3411 return kIOReturnNotPrivileged;
3412 }
3413
3414 map_data = 0;
3415 entry = NULL;
3416 res = err = KERN_SUCCESS;
3417 if (path[0]) {
3418 cpath = path;
3419 } else {
3420 if (!path_oolCnt) {
3421 return kIOReturnBadArgument;
3422 }
3423 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
3424 return kIOReturnMessageTooLarge;
3425 }
3426
3427 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
3428 if (KERN_SUCCESS == err) {
3429 // must return KERN_SUCCESS to MIG after vm_map_copyout() succeeds; the actual status is returned in *result
3430 cpath = CAST_DOWN(const char *, map_data);
3431 if (cpath[path_oolCnt - 1]) {
3432 res = kIOReturnBadArgument;
3433 }
3434 }
3435 }
3436
3437 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
3438 entry = IORegistryEntry::fromPath(cpath);
3439 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
3440 }
3441
3442 if (map_data) {
3443 vm_deallocate(kernel_map, map_data, path_oolCnt);
3444 }
3445
3446 if (KERN_SUCCESS != err) {
3447 res = err;
3448 }
3449 *registry_entry = entry;
3450 *result = res;
3451
3452 return err;
3453 }
3454
3455
3456 /* Routine io_registry_entry_in_plane */
3457 kern_return_t
3458 is_io_registry_entry_in_plane(
3459 io_object_t registry_entry,
3460 io_name_t plane,
3461 boolean_t *inPlane )
3462 {
3463 CHECK( IORegistryEntry, registry_entry, entry );
3464
3465 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3466
3467 return kIOReturnSuccess;
3468 }
3469
3470
3471 /* Routine io_registry_entry_get_path */
3472 kern_return_t
3473 is_io_registry_entry_get_path(
3474 io_object_t registry_entry,
3475 io_name_t plane,
3476 io_string_t path )
3477 {
3478 int length;
3479 CHECK( IORegistryEntry, registry_entry, entry );
3480
3481 length = sizeof(io_string_t);
3482 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3483 return kIOReturnSuccess;
3484 } else {
3485 return kIOReturnBadArgument;
3486 }
3487 }
3488
3489 /* Routine io_registry_entry_get_path */
3490 kern_return_t
3491 is_io_registry_entry_get_path_ool(
3492 io_object_t registry_entry,
3493 io_name_t plane,
3494 io_string_inband_t path,
3495 io_buf_ptr_t *path_ool,
3496 mach_msg_type_number_t *path_oolCnt)
3497 {
3498 enum { kMaxPath = 16384 };
3499 IOReturn err;
3500 int length;
3501 char * buf;
3502
3503 CHECK( IORegistryEntry, registry_entry, entry );
3504
3505 *path_ool = NULL;
3506 *path_oolCnt = 0;
3507 length = sizeof(io_string_inband_t);
3508 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
3509 err = kIOReturnSuccess;
3510 } else {
3511 length = kMaxPath;
3512 buf = IONewData(char, length);
3513 if (!buf) {
3514 err = kIOReturnNoMemory;
3515 } else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
3516 err = kIOReturnError;
3517 } else {
3518 *path_oolCnt = length;
3519 err = copyoutkdata(buf, length, path_ool);
3520 }
3521 if (buf) {
3522 IODeleteData(buf, char, kMaxPath);
3523 }
3524 }
3525
3526 return err;
3527 }
3528
3529
3530 /* Routine io_registry_entry_get_name */
3531 kern_return_t
3532 is_io_registry_entry_get_name(
3533 io_object_t registry_entry,
3534 io_name_t name )
3535 {
3536 CHECK( IORegistryEntry, registry_entry, entry );
3537
3538 strncpy( name, entry->getName(), sizeof(io_name_t));
3539
3540 return kIOReturnSuccess;
3541 }
3542
3543 /* Routine io_registry_entry_get_name_in_plane */
3544 kern_return_t
3545 is_io_registry_entry_get_name_in_plane(
3546 io_object_t registry_entry,
3547 io_name_t planeName,
3548 io_name_t name )
3549 {
3550 const IORegistryPlane * plane;
3551 CHECK( IORegistryEntry, registry_entry, entry );
3552
3553 if (planeName[0]) {
3554 plane = IORegistryEntry::getPlane( planeName );
3555 } else {
3556 plane = NULL;
3557 }
3558
3559 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3560
3561 return kIOReturnSuccess;
3562 }
3563
3564 /* Routine io_registry_entry_get_location_in_plane */
3565 kern_return_t
3566 is_io_registry_entry_get_location_in_plane(
3567 io_object_t registry_entry,
3568 io_name_t planeName,
3569 io_name_t location )
3570 {
3571 const IORegistryPlane * plane;
3572 CHECK( IORegistryEntry, registry_entry, entry );
3573
3574 if (planeName[0]) {
3575 plane = IORegistryEntry::getPlane( planeName );
3576 } else {
3577 plane = NULL;
3578 }
3579
3580 const char * cstr = entry->getLocation( plane );
3581
3582 if (cstr) {
3583 strncpy( location, cstr, sizeof(io_name_t));
3584 return kIOReturnSuccess;
3585 } else {
3586 return kIOReturnNotFound;
3587 }
3588 }
3589
3590 /* Routine io_registry_entry_get_registry_entry_id */
3591 kern_return_t
3592 is_io_registry_entry_get_registry_entry_id(
3593 io_object_t registry_entry,
3594 uint64_t *entry_id )
3595 {
3596 CHECK( IORegistryEntry, registry_entry, entry );
3597
3598 *entry_id = entry->getRegistryEntryID();
3599
3600 return kIOReturnSuccess;
3601 }
3602
3603
3604 static OSObject *
3605 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3606 {
3607 OSObject * obj;
3608 OSObject * compatProperties;
3609 OSDictionary * props;
3610
3611 obj = regEntry->copyProperty(name);
3612 if (obj) {
3613 return obj;
3614 }
3615
3616 compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3617 if (!compatProperties
3618 && IOTaskRegistryCompatibility(current_task())) {
3619 compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3620 }
3621 if (compatProperties) {
3622 props = OSDynamicCast(OSDictionary, compatProperties);
3623 if (props) {
3624 obj = props->getObject(name);
3625 if (obj) {
3626 obj->retain();
3627 }
3628 }
3629 compatProperties->release();
3630 }
3631
3632 return obj;
3633 }
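/*
 * Added note (not in the original source): property lookups made on behalf
 * of user space fall back to the dictionary published under
 * gIOUserServicePropertiesKey (and, for tasks opted into registry
 * compatibility, gIOCompatibilityPropertiesKey) when the entry itself has
 * no such key. A sketch of the effective lookup order:
 *
 *   obj = entry->copyProperty(name);                 // direct property
 *   if (!obj) obj = compatDict->getObject(name);     // compatibility shim
 */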
3634
3635 /* Routine io_registry_entry_get_property */
3636 kern_return_t
3637 is_io_registry_entry_get_property_bytes(
3638 io_object_t registry_entry,
3639 io_name_t property_name,
3640 io_struct_inband_t buf,
3641 mach_msg_type_number_t *dataCnt )
3642 {
3643 OSObject * obj;
3644 OSData * data;
3645 OSString * str;
3646 OSBoolean * boo;
3647 OSNumber * off;
3648 UInt64 offsetBytes;
3649 unsigned int len = 0;
3650 const void * bytes = NULL;
3651 IOReturn ret = kIOReturnSuccess;
3652
3653 CHECK( IORegistryEntry, registry_entry, entry );
3654
3655 #if CONFIG_MACF
3656 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3657 return kIOReturnNotPermitted;
3658 }
3659 #endif
3660
3661 obj = IOCopyPropertyCompatible(entry, property_name);
3662 if (!obj) {
3663 return kIOReturnNoResources;
3664 }
3665
3666 // One day OSData will be a common container base class
3667 // until then...
3668 if ((data = OSDynamicCast( OSData, obj ))) {
3669 len = data->getLength();
3670 bytes = data->getBytesNoCopy();
3671 if (!data->isSerializable()) {
3672 len = 0;
3673 }
3674 } else if ((str = OSDynamicCast( OSString, obj ))) {
3675 len = str->getLength() + 1;
3676 bytes = str->getCStringNoCopy();
3677 } else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
3678 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
3679 bytes = boo->isTrue() ? "Yes" : "No";
3680 } else if ((off = OSDynamicCast( OSNumber, obj ))) {
3681 offsetBytes = off->unsigned64BitValue();
3682 len = off->numberOfBytes();
3683 if (len > sizeof(offsetBytes)) {
3684 len = sizeof(offsetBytes);
3685 }
3686 bytes = &offsetBytes;
3687 #ifdef __BIG_ENDIAN__
3688 bytes = (const void *)
3689 (((UInt32) bytes) + (sizeof(UInt64) - len));
3690 #endif
3691 } else {
3692 ret = kIOReturnBadArgument;
3693 }
3694
3695 if (bytes) {
3696 if (*dataCnt < len) {
3697 ret = kIOReturnIPCError;
3698 } else {
3699 *dataCnt = len;
3700 bcopy( bytes, buf, len );
3701 }
3702 }
3703 obj->release();
3704
3705 return ret;
3706 }
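/*
 * Usage note (added commentary, not in the original source): this routine
 * backs the inband property fetch; OSData, OSString, OSBoolean, and
 * OSNumber values are flattened into the caller's io_struct_inband_t
 * buffer, and any other type reports kIOReturnBadArgument. A hedged sketch
 * of the user-space side, assuming the IOKitLib wrapper
 * IORegistryEntryGetProperty() (named here as an assumption, not taken
 * from this file):
 *
 *   io_struct_inband_t buf;
 *   uint32_t size = sizeof(buf);
 *   kern_return_t kr = IORegistryEntryGetProperty(entry, "IOClass",
 *       buf, &size);
 */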
3707
3708
3709 /* Routine io_registry_entry_get_property */
3710 kern_return_t
3711 is_io_registry_entry_get_property(
3712 io_object_t registry_entry,
3713 io_name_t property_name,
3714 io_buf_ptr_t *properties,
3715 mach_msg_type_number_t *propertiesCnt )
3716 {
3717 kern_return_t err;
3718 unsigned int len;
3719 OSObject * obj;
3720
3721 CHECK( IORegistryEntry, registry_entry, entry );
3722
3723 #if CONFIG_MACF
3724 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3725 return kIOReturnNotPermitted;
3726 }
3727 #endif
3728
3729 obj = IOCopyPropertyCompatible(entry, property_name);
3730 if (!obj) {
3731 return kIOReturnNotFound;
3732 }
3733
3734 OSSerialize * s = OSSerialize::withCapacity(4096);
3735 if (!s) {
3736 obj->release();
3737 return kIOReturnNoMemory;
3738 }
3739
3740 if (obj->serialize( s )) {
3741 len = s->getLength();
3742 *propertiesCnt = len;
3743 err = copyoutkdata( s->text(), len, properties );
3744 } else {
3745 err = kIOReturnUnsupported;
3746 }
3747
3748 s->release();
3749 obj->release();
3750
3751 return err;
3752 }
3753
3754 /* Routine io_registry_entry_get_property_recursively */
3755 kern_return_t
3756 is_io_registry_entry_get_property_recursively(
3757 io_object_t registry_entry,
3758 io_name_t plane,
3759 io_name_t property_name,
3760 uint32_t options,
3761 io_buf_ptr_t *properties,
3762 mach_msg_type_number_t *propertiesCnt )
3763 {
3764 kern_return_t err;
3765 unsigned int len;
3766 OSObject * obj;
3767
3768 CHECK( IORegistryEntry, registry_entry, entry );
3769
3770 #if CONFIG_MACF
3771 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3772 return kIOReturnNotPermitted;
3773 }
3774 #endif
3775
3776 obj = entry->copyProperty( property_name,
3777 IORegistryEntry::getPlane( plane ), options );
3778 if (!obj) {
3779 return kIOReturnNotFound;
3780 }
3781
3782 OSSerialize * s = OSSerialize::withCapacity(4096);
3783 if (!s) {
3784 obj->release();
3785 return kIOReturnNoMemory;
3786 }
3787
3788 if (obj->serialize( s )) {
3789 len = s->getLength();
3790 *propertiesCnt = len;
3791 err = copyoutkdata( s->text(), len, properties );
3792 } else {
3793 err = kIOReturnUnsupported;
3794 }
3795
3796 s->release();
3797 obj->release();
3798
3799 return err;
3800 }
3801
3802 /* Routine io_registry_entry_get_properties */
3803 kern_return_t
3804 is_io_registry_entry_get_properties(
3805 io_object_t registry_entry,
3806 io_buf_ptr_t *properties,
3807 mach_msg_type_number_t *propertiesCnt )
3808 {
3809 return kIOReturnUnsupported;
3810 }
3811
3812 #if CONFIG_MACF
3813
3814 struct GetPropertiesEditorRef {
3815 kauth_cred_t cred;
3816 IORegistryEntry * entry;
3817 OSCollection * root;
3818 };
3819
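/*
 * GetPropertiesEditor is an OSSerialize editor callback. It is installed by
 * is_io_registry_entry_get_properties_bin_buf when the MAC policy requests
 * property filtering; for each key in the top-level (root) dictionary it
 * consults mac_iokit_check_get_property() and replaces disallowed values
 * with NULL so they are omitted from the serialized output. Allowed values
 * are returned retained, per LIBKERN_RETURNS_RETAINED.
 */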
3820 static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
3821 GetPropertiesEditor(void * reference,
3822 OSSerialize * s,
3823 OSCollection * container,
3824 const OSSymbol * name,
3825 const OSMetaClassBase * value)
3826 {
3827 GetPropertiesEditorRef * ref = (typeof(ref))reference;
3828
3829 if (!ref->root) {
3830 ref->root = container;
3831 }
3832 if (ref->root == container) {
3833 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
3834 value = NULL;
3835 }
3836 }
3837 if (value) {
3838 value->retain();
3839 }
3840 return value;
3841 }
3842
3843 #endif /* CONFIG_MACF */
3844
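/*
 * The *_bin_buf routines below serialize properties in the binary OSSerialize
 * format. When the caller supplies a user buffer (buf/bufsize) large enough
 * for the result, the data is copied out directly and *propertiesCnt is set
 * to zero; otherwise the data is returned out-of-line via copyoutkdata() and
 * *propertiesCnt carries its length. Entries that publish compatibility
 * dictionaries have those merged over their normal properties before
 * serialization.
 */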
3845 /* Routine io_registry_entry_get_properties_bin_buf */
3846 kern_return_t
3847 is_io_registry_entry_get_properties_bin_buf(
3848 io_object_t registry_entry,
3849 mach_vm_address_t buf,
3850 mach_vm_size_t *bufsize,
3851 io_buf_ptr_t *properties,
3852 mach_msg_type_number_t *propertiesCnt)
3853 {
3854 kern_return_t err = kIOReturnSuccess;
3855 unsigned int len;
3856 OSObject * compatProperties;
3857 OSSerialize * s;
3858 OSSerialize::Editor editor = NULL;
3859 void * editRef = NULL;
3860
3861 CHECK(IORegistryEntry, registry_entry, entry);
3862
3863 #if CONFIG_MACF
3864 GetPropertiesEditorRef ref;
3865 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
3866 editor = &GetPropertiesEditor;
3867 editRef = &ref;
3868 ref.cred = kauth_cred_get();
3869 ref.entry = entry;
3870 ref.root = NULL;
3871 }
3872 #endif
3873
3874 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3875 if (!s) {
3876 return kIOReturnNoMemory;
3877 }
3878
3879
3880 compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
3881 if (!compatProperties
3882 && IOTaskRegistryCompatibility(current_task())) {
3883 compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
3884 }
3885
3886 if (compatProperties) {
3887 OSDictionary * dict;
3888
3889 dict = entry->dictionaryWithProperties();
3890 if (!dict) {
3891 err = kIOReturnNoMemory;
3892 } else {
3893 dict->removeObject(gIOUserServicePropertiesKey);
3894 dict->removeObject(gIOCompatibilityPropertiesKey);
3895 dict->merge(OSDynamicCast(OSDictionary, compatProperties));
3896 if (!dict->serialize(s)) {
3897 err = kIOReturnUnsupported;
3898 }
3899 dict->release();
3900 }
3901 compatProperties->release();
3902 } else if (!entry->serializeProperties(s)) {
3903 err = kIOReturnUnsupported;
3904 }
3905
3906 if (kIOReturnSuccess == err) {
3907 len = s->getLength();
3908 if (buf && bufsize && len <= *bufsize) {
3909 *bufsize = len;
3910 *propertiesCnt = 0;
3911 *properties = nullptr;
3912 if (copyout(s->text(), buf, len)) {
3913 err = kIOReturnVMError;
3914 } else {
3915 err = kIOReturnSuccess;
3916 }
3917 } else {
3918 if (bufsize) {
3919 *bufsize = 0;
3920 }
3921 *propertiesCnt = len;
3922 err = copyoutkdata( s->text(), len, properties );
3923 }
3924 }
3925 s->release();
3926
3927 return err;
3928 }
3929
3930 /* Routine io_registry_entry_get_properties_bin */
3931 kern_return_t
3932 is_io_registry_entry_get_properties_bin(
3933 io_object_t registry_entry,
3934 io_buf_ptr_t *properties,
3935 mach_msg_type_number_t *propertiesCnt)
3936 {
3937 return is_io_registry_entry_get_properties_bin_buf(registry_entry,
3938 0, NULL, properties, propertiesCnt);
3939 }
3940
3941 /* Routine io_registry_entry_get_property_bin_buf */
3942 kern_return_t
3943 is_io_registry_entry_get_property_bin_buf(
3944 io_object_t registry_entry,
3945 io_name_t plane,
3946 io_name_t property_name,
3947 uint32_t options,
3948 mach_vm_address_t buf,
3949 mach_vm_size_t *bufsize,
3950 io_buf_ptr_t *properties,
3951 mach_msg_type_number_t *propertiesCnt )
3952 {
3953 kern_return_t err;
3954 unsigned int len;
3955 OSObject * obj;
3956 const OSSymbol * sym;
3957
3958 CHECK( IORegistryEntry, registry_entry, entry );
3959
3960 #if CONFIG_MACF
3961 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3962 return kIOReturnNotPermitted;
3963 }
3964 #endif
3965
3966 sym = OSSymbol::withCString(property_name);
3967 if (!sym) {
3968 return kIOReturnNoMemory;
3969 }
3970
3971 err = kIOReturnNotFound;
3972 if (gIORegistryEntryPropertyKeysKey == sym) {
3973 obj = entry->copyPropertyKeys();
3974 } else {
3975 if ((kIORegistryIterateRecursively & options) && plane[0]) {
3976 obj = IOCopyPropertyCompatible(entry, property_name);
3977 if (obj == NULL) {
3978 IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
3979 if (iter) {
3980 while ((NULL == obj) && (entry = iter->getNextObject())) {
3981 OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
3982 #if CONFIG_MACF
3983 if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3984 // Record that the MAC hook blocked this entry and property, and continue to the next entry
3985 err = kIOReturnNotPermitted;
3986 OSSafeReleaseNULL(currentObj);
3987 continue;
3988 }
3989 #endif
3990 obj = currentObj;
3991 }
3992 iter->release();
3993 }
3994 }
3995 } else {
3996 obj = IOCopyPropertyCompatible(entry, property_name);
3997 }
3998 if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
3999 entry->removeProperty(sym);
4000 }
4001 }
4002
4003 sym->release();
4004 if (!obj) {
4005 return err;
4006 }
4007
4008 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
4009 if (!s) {
4010 obj->release();
4011 return kIOReturnNoMemory;
4012 }
4013
4014 if (obj->serialize( s )) {
4015 len = s->getLength();
4016 if (buf && bufsize && len <= *bufsize) {
4017 *bufsize = len;
4018 *propertiesCnt = 0;
4019 *properties = nullptr;
4020 if (copyout(s->text(), buf, len)) {
4021 err = kIOReturnVMError;
4022 } else {
4023 err = kIOReturnSuccess;
4024 }
4025 } else {
4026 if (bufsize) {
4027 *bufsize = 0;
4028 }
4029 *propertiesCnt = len;
4030 err = copyoutkdata( s->text(), len, properties );
4031 }
4032 } else {
4033 err = kIOReturnUnsupported;
4034 }
4035
4036 s->release();
4037 obj->release();
4038
4039 return err;
4040 }
4041
4042 /* Routine io_registry_entry_get_property_bin */
4043 kern_return_t
4044 is_io_registry_entry_get_property_bin(
4045 io_object_t registry_entry,
4046 io_name_t plane,
4047 io_name_t property_name,
4048 uint32_t options,
4049 io_buf_ptr_t *properties,
4050 mach_msg_type_number_t *propertiesCnt )
4051 {
4052 return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
4053 property_name, options, 0, NULL, properties, propertiesCnt);
4054 }
4055
4056
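/*
 * is_io_registry_entry_set_properties unserializes an XML property blob from
 * the caller and applies it with setProperties(). If the target publishes
 * gIORegistryEntryAllowableSetPropertiesKey, every incoming key must appear
 * in that array or the call fails with kIOReturnBadArgument. User clients
 * that opted into default locking take their IORWLock (or, for plain registry
 * entries, the registry property action) around setProperties(), and
 * DriverKit-backed services are additionally forwarded the dictionary via
 * UserSetProperties().
 */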
4057 /* Routine io_registry_entry_set_properties */
4058 kern_return_t
4059 is_io_registry_entry_set_properties
4060 (
4061 io_object_t registry_entry,
4062 io_buf_ptr_t properties,
4063 mach_msg_type_number_t propertiesCnt,
4064 kern_return_t * result)
4065 {
4066 OSObject * obj;
4067 kern_return_t err;
4068 IOReturn res;
4069 vm_offset_t data;
4070 vm_map_offset_t map_data;
4071
4072 CHECK( IORegistryEntry, registry_entry, entry );
4073
4074 if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
4075 return kIOReturnMessageTooLarge;
4076 }
4077
4078 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
4079 data = CAST_DOWN(vm_offset_t, map_data);
4080
4081 if (KERN_SUCCESS == err) {
4082 FAKE_STACK_FRAME(entry->getMetaClass());
4083
4084 // must return success after vm_map_copyout() succeeds
4085 obj = OSUnserializeXML((const char *) data, propertiesCnt );
4086 vm_deallocate( kernel_map, data, propertiesCnt );
4087
4088 if (!obj) {
4089 res = kIOReturnBadArgument;
4090 }
4091 #if CONFIG_MACF
4092 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
4093 registry_entry, obj)) {
4094 res = kIOReturnNotPermitted;
4095 }
4096 #endif
4097 else {
4098 IOService * service = OSDynamicCast(IOService, entry);
4099 OSDictionary * props = OSDynamicCast(OSDictionary, obj);
4100 OSObject * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
4101 OSArray * allowableArray;
4102
4103 if (!allowable) {
4104 res = kIOReturnSuccess;
4105 } else {
4106 if (!props) {
4107 res = kIOReturnNotPermitted;
4108 } else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
4109 res = kIOReturnNotPermitted;
4110 } else {
4111 bool allFound __block, found __block;
4112
4113 allFound = true;
4114 props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
4115 found = false;
4116 for (unsigned int idx = 0; !found; idx++) {
4117 OSObject * next = allowableArray->getObject(idx);
4118 if (!next) {
4119 break;
4120 }
4121 found = next->isEqualTo(key);
4122 }
4123 allFound &= found;
4124 if (!found) {
4125 IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
4126 entry->getName(), key->getCStringNoCopy());
4127 }
4128 return !allFound;
4129 });
4130 res = allFound ? kIOReturnSuccess : kIOReturnBadArgument;
4131 }
4132 }
4133 if (kIOReturnSuccess == res) {
4134 IOUserClient *
4135 client = OSDynamicCast(IOUserClient, entry);
4136
4137 if (client && client->defaultLockingSetProperties) {
4138 IORWLockWrite(&client->lock);
4139 }
4140
4141 if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
4142 res = entry->runPropertyActionBlock(^IOReturn (void) {
4143 return entry->setProperties( obj );
4144 });
4145 } else {
4146 res = entry->setProperties( obj );
4147 }
4148
4149 if (client && client->defaultLockingSetProperties) {
4150 IORWLockUnlock(&client->lock);
4151 }
4152 if (service && props && service->hasUserServer()) {
4153 res = service->UserSetProperties(props);
4154 }
4155 }
4156 OSSafeReleaseNULL(allowable);
4157 }
4158 if (obj) {
4159 obj->release();
4160 }
4161
4162 FAKE_STACK_FRAME_END();
4163 } else {
4164 res = err;
4165 }
4166
4167 *result = res;
4168 return err;
4169 }
4170
4171 /* Routine io_registry_entry_get_child_iterator */
4172 kern_return_t
4173 is_io_registry_entry_get_child_iterator(
4174 io_object_t registry_entry,
4175 io_name_t plane,
4176 io_object_t *iterator )
4177 {
4178 CHECK( IORegistryEntry, registry_entry, entry );
4179
4180 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
4181 IORegistryEntry::getPlane( plane )));
4182
4183 return kIOReturnSuccess;
4184 }
4185
4186 /* Routine io_registry_entry_get_parent_iterator */
4187 kern_return_t
4188 is_io_registry_entry_get_parent_iterator(
4189 io_object_t registry_entry,
4190 io_name_t plane,
4191 io_object_t *iterator)
4192 {
4193 CHECK( IORegistryEntry, registry_entry, entry );
4194
4195 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
4196 IORegistryEntry::getPlane( plane )));
4197
4198 return kIOReturnSuccess;
4199 }
4200
4201 /* Routine io_service_get_busy_state */
4202 kern_return_t
4203 is_io_service_get_busy_state(
4204 io_object_t _service,
4205 uint32_t *busyState )
4206 {
4207 CHECK( IOService, _service, service );
4208
4209 *busyState = service->getBusyState();
4210
4211 return kIOReturnSuccess;
4212 }
4213
4214 /* Routine io_service_get_state */
4215 kern_return_t
4216 is_io_service_get_state(
4217 io_object_t _service,
4218 uint64_t *state,
4219 uint32_t *busy_state,
4220 uint64_t *accumulated_busy_time )
4221 {
4222 CHECK( IOService, _service, service );
4223
4224 *state = service->getState();
4225 *busy_state = service->getBusyState();
4226 *accumulated_busy_time = service->getAccumulatedBusyTime();
4227
4228 return kIOReturnSuccess;
4229 }
4230
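/*
 * The two wait-quiet routines below convert the caller's mach_timespec_t
 * into nanoseconds (tv_sec * kSecondScale + tv_nsec) before calling
 * IOService::waitQuiet(); the options variant strips
 * kIOWaitQuietPanicOnFailure for callers lacking
 * kIOWaitQuietPanicsEntitlement. A user-space sketch (assuming the
 * IOKitLib wrapper; not part of this file):
 *
 *   mach_timespec_t t  = { .tv_sec = 10, .tv_nsec = 0 };
 *   kern_return_t   kr = IOServiceWaitQuiet(service, &t);
 */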
4231 /* Routine io_service_wait_quiet */
4232 kern_return_t
4233 is_io_service_wait_quiet(
4234 io_object_t _service,
4235 mach_timespec_t wait_time )
4236 {
4237 uint64_t timeoutNS;
4238
4239 CHECK( IOService, _service, service );
4240
4241 timeoutNS = wait_time.tv_sec;
4242 timeoutNS *= kSecondScale;
4243 timeoutNS += wait_time.tv_nsec;
4244
4245 return service->waitQuiet(timeoutNS);
4246 }
4247
4248 /* Routine io_service_wait_quiet_with_options */
4249 kern_return_t
4250 is_io_service_wait_quiet_with_options(
4251 io_object_t _service,
4252 mach_timespec_t wait_time,
4253 uint32_t options )
4254 {
4255 uint64_t timeoutNS;
4256
4257 CHECK( IOService, _service, service );
4258
4259 timeoutNS = wait_time.tv_sec;
4260 timeoutNS *= kSecondScale;
4261 timeoutNS += wait_time.tv_nsec;
4262
4263 if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4264 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4265 IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4266 OSSafeReleaseNULL(taskName);
4267
4268 /* strip this option from the options before calling waitQuietWithOptions */
4269 options &= ~kIOWaitQuietPanicOnFailure;
4270 }
4271
4272 return service->waitQuietWithOptions(timeoutNS, options);
4273 }
4274
4275
4276 /* Routine io_service_request_probe */
4277 kern_return_t
4278 is_io_service_request_probe(
4279 io_object_t _service,
4280 uint32_t options )
4281 {
4282 CHECK( IOService, _service, service );
4283
4284 return service->requestProbe( options );
4285 }
4286
4287 /* Routine io_service_get_authorization_id */
4288 kern_return_t
4289 is_io_service_get_authorization_id(
4290 io_object_t _service,
4291 uint64_t *authorization_id )
4292 {
4293 kern_return_t kr;
4294
4295 CHECK( IOService, _service, service );
4296
4297 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4298 kIOClientPrivilegeAdministrator );
4299 if (kIOReturnSuccess != kr) {
4300 return kr;
4301 }
4302
4303 #if defined(XNU_TARGET_OS_OSX)
4304 *authorization_id = service->getAuthorizationID();
4305 #else /* defined(XNU_TARGET_OS_OSX) */
4306 *authorization_id = 0;
4307 kr = kIOReturnUnsupported;
4308 #endif /* defined(XNU_TARGET_OS_OSX) */
4309
4310 return kr;
4311 }
4312
4313 /* Routine io_service_set_authorization_id */
4314 kern_return_t
4315 is_io_service_set_authorization_id(
4316 io_object_t _service,
4317 uint64_t authorization_id )
4318 {
4319 CHECK( IOService, _service, service );
4320
4321 #if defined(XNU_TARGET_OS_OSX)
4322 return service->setAuthorizationID( authorization_id );
4323 #else /* defined(XNU_TARGET_OS_OSX) */
4324 return kIOReturnUnsupported;
4325 #endif /* defined(XNU_TARGET_OS_OSX) */
4326 }
4327
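/*
 * is_io_service_open_extended backs IOServiceOpen() from user space. The
 * sequence is: MACF open-service check, IOService::newUserClient(), then a
 * series of gates on the new client. IOUserClient2022 subclasses must declare
 * the three default-locking properties and kIOUserClientEntitlementsKey
 * (kOSBooleanFalse, or an entitlement name the owning task must hold),
 * followed by the MACF open check, the optional sandbox filter resolver, and
 * registerOwner(). Any failure closes and releases the client; *result
 * carries the IOReturn while the MIG return stays KERN_SUCCESS. A user-space
 * sketch (assuming the IOKitLib wrapper; not part of this file):
 *
 *   io_connect_t  connect;
 *   kern_return_t kr = IOServiceOpen(service, mach_task_self(), 0, &connect);
 */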
4328 /* Routine io_service_open_ndr */
4329 kern_return_t
4330 is_io_service_open_extended(
4331 io_object_t _service,
4332 task_t owningTask,
4333 uint32_t connect_type,
4334 NDR_record_t ndr,
4335 io_buf_ptr_t properties,
4336 mach_msg_type_number_t propertiesCnt,
4337 kern_return_t * result,
4338 io_object_t *connection )
4339 {
4340 IOUserClient * client = NULL;
4341 kern_return_t err = KERN_SUCCESS;
4342 IOReturn res = kIOReturnSuccess;
4343 OSDictionary * propertiesDict = NULL;
4344 bool disallowAccess = false;
4345
4346 CHECK( IOService, _service, service );
4347
4348 if (!owningTask) {
4349 return kIOReturnBadArgument;
4350 }
4351 assert(owningTask == current_task());
4352 if (owningTask != current_task()) {
4353 return kIOReturnBadArgument;
4354 }
4355
4356 #if CONFIG_MACF
4357 if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
4358 return kIOReturnNotPermitted;
4359 }
4360 #endif
4361 do{
4362 if (properties) {
4363 return kIOReturnUnsupported;
4364 }
4365 #if 0
4366 {
4367 OSObject * obj;
4368 vm_offset_t data;
4369 vm_map_offset_t map_data;
4370
4371 if (propertiesCnt > sizeof(io_struct_inband_t)) {
4372 return kIOReturnMessageTooLarge;
4373 }
4374
4375 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
4376 res = err;
4377 data = CAST_DOWN(vm_offset_t, map_data);
4378 if (KERN_SUCCESS == err) {
4379 // must return success after vm_map_copyout() succeeds
4380 obj = OSUnserializeXML((const char *) data, propertiesCnt );
4381 vm_deallocate( kernel_map, data, propertiesCnt );
4382 propertiesDict = OSDynamicCast(OSDictionary, obj);
4383 if (!propertiesDict) {
4384 res = kIOReturnBadArgument;
4385 if (obj) {
4386 obj->release();
4387 }
4388 }
4389 }
4390 if (kIOReturnSuccess != res) {
4391 break;
4392 }
4393 }
4394 #endif
4395 res = service->newUserClient( owningTask, (void *) owningTask,
4396 connect_type, propertiesDict, &client );
4397
4398 if (propertiesDict) {
4399 propertiesDict->release();
4400 }
4401
4402 if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
4403 // client should always be an IOUserClient
4404 res = kIOReturnError;
4405 }
4406
4407 if (res == kIOReturnSuccess) {
4408 if (!client->reserved) {
4409 if (!client->reserve()) {
4410 client->clientClose();
4411 OSSafeReleaseNULL(client);
4412 res = kIOReturnNoMemory;
4413 }
4414 }
4415 }
4416
4417 if (res == kIOReturnSuccess) {
4418 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
4419 if (creatorName) {
4420 client->setProperty(kIOUserClientCreatorKey, creatorName);
4421 }
4422 const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
4423 client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
4424 if (client->sharedInstance) {
4425 IOLockLock(gIOUserClientOwnersLock);
4426 }
4427 if (!client->opened) {
4428 client->opened = true;
4429
4430 client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
4431 {
4432 OSObject * obj;
4433 extern const OSSymbol * gIOSurfaceIdentifier;
4434 obj = client->getProperty(kIOUserClientDefaultLockingKey);
4435 bool hasProps = false;
4436
4437 client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
4438 if (obj) {
4439 hasProps = true;
4440 client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
4441 } else if (client->uc2022) {
4442 res = kIOReturnError;
4443 }
4444 obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
4445 if (obj) {
4446 hasProps = true;
4447 client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
4448 } else if (client->uc2022) {
4449 res = kIOReturnError;
4450 }
4451 obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
4452 if (obj) {
4453 hasProps = true;
4454 client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
4455 } else if (client->uc2022) {
4456 res = kIOReturnError;
4457 }
4458 if (kIOReturnSuccess != res) {
4459 IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
4460 client->getMetaClass()->getClassName());
4461 }
4462 if (!hasProps) {
4463 const OSMetaClass * meta;
4464 OSKext * kext;
4465 meta = client->getMetaClass();
4466 kext = meta->getKext();
4467 if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
4468 client->defaultLocking = true;
4469 client->defaultLockingSetProperties = false;
4470 client->defaultLockingSingleThreadExternalMethod = false;
4471 client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
4472 }
4473 }
4474 }
4475 }
4476 if (client->sharedInstance) {
4477 IOLockUnlock(gIOUserClientOwnersLock);
4478 }
4479
4480 OSObject * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
4481 OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
4482 // If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
4483 // If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString.
4484 // If the value is kOSBooleanFalse, we allow access.
4485 // If the value is an OSString, we allow access if the task has the named entitlement.
4486 if (client->uc2022) {
4487 if (!requiredEntitlement) {
4488 IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
4489 client->getMetaClass()->getClassName());
4490 disallowAccess = true;
4491 } else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
4492 IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
4493 disallowAccess = true;
4494 }
4495 }
4496
4497 if (requiredEntitlement && disallowAccess == false) {
4498 if (kOSBooleanFalse == requiredEntitlement) {
4499 // allow
4500 disallowAccess = false;
4501 } else {
4502 disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
4503 if (disallowAccess) {
4504 IOLog("IOUC %s missing entitlement in process %s\n",
4505 client->getMetaClass()->getClassName(), creatorNameCStr);
4506 }
4507 }
4508 }
4509
4510 OSSafeReleaseNULL(requiredEntitlement);
4511
4512 if (disallowAccess) {
4513 res = kIOReturnNotPrivileged;
4514 }
4515 #if CONFIG_MACF
4516 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
4517 IOLog("IOUC %s failed MACF in process %s\n",
4518 client->getMetaClass()->getClassName(), creatorNameCStr);
4519 res = kIOReturnNotPermitted;
4520 }
4521 #endif
4522
4523 if ((kIOReturnSuccess == res)
4524 && gIOUCFilterCallbacks
4525 && gIOUCFilterCallbacks->io_filter_resolver) {
4526 io_filter_policy_t filterPolicy;
4527 filterPolicy = client->filterForTask(owningTask, 0);
4528 if (!filterPolicy) {
4529 res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
4530 if (kIOReturnUnsupported == res) {
4531 res = kIOReturnSuccess;
4532 } else if (kIOReturnSuccess == res) {
4533 client->filterForTask(owningTask, filterPolicy);
4534 } else {
4535 IOLog("IOUC %s failed sandbox in process %s\n",
4536 client->getMetaClass()->getClassName(), creatorNameCStr);
4537 }
4538 }
4539 }
4540
4541 if (kIOReturnSuccess == res) {
4542 res = client->registerOwner(owningTask);
4543 }
4544 OSSafeReleaseNULL(creatorName);
4545
4546 if (kIOReturnSuccess != res) {
4547 IOStatisticsClientCall();
4548 client->clientClose();
4549 client->setTerminateDefer(service, false);
4550 client->release();
4551 client = NULL;
4552 break;
4553 }
4554 client->setTerminateDefer(service, false);
4555 }
4556 }while (false);
4557
4558 *connection = client;
4559 *result = res;
4560
4561 return err;
4562 }
4563
4564 /* Routine io_service_close */
4565 kern_return_t
4566 is_io_service_close(
4567 io_connect_t connection )
4568 {
4569 OSSet * mappings;
4570 if ((mappings = OSDynamicCast(OSSet, connection))) {
4571 return kIOReturnSuccess;
4572 }
4573
4574 CHECK( IOUserClient, connection, client );
4575
4576 IOStatisticsClientCall();
4577
4578 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
4579 client->ipcEnter(kIPCLockWrite);
4580 client->clientClose();
4581 client->ipcExit(kIPCLockWrite);
4582 } else {
4583 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
4584 client->getRegistryEntryID(), client->getName());
4585 }
4586
4587 return kIOReturnSuccess;
4588 }
4589
4590 /* Routine io_connect_get_service */
4591 kern_return_t
4592 is_io_connect_get_service(
4593 io_connect_t connection,
4594 io_object_t *service )
4595 {
4596 IOService * theService;
4597
4598 CHECK( IOUserClient, connection, client );
4599
4600 client->ipcEnter(kIPCLockNone);
4601
4602 theService = client->getService();
4603 if (theService) {
4604 theService->retain();
4605 }
4606
4607 client->ipcExit(kIPCLockNone);
4608
4609 *service = theService;
4610
4611 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4612 }
4613
4614 /* Routine io_connect_set_notification_port */
4615 kern_return_t
4616 is_io_connect_set_notification_port(
4617 io_connect_t connection,
4618 uint32_t notification_type,
4619 mach_port_t port,
4620 uint32_t reference)
4621 {
4622 kern_return_t ret;
4623 CHECK( IOUserClient, connection, client );
4624
4625 IOStatisticsClientCall();
4626
4627 client->ipcEnter(kIPCLockWrite);
4628 ret = client->registerNotificationPort( port, notification_type,
4629 (io_user_reference_t) reference );
4630 client->ipcExit(kIPCLockWrite);
4631
4632 return ret;
4633 }
4634
4635 /* Routine io_connect_set_notification_port_64 */
4636 kern_return_t
4637 is_io_connect_set_notification_port_64(
4638 io_connect_t connection,
4639 uint32_t notification_type,
4640 mach_port_t port,
4641 io_user_reference_t reference)
4642 {
4643 kern_return_t ret;
4644 CHECK( IOUserClient, connection, client );
4645
4646 IOStatisticsClientCall();
4647
4648 client->ipcEnter(kIPCLockWrite);
4649 ret = client->registerNotificationPort( port, notification_type,
4650 reference );
4651 client->ipcExit(kIPCLockWrite);
4652
4653 return ret;
4654 }
4655
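/*
 * is_io_connect_map_memory_into_task maps the user client's memory for the
 * given type via mapClientMemory64(). For shared-instance clients, or when
 * mapping into a task other than the caller, a send right naming the map is
 * pushed to the destination task so the mapping can be torn down later;
 * otherwise the map is tracked in the client's own mappings set. A user-space
 * sketch (assuming the IOKitLib wrapper; not part of this file):
 *
 *   mach_vm_address_t addr = 0;
 *   mach_vm_size_t    size = 0;
 *   kern_return_t     kr   = IOConnectMapMemory64(connect, 0, mach_task_self(),
 *                                                 &addr, &size, kIOMapAnywhere);
 */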
4656 /* Routine io_connect_map_memory_into_task */
4657 kern_return_t
4658 is_io_connect_map_memory_into_task
4659 (
4660 io_connect_t connection,
4661 uint32_t memory_type,
4662 task_t into_task,
4663 mach_vm_address_t *address,
4664 mach_vm_size_t *size,
4665 uint32_t flags
4666 )
4667 {
4668 IOReturn err;
4669 IOMemoryMap * map;
4670
4671 CHECK( IOUserClient, connection, client );
4672
4673 if (!into_task) {
4674 return kIOReturnBadArgument;
4675 }
4676
4677 IOStatisticsClientCall();
4678
4679 client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4680 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
4681
4682 if (map) {
4683 *address = map->getAddress();
4684 if (size) {
4685 *size = map->getSize();
4686 }
4687
4688 if (client->sharedInstance
4689 || (into_task != current_task())) {
4690 // push a name out to the task owning the map,
4691 // so we can clean up maps
4692 mach_port_name_t name __unused =
4693 IOMachPort::makeSendRightForTask(
4694 into_task, map, IKOT_IOKIT_OBJECT );
4695 map->release();
4696 } else {
4697 // keep it with the user client
4698 IOLockLock( gIOObjectPortLock);
4699 if (NULL == client->mappings) {
4700 client->mappings = OSSet::withCapacity(2);
4701 }
4702 if (client->mappings) {
4703 client->mappings->setObject( map);
4704 }
4705 IOLockUnlock( gIOObjectPortLock);
4706 map->release();
4707 }
4708 err = kIOReturnSuccess;
4709 } else {
4710 err = kIOReturnBadArgument;
4711 }
4712
4713 client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4714
4715 return err;
4716 }
4717
4718 /* Routine is_io_connect_map_memory */
4719 kern_return_t
4720 is_io_connect_map_memory(
4721 io_object_t connect,
4722 uint32_t type,
4723 task_t task,
4724 uint32_t * mapAddr,
4725 uint32_t * mapSize,
4726 uint32_t flags )
4727 {
4728 IOReturn err;
4729 mach_vm_address_t address;
4730 mach_vm_size_t size;
4731
4732 address = SCALAR64(*mapAddr);
4733 size = SCALAR64(*mapSize);
4734
4735 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4736
4737 *mapAddr = SCALAR32(address);
4738 *mapSize = SCALAR32(size);
4739
4740 return err;
4741 }
4742 } /* extern "C" */
4743
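/*
 * removeMappingForDescriptor removes, and returns retained, the IOMemoryMap
 * in this client's mappings set whose backing IOMemoryDescriptor matches
 * mem, or returns NULL if no such mapping is tracked. gIOObjectPortLock
 * guards the mappings set.
 */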
4744 IOMemoryMap *
4745 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4746 {
4747 OSIterator * iter;
4748 IOMemoryMap * map = NULL;
4749
4750 IOLockLock(gIOObjectPortLock);
4751
4752 iter = OSCollectionIterator::withCollection(mappings);
4753 if (iter) {
4754 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4755 if (mem == map->getMemoryDescriptor()) {
4756 map->retain();
4757 mappings->removeObject(map);
4758 break;
4759 }
4760 }
4761 iter->release();
4762 }
4763
4764 IOLockUnlock(gIOObjectPortLock);
4765
4766 return map;
4767 }
4768
4769 extern "C" {
4770 /* Routine io_connect_unmap_memory_from_task */
4771 kern_return_t
4772 is_io_connect_unmap_memory_from_task
4773 (
4774 io_connect_t connection,
4775 uint32_t memory_type,
4776 task_t from_task,
4777 mach_vm_address_t address)
4778 {
4779 IOReturn err;
4780 IOOptionBits options = 0;
4781 IOMemoryDescriptor * memory = NULL;
4782 IOMemoryMap * map;
4783
4784 CHECK( IOUserClient, connection, client );
4785
4786 if (!from_task) {
4787 return kIOReturnBadArgument;
4788 }
4789
4790 IOStatisticsClientCall();
4791
4792 client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4793 err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
4794
4795 if (memory && (kIOReturnSuccess == err)) {
4796 options = (options & ~kIOMapUserOptionsMask)
4797 | kIOMapAnywhere | kIOMapReference;
4798
4799 map = memory->createMappingInTask( from_task, address, options );
4800 memory->release();
4801 if (map) {
4802 IOLockLock( gIOObjectPortLock);
4803 if (client->mappings) {
4804 client->mappings->removeObject( map);
4805 }
4806 IOLockUnlock( gIOObjectPortLock);
4807
4808 mach_port_name_t name = 0;
4809 bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
4810 if (is_shared_instance_or_from_current_task) {
4811 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
4812 map->release();
4813 }
4814
4815 if (name) {
4816 map->userClientUnmap();
4817 err = iokit_mod_send_right( from_task, name, -2 );
4818 err = kIOReturnSuccess;
4819 } else {
4820 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
4821 }
4822 if (!is_shared_instance_or_from_current_task) {
4823 map->release();
4824 }
4825 } else {
4826 err = kIOReturnBadArgument;
4827 }
4828 }
4829
4830 client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4831
4832 return err;
4833 }
4834
4835 kern_return_t
4836 is_io_connect_unmap_memory(
4837 io_object_t connect,
4838 uint32_t type,
4839 task_t task,
4840 uint32_t mapAddr )
4841 {
4842 IOReturn err;
4843 mach_vm_address_t address;
4844
4845 address = SCALAR64(mapAddr);
4846
4847 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4848
4849 return err;
4850 }
4851
4852
4853 /* Routine io_connect_add_client */
4854 kern_return_t
4855 is_io_connect_add_client(
4856 io_connect_t connection,
4857 io_object_t connect_to)
4858 {
4859 CHECK( IOUserClient, connection, client );
4860 CHECK( IOUserClient, connect_to, to );
4861
4862 IOReturn ret;
4863
4864 IOStatisticsClientCall();
4865
4866 client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4867 ret = client->connectClient( to );
4868 client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4869
4870 return ret;
4871 }
4872
4873
4874 /* Routine io_connect_set_properties */
4875 kern_return_t
4876 is_io_connect_set_properties(
4877 io_connect_t connection,
4878 io_buf_ptr_t properties,
4879 mach_msg_type_number_t propertiesCnt,
4880 kern_return_t * result)
4881 {
4882 return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4883 }
4884
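/*
 * is_io_connect_method_var_output drives external methods that return a
 * variable-sized structure. The method fills *structureVariableOutputData
 * with either an OSSerialize or an OSData, which is then copied out
 * out-of-line via copyoutkdata(); anything else (including no data) yields
 * kIOReturnUnderrun. The per-task sandbox filter policy, if installed, is
 * consulted before the external method is dispatched.
 */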
4885 /* Routine io_user_client_method */
4886 kern_return_t
4887 is_io_connect_method_var_output
4888 (
4889 io_connect_t connection,
4890 uint32_t selector,
4891 io_scalar_inband64_t scalar_input,
4892 mach_msg_type_number_t scalar_inputCnt,
4893 io_struct_inband_t inband_input,
4894 mach_msg_type_number_t inband_inputCnt,
4895 mach_vm_address_t ool_input,
4896 mach_vm_size_t ool_input_size,
4897 io_struct_inband_t inband_output,
4898 mach_msg_type_number_t *inband_outputCnt,
4899 io_scalar_inband64_t scalar_output,
4900 mach_msg_type_number_t *scalar_outputCnt,
4901 io_buf_ptr_t *var_output,
4902 mach_msg_type_number_t *var_outputCnt
4903 )
4904 {
4905 CHECK( IOUserClient, connection, client );
4906
4907 IOExternalMethodArguments args;
4908 IOReturn ret;
4909 IOMemoryDescriptor * inputMD = NULL;
4910 OSObject * structureVariableOutputData = NULL;
4911
4912 bzero(&args.__reserved[0], sizeof(args.__reserved));
4913 args.__reservedA = 0;
4914 args.version = kIOExternalMethodArgumentsCurrentVersion;
4915
4916 args.selector = selector;
4917
4918 args.asyncWakePort = MACH_PORT_NULL;
4919 args.asyncReference = NULL;
4920 args.asyncReferenceCount = 0;
4921 args.structureVariableOutputData = &structureVariableOutputData;
4922
4923 args.scalarInput = scalar_input;
4924 args.scalarInputCount = scalar_inputCnt;
4925 args.structureInput = inband_input;
4926 args.structureInputSize = inband_inputCnt;
4927
4928 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4929 return kIOReturnIPCError;
4930 }
4931
4932 if (ool_input) {
4933 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4934 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4935 current_task());
4936 }
4937
4938 args.structureInputDescriptor = inputMD;
4939
4940 args.scalarOutput = scalar_output;
4941 args.scalarOutputCount = *scalar_outputCnt;
4942 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4943 args.structureOutput = inband_output;
4944 args.structureOutputSize = *inband_outputCnt;
4945 args.structureOutputDescriptor = NULL;
4946 args.structureOutputDescriptorSize = 0;
4947
4948 IOStatisticsClientCall();
4949 ret = kIOReturnSuccess;
4950
4951 io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
4952 if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
4953 ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
4954 }
4955
4956 if (kIOReturnSuccess == ret) {
4957 ret = client->callExternalMethod(selector, &args);
4958 }
4959
4960 *scalar_outputCnt = args.scalarOutputCount;
4961 *inband_outputCnt = args.structureOutputSize;
4962
4963 if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
4964 OSSerialize * serialize;
4965 OSData * data;
4966 unsigned int len;
4967
4968 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
4969 len = serialize->getLength();
4970 *var_outputCnt = len;
4971 ret = copyoutkdata(serialize->text(), len, var_output);
4972 } else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
4973 data->clipForCopyout();
4974 len = data->getLength();
4975 *var_outputCnt = len;
4976 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
4977 } else {
4978 ret = kIOReturnUnderrun;
4979 }
4980 }
4981
4982 if (inputMD) {
4983 inputMD->release();
4984 }
4985 if (structureVariableOutputData) {
4986 structureVariableOutputData->release();
4987 }
4988
4989 return ret;
4990 }
4991
4992 /* Routine io_user_client_method */
4993 kern_return_t
4994 is_io_connect_method
4995 (
4996 io_connect_t connection,
4997 uint32_t selector,
4998 io_scalar_inband64_t scalar_input,
4999 mach_msg_type_number_t scalar_inputCnt,
5000 io_struct_inband_t inband_input,
5001 mach_msg_type_number_t inband_inputCnt,
5002 mach_vm_address_t ool_input,
5003 mach_vm_size_t ool_input_size,
5004 io_struct_inband_t inband_output,
5005 mach_msg_type_number_t *inband_outputCnt,
5006 io_scalar_inband64_t scalar_output,
5007 mach_msg_type_number_t *scalar_outputCnt,
5008 mach_vm_address_t ool_output,
5009 mach_vm_size_t *ool_output_size
5010 )
5011 {
5012 CHECK( IOUserClient, connection, client );
5013
5014 IOExternalMethodArguments args;
5015 IOReturn ret;
5016 IOMemoryDescriptor * inputMD = NULL;
5017 IOMemoryDescriptor * outputMD = NULL;
5018
5019 bzero(&args.__reserved[0], sizeof(args.__reserved));
5020 args.__reservedA = 0;
5021 args.version = kIOExternalMethodArgumentsCurrentVersion;
5022
5023 args.selector = selector;
5024
5025 args.asyncWakePort = MACH_PORT_NULL;
5026 args.asyncReference = NULL;
5027 args.asyncReferenceCount = 0;
5028 args.structureVariableOutputData = NULL;
5029
5030 args.scalarInput = scalar_input;
5031 args.scalarInputCount = scalar_inputCnt;
5032 args.structureInput = inband_input;
5033 args.structureInputSize = inband_inputCnt;
5034
5035 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
5036 return kIOReturnIPCError;
5037 }
5038 if (ool_output) {
5039 if (*ool_output_size <= sizeof(io_struct_inband_t)) {
5040 return kIOReturnIPCError;
5041 }
5042 if (*ool_output_size > UINT_MAX) {
5043 return kIOReturnIPCError;
5044 }
5045 }
5046
5047 if (ool_input) {
5048 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
5049 kIODirectionOut | kIOMemoryMapCopyOnWrite,
5050 current_task());
5051 }
5052
5053 args.structureInputDescriptor = inputMD;
5054
5055 args.scalarOutput = scalar_output;
5056 args.scalarOutputCount = *scalar_outputCnt;
5057 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
5058 args.structureOutput = inband_output;
5059 args.structureOutputSize = *inband_outputCnt;
5060
5061 if (ool_output && ool_output_size) {
5062 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
5063 kIODirectionIn, current_task());
5064 }
5065
5066 args.structureOutputDescriptor = outputMD;
5067 args.structureOutputDescriptorSize = ool_output_size
5068 ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
5069 : 0;
5070
5071 IOStatisticsClientCall();
5072 ret = kIOReturnSuccess;
5073 io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
5074 if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
5075 ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
5076 }
5077 if (kIOReturnSuccess == ret) {
5078 ret = client->callExternalMethod( selector, &args );
5079 }
5080
5081 *scalar_outputCnt = args.scalarOutputCount;
5082 *inband_outputCnt = args.structureOutputSize;
5083 *ool_output_size = args.structureOutputDescriptorSize;
5084
5085 if (inputMD) {
5086 inputMD->release();
5087 }
5088 if (outputMD) {
5089 outputMD->release();
5090 }
5091
5092 return ret;
5093 }
5094
5095 /* Routine io_async_user_client_method */
5096 kern_return_t
5097 is_io_connect_async_method
5098 (
5099 io_connect_t connection,
5100 mach_port_t wake_port,
5101 io_async_ref64_t reference,
5102 mach_msg_type_number_t referenceCnt,
5103 uint32_t selector,
5104 io_scalar_inband64_t scalar_input,
5105 mach_msg_type_number_t scalar_inputCnt,
5106 io_struct_inband_t inband_input,
5107 mach_msg_type_number_t inband_inputCnt,
5108 mach_vm_address_t ool_input,
5109 mach_vm_size_t ool_input_size,
5110 io_struct_inband_t inband_output,
5111 mach_msg_type_number_t *inband_outputCnt,
5112 io_scalar_inband64_t scalar_output,
5113 mach_msg_type_number_t *scalar_outputCnt,
5114 mach_vm_address_t ool_output,
5115 mach_vm_size_t * ool_output_size
5116 )
5117 {
5118 CHECK( IOUserClient, connection, client );
5119
5120 IOExternalMethodArguments args;
5121 IOReturn ret;
5122 IOMemoryDescriptor * inputMD = NULL;
5123 IOMemoryDescriptor * outputMD = NULL;
5124
5125 if (referenceCnt < 1) {
5126 return kIOReturnBadArgument;
5127 }
5128
5129 bzero(&args.__reserved[0], sizeof(args.__reserved));
5130 args.__reservedA = 0;
5131 args.version = kIOExternalMethodArgumentsCurrentVersion;
5132
5133 reference[0] = (io_user_reference_t) wake_port;
5134 if (vm_map_is_64bit(get_task_map(current_task()))) {
5135 reference[0] |= kIOUCAsync64Flag;
5136 }
5137
5138 args.selector = selector;
5139
5140 args.asyncWakePort = wake_port;
5141 args.asyncReference = reference;
5142 args.asyncReferenceCount = referenceCnt;
5143
5144 args.structureVariableOutputData = NULL;
5145
5146 args.scalarInput = scalar_input;
5147 args.scalarInputCount = scalar_inputCnt;
5148 args.structureInput = inband_input;
5149 args.structureInputSize = inband_inputCnt;
5150
5151 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
5152 return kIOReturnIPCError;
5153 }
5154 if (ool_output) {
5155 if (*ool_output_size <= sizeof(io_struct_inband_t)) {
5156 return kIOReturnIPCError;
5157 }
5158 if (*ool_output_size > UINT_MAX) {
5159 return kIOReturnIPCError;
5160 }
5161 }
5162
5163 if (ool_input) {
5164 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
5165 kIODirectionOut | kIOMemoryMapCopyOnWrite,
5166 current_task());
5167 }
5168
5169 args.structureInputDescriptor = inputMD;
5170
5171 args.scalarOutput = scalar_output;
5172 args.scalarOutputCount = *scalar_outputCnt;
5173 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
5174 args.structureOutput = inband_output;
5175 args.structureOutputSize = *inband_outputCnt;
5176
5177 if (ool_output) {
5178 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
5179 kIODirectionIn, current_task());
5180 }
5181
5182 args.structureOutputDescriptor = outputMD;
5183 args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);
5184
5185 IOStatisticsClientCall();
5186 ret = kIOReturnSuccess;
5187 io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
5188 if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
5189 ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
5190 }
5191 if (kIOReturnSuccess == ret) {
5192 ret = client->callExternalMethod( selector, &args );
5193 }
5194
5195 *scalar_outputCnt = args.scalarOutputCount;
5196 *inband_outputCnt = args.structureOutputSize;
5197 *ool_output_size = args.structureOutputDescriptorSize;
5198
5199 if (inputMD) {
5200 inputMD->release();
5201 }
5202 if (outputMD) {
5203 outputMD->release();
5204 }
5205
5206 return ret;
5207 }
5208
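/*
 * The routines below adapt the older fixed-width MIG interfaces (scalarI /
 * scalarO, structureI, and friends) onto is_io_connect_method and
 * is_io_connect_async_method: 32-bit scalars are widened with SCALAR64 and
 * async references with REF64 on input, and results are narrowed with
 * SCALAR32 on output, with unused argument slots passed as NULL or zero.
 * The shim_* helpers dispatch legacy IOExternalMethod / IOExternalAsyncMethod
 * tables after checking the declared count0/count1 argument counts.
 */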
5209 /* Routine io_connect_method_scalarI_scalarO */
5210 kern_return_t
5211 is_io_connect_method_scalarI_scalarO(
5212 io_object_t connect,
5213 uint32_t index,
5214 io_scalar_inband_t input,
5215 mach_msg_type_number_t inputCount,
5216 io_scalar_inband_t output,
5217 mach_msg_type_number_t * outputCount )
5218 {
5219 IOReturn err;
5220 uint32_t i;
5221 io_scalar_inband64_t _input;
5222 io_scalar_inband64_t _output;
5223
5224 mach_msg_type_number_t struct_outputCnt = 0;
5225 mach_vm_size_t ool_output_size = 0;
5226
5227 bzero(&_output[0], sizeof(_output));
5228 for (i = 0; i < inputCount; i++) {
5229 _input[i] = SCALAR64(input[i]);
5230 }
5231
5232 err = is_io_connect_method(connect, index,
5233 _input, inputCount,
5234 NULL, 0,
5235 0, 0,
5236 NULL, &struct_outputCnt,
5237 _output, outputCount,
5238 0, &ool_output_size);
5239
5240 for (i = 0; i < *outputCount; i++) {
5241 output[i] = SCALAR32(_output[i]);
5242 }
5243
5244 return err;
5245 }
5246
5247 kern_return_t
5248 shim_io_connect_method_scalarI_scalarO(
5249 IOExternalMethod * method,
5250 IOService * object,
5251 const io_user_scalar_t * input,
5252 mach_msg_type_number_t inputCount,
5253 io_user_scalar_t * output,
5254 mach_msg_type_number_t * outputCount )
5255 {
5256 IOMethod func;
5257 io_scalar_inband_t _output;
5258 IOReturn err;
5259 err = kIOReturnBadArgument;
5260
5261 bzero(&_output[0], sizeof(_output));
5262 do {
5263 if (inputCount != method->count0) {
5264 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5265 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5266 continue;
5267 }
5268 if (*outputCount != method->count1) {
5269 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
5270 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5271 continue;
5272 }
5273
5274 func = method->func;
5275
5276 switch (inputCount) {
5277 case 6:
5278 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5279 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
5280 break;
5281 case 5:
5282 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5283 ARG32(input[3]), ARG32(input[4]),
5284 &_output[0] );
5285 break;
5286 case 4:
5287 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5288 ARG32(input[3]),
5289 &_output[0], &_output[1] );
5290 break;
5291 case 3:
5292 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5293 &_output[0], &_output[1], &_output[2] );
5294 break;
5295 case 2:
5296 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5297 &_output[0], &_output[1], &_output[2],
5298 &_output[3] );
5299 break;
5300 case 1:
5301 err = (object->*func)( ARG32(input[0]),
5302 &_output[0], &_output[1], &_output[2],
5303 &_output[3], &_output[4] );
5304 break;
5305 case 0:
5306 err = (object->*func)( &_output[0], &_output[1], &_output[2],
5307 &_output[3], &_output[4], &_output[5] );
5308 break;
5309
5310 default:
5311 IOLog("%s: Bad method table\n", object->getName());
5312 }
5313 }while (false);
5314
5315 uint32_t i;
5316 for (i = 0; i < *outputCount; i++) {
5317 output[i] = SCALAR32(_output[i]);
5318 }
5319
5320 return err;
5321 }
5322
5323 /* Routine io_async_method_scalarI_scalarO */
5324 kern_return_t
5325 is_io_async_method_scalarI_scalarO(
5326 io_object_t connect,
5327 mach_port_t wake_port,
5328 io_async_ref_t reference,
5329 mach_msg_type_number_t referenceCnt,
5330 uint32_t index,
5331 io_scalar_inband_t input,
5332 mach_msg_type_number_t inputCount,
5333 io_scalar_inband_t output,
5334 mach_msg_type_number_t * outputCount )
5335 {
5336 IOReturn err;
5337 uint32_t i;
5338 io_scalar_inband64_t _input;
5339 io_scalar_inband64_t _output;
5340 io_async_ref64_t _reference;
5341
5342 if (referenceCnt > ASYNC_REF64_COUNT) {
5343 return kIOReturnBadArgument;
5344 }
5345 bzero(&_output[0], sizeof(_output));
5346 for (i = 0; i < referenceCnt; i++) {
5347 _reference[i] = REF64(reference[i]);
5348 }
5349 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5350
5351 mach_msg_type_number_t struct_outputCnt = 0;
5352 mach_vm_size_t ool_output_size = 0;
5353
5354 for (i = 0; i < inputCount; i++) {
5355 _input[i] = SCALAR64(input[i]);
5356 }
5357
5358 err = is_io_connect_async_method(connect,
5359 wake_port, _reference, referenceCnt,
5360 index,
5361 _input, inputCount,
5362 NULL, 0,
5363 0, 0,
5364 NULL, &struct_outputCnt,
5365 _output, outputCount,
5366 0, &ool_output_size);
5367
5368 for (i = 0; i < *outputCount; i++) {
5369 output[i] = SCALAR32(_output[i]);
5370 }
5371
5372 return err;
5373 }
5374 /* Routine io_async_method_scalarI_structureO */
5375 kern_return_t
5376 is_io_async_method_scalarI_structureO(
5377 io_object_t connect,
5378 mach_port_t wake_port,
5379 io_async_ref_t reference,
5380 mach_msg_type_number_t referenceCnt,
5381 uint32_t index,
5382 io_scalar_inband_t input,
5383 mach_msg_type_number_t inputCount,
5384 io_struct_inband_t output,
5385 mach_msg_type_number_t * outputCount )
5386 {
5387 uint32_t i;
5388 io_scalar_inband64_t _input;
5389 io_async_ref64_t _reference;
5390
5391 if (referenceCnt > ASYNC_REF64_COUNT) {
5392 return kIOReturnBadArgument;
5393 }
5394 for (i = 0; i < referenceCnt; i++) {
5395 _reference[i] = REF64(reference[i]);
5396 }
5397 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5398
5399 mach_msg_type_number_t scalar_outputCnt = 0;
5400 mach_vm_size_t ool_output_size = 0;
5401
5402 for (i = 0; i < inputCount; i++) {
5403 _input[i] = SCALAR64(input[i]);
5404 }
5405
5406 return is_io_connect_async_method(connect,
5407 wake_port, _reference, referenceCnt,
5408 index,
5409 _input, inputCount,
5410 NULL, 0,
5411 0, 0,
5412 output, outputCount,
5413 NULL, &scalar_outputCnt,
5414 0, &ool_output_size);
5415 }
5416
5417 /* Routine io_async_method_scalarI_structureI */
5418 kern_return_t
5419 is_io_async_method_scalarI_structureI(
5420 io_connect_t connect,
5421 mach_port_t wake_port,
5422 io_async_ref_t reference,
5423 mach_msg_type_number_t referenceCnt,
5424 uint32_t index,
5425 io_scalar_inband_t input,
5426 mach_msg_type_number_t inputCount,
5427 io_struct_inband_t inputStruct,
5428 mach_msg_type_number_t inputStructCount )
5429 {
5430 uint32_t i;
5431 io_scalar_inband64_t _input;
5432 io_async_ref64_t _reference;
5433
5434 if (referenceCnt > ASYNC_REF64_COUNT) {
5435 return kIOReturnBadArgument;
5436 }
5437 for (i = 0; i < referenceCnt; i++) {
5438 _reference[i] = REF64(reference[i]);
5439 }
5440 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5441
5442 mach_msg_type_number_t scalar_outputCnt = 0;
5443 mach_msg_type_number_t inband_outputCnt = 0;
5444 mach_vm_size_t ool_output_size = 0;
5445
5446 for (i = 0; i < inputCount; i++) {
5447 _input[i] = SCALAR64(input[i]);
5448 }
5449
5450 return is_io_connect_async_method(connect,
5451 wake_port, _reference, referenceCnt,
5452 index,
5453 _input, inputCount,
5454 inputStruct, inputStructCount,
5455 0, 0,
5456 NULL, &inband_outputCnt,
5457 NULL, &scalar_outputCnt,
5458 0, &ool_output_size);
5459 }
5460
5461 /* Routine io_async_method_structureI_structureO */
5462 kern_return_t
5463 is_io_async_method_structureI_structureO(
5464 io_object_t connect,
5465 mach_port_t wake_port,
5466 io_async_ref_t reference,
5467 mach_msg_type_number_t referenceCnt,
5468 uint32_t index,
5469 io_struct_inband_t input,
5470 mach_msg_type_number_t inputCount,
5471 io_struct_inband_t output,
5472 mach_msg_type_number_t * outputCount )
5473 {
5474 uint32_t i;
5475 mach_msg_type_number_t scalar_outputCnt = 0;
5476 mach_vm_size_t ool_output_size = 0;
5477 io_async_ref64_t _reference;
5478
5479 if (referenceCnt > ASYNC_REF64_COUNT) {
5480 return kIOReturnBadArgument;
5481 }
5482 for (i = 0; i < referenceCnt; i++) {
5483 _reference[i] = REF64(reference[i]);
5484 }
5485 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5486
5487 return is_io_connect_async_method(connect,
5488 wake_port, _reference, referenceCnt,
5489 index,
5490 NULL, 0,
5491 input, inputCount,
5492 0, 0,
5493 output, outputCount,
5494 NULL, &scalar_outputCnt,
5495 0, &ool_output_size);
5496 }
5497
5498
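/*
 * Legacy async dispatch shim: checks the scalar input/output counts against
 * the IOExternalAsyncMethod table entry, narrows the async reference back to
 * the 32-bit io_async_ref_t expected by old drivers, then calls the member
 * function with up to six 32-bit arguments, using the remaining slots for
 * scalar output pointers.
 */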
5499 kern_return_t
5500 shim_io_async_method_scalarI_scalarO(
5501 IOExternalAsyncMethod * method,
5502 IOService * object,
5503 mach_port_t asyncWakePort,
5504 io_user_reference_t * asyncReference,
5505 uint32_t asyncReferenceCount,
5506 const io_user_scalar_t * input,
5507 mach_msg_type_number_t inputCount,
5508 io_user_scalar_t * output,
5509 mach_msg_type_number_t * outputCount )
5510 {
5511 IOAsyncMethod func;
5512 uint32_t i;
5513 io_scalar_inband_t _output;
5514 IOReturn err;
5515 io_async_ref_t reference;
5516
5517 bzero(&_output[0], sizeof(_output));
5518 for (i = 0; i < asyncReferenceCount; i++) {
5519 reference[i] = REF32(asyncReference[i]);
5520 }
5521
5522 err = kIOReturnBadArgument;
5523
5524 do {
5525 if (inputCount != method->count0) {
5526 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5527 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5528 continue;
5529 }
5530 if (*outputCount != method->count1) {
5531 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
5532 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5533 continue;
5534 }
5535
5536 func = method->func;
5537
5538 switch (inputCount) {
5539 case 6:
5540 err = (object->*func)( reference,
5541 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5542 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
5543 break;
5544 case 5:
5545 err = (object->*func)( reference,
5546 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5547 ARG32(input[3]), ARG32(input[4]),
5548 &_output[0] );
5549 break;
5550 case 4:
5551 err = (object->*func)( reference,
5552 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5553 ARG32(input[3]),
5554 &_output[0], &_output[1] );
5555 break;
5556 case 3:
5557 err = (object->*func)( reference,
5558 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5559 &_output[0], &_output[1], &_output[2] );
5560 break;
5561 case 2:
5562 err = (object->*func)( reference,
5563 ARG32(input[0]), ARG32(input[1]),
5564 &_output[0], &_output[1], &_output[2],
5565 &_output[3] );
5566 break;
5567 case 1:
5568 err = (object->*func)( reference,
5569 ARG32(input[0]),
5570 &_output[0], &_output[1], &_output[2],
5571 &_output[3], &_output[4] );
5572 break;
5573 case 0:
5574 err = (object->*func)( reference,
5575 &_output[0], &_output[1], &_output[2],
5576 &_output[3], &_output[4], &_output[5] );
5577 break;
5578
5579 default:
5580 IOLog("%s: Bad method table\n", object->getName());
5581 }
5582 }while (false);
5583
5584 for (i = 0; i < *outputCount; i++) {
5585 output[i] = SCALAR32(_output[i]);
5586 }
5587
5588 return err;
5589 }
5590
5591
5592 /* Routine io_connect_method_scalarI_structureO */
5593 kern_return_t
5594 is_io_connect_method_scalarI_structureO(
5595 io_object_t connect,
5596 uint32_t index,
5597 io_scalar_inband_t input,
5598 mach_msg_type_number_t inputCount,
5599 io_struct_inband_t output,
5600 mach_msg_type_number_t * outputCount )
5601 {
5602 uint32_t i;
5603 io_scalar_inband64_t _input;
5604
5605 mach_msg_type_number_t scalar_outputCnt = 0;
5606 mach_vm_size_t ool_output_size = 0;
5607
5608 for (i = 0; i < inputCount; i++) {
5609 _input[i] = SCALAR64(input[i]);
5610 }
5611
5612 return is_io_connect_method(connect, index,
5613 _input, inputCount,
5614 NULL, 0,
5615 0, 0,
5616 output, outputCount,
5617 NULL, &scalar_outputCnt,
5618 0, &ool_output_size);
5619 }
5620
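/*
 * Legacy sync dispatch shim for scalar-input/structure-output methods. A
 * count1 of kIOUCVariableStructureSize skips the strict output-size check;
 * for fewer than five scalar inputs the outputCount pointer is passed in an
 * argument slot so the method can report the size it actually produced.
 */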
5621 kern_return_t
5622 shim_io_connect_method_scalarI_structureO(
5623
5624 IOExternalMethod * method,
5625 IOService * object,
5626 const io_user_scalar_t * input,
5627 mach_msg_type_number_t inputCount,
5628 io_struct_inband_t output,
5629 IOByteCount * outputCount )
5630 {
5631 IOMethod func;
5632 IOReturn err;
5633
5634 err = kIOReturnBadArgument;
5635
5636 do {
5637 if (inputCount != method->count0) {
5638 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5639 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5640 continue;
5641 }
5642 if ((kIOUCVariableStructureSize != method->count1)
5643 && (*outputCount != method->count1)) {
5644 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5645 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5646 continue;
5647 }
5648
5649 func = method->func;
5650
5651 switch (inputCount) {
5652 case 5:
5653 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5654 ARG32(input[3]), ARG32(input[4]),
5655 output );
5656 break;
5657 case 4:
5658 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5659 ARG32(input[3]),
5660 output, (void *)outputCount );
5661 break;
5662 case 3:
5663 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5664 output, (void *)outputCount, NULL );
5665 break;
5666 case 2:
5667 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5668 output, (void *)outputCount, NULL, NULL );
5669 break;
5670 case 1:
5671 err = (object->*func)( ARG32(input[0]),
5672 output, (void *)outputCount, NULL, NULL, NULL );
5673 break;
5674 case 0:
5675 err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
5676 break;
5677
5678 default:
5679 IOLog("%s: Bad method table\n", object->getName());
5680 }
5681 }while (false);
5682
5683 return err;
5684 }
5685
5686
5687 kern_return_t
5688 shim_io_async_method_scalarI_structureO(
5689 IOExternalAsyncMethod * method,
5690 IOService * object,
5691 mach_port_t asyncWakePort,
5692 io_user_reference_t * asyncReference,
5693 uint32_t asyncReferenceCount,
5694 const io_user_scalar_t * input,
5695 mach_msg_type_number_t inputCount,
5696 io_struct_inband_t output,
5697 mach_msg_type_number_t * outputCount )
5698 {
5699 IOAsyncMethod func;
5700 uint32_t i;
5701 IOReturn err;
5702 io_async_ref_t reference;
5703
5704 for (i = 0; i < asyncReferenceCount; i++) {
5705 reference[i] = REF32(asyncReference[i]);
5706 }
5707
5708 err = kIOReturnBadArgument;
5709 do {
5710 if (inputCount != method->count0) {
5711 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5712 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5713 continue;
5714 }
5715 if ((kIOUCVariableStructureSize != method->count1)
5716 && (*outputCount != method->count1)) {
5717 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5718 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5719 continue;
5720 }
5721
5722 func = method->func;
5723
5724 switch (inputCount) {
5725 case 5:
5726 err = (object->*func)( reference,
5727 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5728 ARG32(input[3]), ARG32(input[4]),
5729 output );
5730 break;
5731 case 4:
5732 err = (object->*func)( reference,
5733 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5734 ARG32(input[3]),
5735 output, (void *)outputCount );
5736 break;
5737 case 3:
5738 err = (object->*func)( reference,
5739 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5740 output, (void *)outputCount, NULL );
5741 break;
5742 case 2:
5743 err = (object->*func)( reference,
5744 ARG32(input[0]), ARG32(input[1]),
5745 output, (void *)outputCount, NULL, NULL );
5746 break;
5747 case 1:
5748 err = (object->*func)( reference,
5749 ARG32(input[0]),
5750 output, (void *)outputCount, NULL, NULL, NULL );
5751 break;
5752 case 0:
5753 err = (object->*func)( reference,
5754 output, (void *)outputCount, NULL, NULL, NULL, NULL );
5755 break;
5756
5757 default:
5758 IOLog("%s: Bad method table\n", object->getName());
5759 }
5760 }while (false);
5761
5762 return err;
5763 }
5764
5765 /* Routine io_connect_method_scalarI_structureI */
5766 kern_return_t
5767 is_io_connect_method_scalarI_structureI(
5768 io_connect_t connect,
5769 uint32_t index,
5770 io_scalar_inband_t input,
5771 mach_msg_type_number_t inputCount,
5772 io_struct_inband_t inputStruct,
5773 mach_msg_type_number_t inputStructCount )
5774 {
5775 uint32_t i;
5776 io_scalar_inband64_t _input;
5777
5778 mach_msg_type_number_t scalar_outputCnt = 0;
5779 mach_msg_type_number_t inband_outputCnt = 0;
5780 mach_vm_size_t ool_output_size = 0;
5781
5782 for (i = 0; i < inputCount; i++) {
5783 _input[i] = SCALAR64(input[i]);
5784 }
5785
5786 return is_io_connect_method(connect, index,
5787 _input, inputCount,
5788 inputStruct, inputStructCount,
5789 0, 0,
5790 NULL, &inband_outputCnt,
5791 NULL, &scalar_outputCnt,
5792 0, &ool_output_size);
5793 }
5794
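/*
 * Legacy sync dispatch shim for scalar-input/structure-input methods: the
 * scalar count must match count0 and the input structure size must match
 * count1 unless the entry declares kIOUCVariableStructureSize; the structure
 * pointer (and, when slots remain, its size) occupies the trailing argument
 * slots.
 */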
5795 kern_return_t
5796 shim_io_connect_method_scalarI_structureI(
5797 IOExternalMethod * method,
5798 IOService * object,
5799 const io_user_scalar_t * input,
5800 mach_msg_type_number_t inputCount,
5801 io_struct_inband_t inputStruct,
5802 mach_msg_type_number_t inputStructCount )
5803 {
5804 IOMethod func;
5805 IOReturn err = kIOReturnBadArgument;
5806
5807 do{
5808 if (inputCount != method->count0) {
5809 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5810 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5811 continue;
5812 }
5813 if ((kIOUCVariableStructureSize != method->count1)
5814 && (inputStructCount != method->count1)) {
5815 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5816 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5817 continue;
5818 }
5819
5820 func = method->func;
5821
5822 switch (inputCount) {
5823 case 5:
5824 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5825 ARG32(input[3]), ARG32(input[4]),
5826 inputStruct );
5827 break;
5828 case 4:
5829 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
5830 ARG32(input[3]),
5831 inputStruct, (void *)(uintptr_t)inputStructCount );
5832 break;
5833 case 3:
5834 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5835 inputStruct, (void *)(uintptr_t)inputStructCount,
5836 NULL );
5837 break;
5838 case 2:
5839 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5840 inputStruct, (void *)(uintptr_t)inputStructCount,
5841 NULL, NULL );
5842 break;
5843 case 1:
5844 err = (object->*func)( ARG32(input[0]),
5845 inputStruct, (void *)(uintptr_t)inputStructCount,
5846 NULL, NULL, NULL );
5847 break;
5848 case 0:
5849 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5850 NULL, NULL, NULL, NULL );
5851 break;
5852
5853 default:
5854 IOLog("%s: Bad method table\n", object->getName());
5855 }
5856 }while (false);
5857
5858 return err;
5859 }
5860
5861 kern_return_t
5862 shim_io_async_method_scalarI_structureI(
5863 IOExternalAsyncMethod * method,
5864 IOService * object,
5865 mach_port_t asyncWakePort,
5866 io_user_reference_t * asyncReference,
5867 uint32_t asyncReferenceCount,
5868 const io_user_scalar_t * input,
5869 mach_msg_type_number_t inputCount,
5870 io_struct_inband_t inputStruct,
5871 mach_msg_type_number_t inputStructCount )
5872 {
5873 IOAsyncMethod func;
5874 uint32_t i;
5875 IOReturn err = kIOReturnBadArgument;
5876 io_async_ref_t reference;
5877
5878 for (i = 0; i < asyncReferenceCount; i++) {
5879 reference[i] = REF32(asyncReference[i]);
5880 }
5881
5882 do{
5883 if (inputCount != method->count0) {
5884 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5885 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5886 continue;
5887 }
5888 if ((kIOUCVariableStructureSize != method->count1)
5889 && (inputStructCount != method->count1)) {
5890 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5891 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5892 continue;
5893 }
5894
5895 func = method->func;
5896
5897 switch (inputCount) {
5898 case 5:
5899 err = (object->*func)( reference,
5900 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5901 ARG32(input[3]), ARG32(input[4]),
5902 inputStruct );
5903 break;
5904 case 4:
5905 err = (object->*func)( reference,
5906 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5907 ARG32(input[3]),
5908 inputStruct, (void *)(uintptr_t)inputStructCount );
5909 break;
5910 case 3:
5911 err = (object->*func)( reference,
5912 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5913 inputStruct, (void *)(uintptr_t)inputStructCount,
5914 NULL );
5915 break;
5916 case 2:
5917 err = (object->*func)( reference,
5918 ARG32(input[0]), ARG32(input[1]),
5919 inputStruct, (void *)(uintptr_t)inputStructCount,
5920 NULL, NULL );
5921 break;
5922 case 1:
5923 err = (object->*func)( reference,
5924 ARG32(input[0]),
5925 inputStruct, (void *)(uintptr_t)inputStructCount,
5926 NULL, NULL, NULL );
5927 break;
5928 case 0:
5929 err = (object->*func)( reference,
5930 inputStruct, (void *)(uintptr_t)inputStructCount,
5931 NULL, NULL, NULL, NULL );
5932 break;
5933
5934 default:
5935 IOLog("%s: Bad method table\n", object->getName());
5936 }
5937 }while (false);
5938
5939 return err;
5940 }
5941
5942 /* Routine io_connect_method_structureI_structureO */
5943 kern_return_t
5944 is_io_connect_method_structureI_structureO(
5945 io_object_t connect,
5946 uint32_t index,
5947 io_struct_inband_t input,
5948 mach_msg_type_number_t inputCount,
5949 io_struct_inband_t output,
5950 mach_msg_type_number_t * outputCount )
5951 {
5952 mach_msg_type_number_t scalar_outputCnt = 0;
5953 mach_vm_size_t ool_output_size = 0;
5954
5955 return is_io_connect_method(connect, index,
5956 NULL, 0,
5957 input, inputCount,
5958 0, 0,
5959 output, outputCount,
5960 NULL, &scalar_outputCnt,
5961 0, &ool_output_size);
5962 }
5963
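/*
 * Legacy sync dispatch shim for structure-input/structure-output methods.
 * The call shape follows which of count0/count1 are non-zero: struct in and
 * out, struct out only, or struct in only; kIOUCVariableStructureSize on
 * either count disables that size check.
 */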
5964 kern_return_t
5965 shim_io_connect_method_structureI_structureO(
5966 IOExternalMethod * method,
5967 IOService * object,
5968 io_struct_inband_t input,
5969 mach_msg_type_number_t inputCount,
5970 io_struct_inband_t output,
5971 IOByteCount * outputCount )
5972 {
5973 IOMethod func;
5974 IOReturn err = kIOReturnBadArgument;
5975
5976 do{
5977 if ((kIOUCVariableStructureSize != method->count0)
5978 && (inputCount != method->count0)) {
5979 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5980 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5981 continue;
5982 }
5983 if ((kIOUCVariableStructureSize != method->count1)
5984 && (*outputCount != method->count1)) {
5985 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5986 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5987 continue;
5988 }
5989
5990 func = method->func;
5991
5992 if (method->count1) {
5993 if (method->count0) {
5994 err = (object->*func)( input, output,
5995 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5996 } else {
5997 err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5998 }
5999 } else {
6000 err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
6001 }
6002 }while (false);
6003
6004
6005 return err;
6006 }
6007
6008 kern_return_t
6009 shim_io_async_method_structureI_structureO(
6010 IOExternalAsyncMethod * method,
6011 IOService * object,
6012 mach_port_t asyncWakePort,
6013 io_user_reference_t * asyncReference,
6014 uint32_t asyncReferenceCount,
6015 io_struct_inband_t input,
6016 mach_msg_type_number_t inputCount,
6017 io_struct_inband_t output,
6018 mach_msg_type_number_t * outputCount )
6019 {
6020 IOAsyncMethod func;
6021 uint32_t i;
6022 IOReturn err;
6023 io_async_ref_t reference;
6024
6025 for (i = 0; i < asyncReferenceCount; i++) {
6026 reference[i] = REF32(asyncReference[i]);
6027 }
6028
6029 err = kIOReturnBadArgument;
6030 do{
6031 if ((kIOUCVariableStructureSize != method->count0)
6032 && (inputCount != method->count0)) {
6033 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
6034 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
6035 continue;
6036 }
6037 if ((kIOUCVariableStructureSize != method->count1)
6038 && (*outputCount != method->count1)) {
6039 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
6040 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
6041 continue;
6042 }
6043
6044 func = method->func;
6045
6046 if (method->count1) {
6047 if (method->count0) {
6048 err = (object->*func)( reference,
6049 input, output,
6050 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
6051 } else {
6052 err = (object->*func)( reference,
6053 output, outputCount, NULL, NULL, NULL, NULL );
6054 }
6055 } else {
6056 err = (object->*func)( reference,
6057 input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
6058 }
6059 }while (false);
6060
6061 return err;
6062 }
6063
6064 /* Routine io_catalog_send_data */
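/*
 * Accepts a serialized property list from user space as out-of-line data,
 * copies it into the kernel map, unserializes it with OSUnserializeXML(), and
 * applies the requested add/remove/reset operation to gIOCatalogue. The call
 * is restricted to the main device port; callers without the catalog
 * management entitlement are currently given a fake success (see below), and
 * once vm_map_copyout() has succeeded the IOKit status is reported through
 * *result while the MIG return is KERN_SUCCESS.
 */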
6065 kern_return_t
6066 is_io_catalog_send_data(
6067 mach_port_t main_port,
6068 uint32_t flag,
6069 io_buf_ptr_t inData,
6070 mach_msg_type_number_t inDataCount,
6071 kern_return_t * result)
6072 {
6073 // Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
6074 #if NO_KEXTD && !(DEVELOPMENT || DEBUG)
6075 return kIOReturnNotPrivileged;
6076 #else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
6077 OSObject * obj = NULL;
6078 vm_offset_t data;
6079 kern_return_t kr = kIOReturnError;
6080
6081 //printf("io_catalog_send_data called. flag: %d\n", flag);
6082
6083 if (main_port != main_device_port) {
6084 return kIOReturnNotPrivileged;
6085 }
6086
6087 if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
6088 flag != kIOCatalogKextdActive &&
6089 flag != kIOCatalogKextdFinishedLaunching) &&
6090 (!inData || !inDataCount)) {
6091 return kIOReturnBadArgument;
6092 }
6093
6094 if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
6095 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
6096 IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
6097 OSSafeReleaseNULL(taskName);
6098 // For now, fake success to not break applications relying on this function succeeding.
6099 // See <rdar://problem/32554970> for more details.
6100 return kIOReturnSuccess;
6101 }
6102
6103 if (inData) {
6104 vm_map_offset_t map_data;
6105
6106 if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
6107 return kIOReturnMessageTooLarge;
6108 }
6109
6110 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
6111 data = CAST_DOWN(vm_offset_t, map_data);
6112
6113 if (kr != KERN_SUCCESS) {
6114 return kr;
6115 }
6116
6117 // must return success after vm_map_copyout() succeeds
6118
6119 if (inDataCount) {
6120 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
6121 vm_deallocate( kernel_map, data, inDataCount );
6122 if (!obj) {
6123 *result = kIOReturnNoMemory;
6124 return KERN_SUCCESS;
6125 }
6126 }
6127 }
6128
6129 switch (flag) {
6130 case kIOCatalogResetDrivers:
6131 case kIOCatalogResetDriversNoMatch: {
6132 OSArray * array;
6133
6134 array = OSDynamicCast(OSArray, obj);
6135 if (array) {
6136 if (!gIOCatalogue->resetAndAddDrivers(array,
6137 flag == kIOCatalogResetDrivers)) {
6138 kr = kIOReturnError;
6139 }
6140 } else {
6141 kr = kIOReturnBadArgument;
6142 }
6143 }
6144 break;
6145
6146 case kIOCatalogAddDrivers:
6147 case kIOCatalogAddDriversNoMatch: {
6148 OSArray * array;
6149
6150 array = OSDynamicCast(OSArray, obj);
6151 if (array) {
6152 if (!gIOCatalogue->addDrivers( array,
6153 flag == kIOCatalogAddDrivers)) {
6154 kr = kIOReturnError;
6155 }
6156 } else {
6157 kr = kIOReturnBadArgument;
6158 }
6159 }
6160 break;
6161
6162 case kIOCatalogRemoveDrivers:
6163 case kIOCatalogRemoveDriversNoMatch: {
6164 OSDictionary * dict;
6165
6166 dict = OSDynamicCast(OSDictionary, obj);
6167 if (dict) {
6168 if (!gIOCatalogue->removeDrivers( dict,
6169 flag == kIOCatalogRemoveDrivers )) {
6170 kr = kIOReturnError;
6171 }
6172 } else {
6173 kr = kIOReturnBadArgument;
6174 }
6175 }
6176 break;
6177
6178 case kIOCatalogStartMatching__Removed:
6179 case kIOCatalogRemoveKernelLinker__Removed:
6180 case kIOCatalogKextdActive:
6181 case kIOCatalogKextdFinishedLaunching:
6182 kr = KERN_NOT_SUPPORTED;
6183 break;
6184
6185 default:
6186 kr = kIOReturnBadArgument;
6187 break;
6188 }
6189
6190 if (obj) {
6191 obj->release();
6192 }
6193
6194 *result = kr;
6195 return KERN_SUCCESS;
6196 #endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
6197 }
6198
6199 /* Routine io_catalog_terminate */
6200 kern_return_t
6201 is_io_catalog_terminate(
6202 mach_port_t main_port,
6203 uint32_t flag,
6204 io_name_t name )
6205 {
6206 kern_return_t kr;
6207
6208 if (main_port != main_device_port) {
6209 return kIOReturnNotPrivileged;
6210 }
6211
6212 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6213 kIOClientPrivilegeAdministrator );
6214 if (kIOReturnSuccess != kr) {
6215 return kr;
6216 }
6217
6218 switch (flag) {
6219 #if !defined(SECURE_KERNEL)
6220 case kIOCatalogServiceTerminate:
6221 kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6222 break;
6223
6224 case kIOCatalogModuleUnload:
6225 case kIOCatalogModuleTerminate:
6226 kr = gIOCatalogue->terminateDriversForModule(name,
6227 flag == kIOCatalogModuleUnload);
6228 break;
6229 #endif
6230
6231 default:
6232 kr = kIOReturnBadArgument;
6233 break;
6234 }
6235
6236 return kr;
6237 }
6238
6239 /* Routine io_catalog_get_data */
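/*
 * Serializes the catalogue data selected by 'flag' into a kernel allocation
 * and hands it back to the caller as an out-of-line vm_map_copy_t, with the
 * byte count in *outDataCount.
 */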
6240 kern_return_t
6241 is_io_catalog_get_data(
6242 mach_port_t main_port,
6243 uint32_t flag,
6244 io_buf_ptr_t *outData,
6245 mach_msg_type_number_t *outDataCount)
6246 {
6247 kern_return_t kr = kIOReturnSuccess;
6248 OSSerialize * s;
6249
6250 if (main_port != main_device_port) {
6251 return kIOReturnNotPrivileged;
6252 }
6253
6254 //printf("io_catalog_get_data called. flag: %d\n", flag);
6255
6256 s = OSSerialize::withCapacity(4096);
6257 if (!s) {
6258 return kIOReturnNoMemory;
6259 }
6260
6261 kr = gIOCatalogue->serializeData(flag, s);
6262
6263 if (kr == kIOReturnSuccess) {
6264 mach_vm_address_t data;
6265 vm_map_copy_t copy;
6266 unsigned int size;
6267
6268 size = s->getLength();
6269 kr = mach_vm_allocate_kernel(kernel_map, &data, size,
6270 VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IOKIT));
6271 if (kr == kIOReturnSuccess) {
6272 bcopy(s->text(), (void *)data, size);
6273 kr = vm_map_copyin(kernel_map, data, size, true, &copy);
6274 *outData = (char *)copy;
6275 *outDataCount = size;
6276 }
6277 }
6278
6279 s->release();
6280
6281 return kr;
6282 }
6283
6284 /* Routine io_catalog_get_gen_count */
6285 kern_return_t
6286 is_io_catalog_get_gen_count(
6287 mach_port_t main_port,
6288 uint32_t *genCount)
6289 {
6290 if (main_port != main_device_port) {
6291 return kIOReturnNotPrivileged;
6292 }
6293
6294 //printf("io_catalog_get_gen_count called.\n");
6295
6296 if (!genCount) {
6297 return kIOReturnBadArgument;
6298 }
6299
6300 *genCount = gIOCatalogue->getGenerationCount();
6301
6302 return kIOReturnSuccess;
6303 }
6304
6305 /* Routine io_catalog_module_loaded.
6306 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6307 */
6308 kern_return_t
6309 is_io_catalog_module_loaded(
6310 mach_port_t main_port,
6311 io_name_t name)
6312 {
6313 if (main_port != main_device_port) {
6314 return kIOReturnNotPrivileged;
6315 }
6316
6317 //printf("io_catalog_module_loaded called. name %s\n", name);
6318
6319 if (!name) {
6320 return kIOReturnBadArgument;
6321 }
6322
6323 gIOCatalogue->moduleHasLoaded(name);
6324
6325 return kIOReturnSuccess;
6326 }
6327
6328 kern_return_t
6329 is_io_catalog_reset(
6330 mach_port_t main_port,
6331 uint32_t flag)
6332 {
6333 if (main_port != main_device_port) {
6334 return kIOReturnNotPrivileged;
6335 }
6336
6337 switch (flag) {
6338 case kIOCatalogResetDefault:
6339 gIOCatalogue->reset();
6340 break;
6341
6342 default:
6343 return kIOReturnBadArgument;
6344 }
6345
6346 return kIOReturnSuccess;
6347 }
6348
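/*
 * Scalar trap fast path (the kernel side of IOConnectTrap* from user space).
 * The bits of userClientRef decide the route: values that cannot be kobject
 * port names are treated as DriverKit (UEXT) references and dispatched
 * through IOUserServerUEXTTrap(); otherwise the legacy IOExternalTrap table
 * is consulted, after applying the registered user-client filter, if any.
 */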
6349 kern_return_t
6350 iokit_user_client_trap(struct iokit_user_client_trap_args *args)
6351 {
6352 kern_return_t result = kIOReturnBadArgument;
6353 IOUserClient * userClient;
6354 OSObject * object;
6355 uintptr_t ref;
6356 mach_port_name_t portName;
6357
6358 ref = (uintptr_t) args->userClientRef;
6359
6360 if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
6361 return kIOReturnBadArgument;
6362 }
6363 // kobject port names always have b0-1 set, so we use these bits as flags to
6364 // iokit_user_client_trap()
6365 // keep this up to date with ipc_entry_name_mask();
6366 portName = (mach_port_name_t) (ref | 3);
6367 if (((1ULL << 32) & ref) || !(1 & ref)) {
6368 object = iokit_lookup_uext_ref_current_task(portName);
6369 if (object) {
6370 result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
6371 }
6372 OSSafeReleaseNULL(object);
6373 } else {
6374 io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
6375 if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
6376 IOExternalTrap *trap = NULL;
6377 IOService *target = NULL;
6378
6379 result = kIOReturnSuccess;
6380 io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
6381 if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
6382 result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
6383 }
6384 if (kIOReturnSuccess == result) {
6385 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
6386 }
6387 if (trap && target) {
6388 IOTrap func;
6389
6390 func = trap->func;
6391
6392 if (func) {
6393 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
6394 }
6395 }
6396
6397 iokit_remove_connect_reference(userClient);
6398 } else {
6399 OSSafeReleaseNULL(ref_current_task);
6400 }
6401 }
6402
6403 return result;
6404 }
6405
6406 /* Routine io_device_tree_entry_exists_with_name */
6407 kern_return_t
6408 is_io_device_tree_entry_exists_with_name(
6409 mach_port_t main_port,
6410 io_name_t name,
6411 boolean_t *exists )
6412 {
6413 OSCollectionIterator *iter;
6414
6415 if (main_port != main_device_port) {
6416 return kIOReturnNotPrivileged;
6417 }
6418
6419 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6420 *exists = iter && iter->getNextObject();
6421 OSSafeReleaseNULL(iter);
6422
6423 return kIOReturnSuccess;
6424 }
6425 } /* extern "C" */
6426
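/*
 * Common entry from the MIG glue: wraps externalMethod() in the per-client
 * IPC lock (a read lock, or a write lock when single-threaded external
 * methods were requested; none when default locking is off) and routes
 * IOUserClient2022 subclasses to the opaque-arguments entry point.
 */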
6427 IOReturn
6428 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6429 {
6430 IOReturn ret;
6431
6432 ipcEnter(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6433 if (uc2022) {
6434 ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6435 } else {
6436 ret = externalMethod(selector, args);
6437 }
6438 ipcExit(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6439
6440 return ret;
6441 }
6442
6443 MIG_SERVER_ROUTINE IOReturn
6444 IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
6445 IOExternalMethodDispatch *dispatch,
6446 OSObject *target, void *reference)
6447 {
6448 panic("wrong externalMethod for IOUserClient2022");
6449 }
6450
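/*
 * IOUserClient2022 helper: bounds-checks the selector against the dispatch
 * array, validates the scalar counts and structure sizes (entries may opt out
 * with kIOUCVariableStructureSize), rejects async invocations the entry does
 * not allow, enforces an optional entitlement, and then calls the entry's
 * function; kIOReturnNoCompletion is returned when there is no function so
 * the subclass can complete the dispatch itself.
 */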
6451 IOReturn
6452 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6453 const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6454 OSObject * target, void * reference)
6455 {
6456 IOReturn err;
6457 IOExternalMethodArguments * args = (typeof(args))arguments;
6458 const IOExternalMethodDispatch2022 * dispatch;
6459
6460 if (!dispatchArray) {
6461 return kIOReturnError;
6462 }
6463 if (selector >= dispatchArrayCount) {
6464 return kIOReturnBadArgument;
6465 }
6466 dispatch = &dispatchArray[selector];
6467
6468 uint32_t count;
6469 count = dispatch->checkScalarInputCount;
6470 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6471 return kIOReturnBadArgument;
6472 }
6473
6474 count = dispatch->checkStructureInputSize;
6475 if ((kIOUCVariableStructureSize != count)
6476 && (count != ((args->structureInputDescriptor)
6477 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6478 return kIOReturnBadArgument;
6479 }
6480
6481 count = dispatch->checkScalarOutputCount;
6482 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6483 return kIOReturnBadArgument;
6484 }
6485
6486 count = dispatch->checkStructureOutputSize;
6487 if ((kIOUCVariableStructureSize != count)
6488 && (count != ((args->structureOutputDescriptor)
6489 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6490 return kIOReturnBadArgument;
6491 }
6492
6493 if (args->asyncWakePort && !dispatch->allowAsync) {
6494 return kIOReturnBadArgument;
6495 }
6496
6497 if (dispatch->checkEntitlement) {
6498 if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6499 return kIOReturnNotPrivileged;
6500 }
6501 }
6502
6503 if (dispatch->function) {
6504 err = (*dispatch->function)(target, reference, args);
6505 } else {
6506 err = kIOReturnNoCompletion; /* implementer can dispatch */
6507 }
6508 return err;
6509 }
6510
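/*
 * Default externalMethod(): when the caller supplies a dispatch entry, the
 * argument counts and sizes are validated and the function invoked directly.
 * Without one, the call falls back to the pre-Leopard IOExternalMethod /
 * IOExternalAsyncMethod tables and is routed through the shim_* adapters
 * above; that path cannot handle out-of-line structure descriptors.
 */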
6511 IOReturn
6512 IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
6513 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
6514 {
6515 IOReturn err;
6516 IOService * object;
6517 IOByteCount structureOutputSize;
6518
6519 if (dispatch) {
6520 uint32_t count;
6521 count = dispatch->checkScalarInputCount;
6522 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6523 return kIOReturnBadArgument;
6524 }
6525
6526 count = dispatch->checkStructureInputSize;
6527 if ((kIOUCVariableStructureSize != count)
6528 && (count != ((args->structureInputDescriptor)
6529 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6530 return kIOReturnBadArgument;
6531 }
6532
6533 count = dispatch->checkScalarOutputCount;
6534 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6535 return kIOReturnBadArgument;
6536 }
6537
6538 count = dispatch->checkStructureOutputSize;
6539 if ((kIOUCVariableStructureSize != count)
6540 && (count != ((args->structureOutputDescriptor)
6541 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6542 return kIOReturnBadArgument;
6543 }
6544
6545 if (dispatch->function) {
6546 err = (*dispatch->function)(target, reference, args);
6547 } else {
6548 err = kIOReturnNoCompletion; /* implementer can dispatch */
6549 }
6550 return err;
6551 }
6552
6553
6554 // pre-Leopard API's don't do ool structs
6555 if (args->structureInputDescriptor || args->structureOutputDescriptor) {
6556 err = kIOReturnIPCError;
6557 return err;
6558 }
6559
6560 structureOutputSize = args->structureOutputSize;
6561
6562 if (args->asyncWakePort) {
6563 IOExternalAsyncMethod * method;
6564 object = NULL;
6565 if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
6566 return kIOReturnUnsupported;
6567 }
6568
6569 if (kIOUCForegroundOnly & method->flags) {
6570 if (task_is_gpu_denied(current_task())) {
6571 return kIOReturnNotPermitted;
6572 }
6573 }
6574
6575 switch (method->flags & kIOUCTypeMask) {
6576 case kIOUCScalarIStructI:
6577 err = shim_io_async_method_scalarI_structureI( method, object,
6578 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6579 args->scalarInput, args->scalarInputCount,
6580 (char *)args->structureInput, args->structureInputSize );
6581 break;
6582
6583 case kIOUCScalarIScalarO:
6584 err = shim_io_async_method_scalarI_scalarO( method, object,
6585 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6586 args->scalarInput, args->scalarInputCount,
6587 args->scalarOutput, &args->scalarOutputCount );
6588 break;
6589
6590 case kIOUCScalarIStructO:
6591 err = shim_io_async_method_scalarI_structureO( method, object,
6592 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6593 args->scalarInput, args->scalarInputCount,
6594 (char *) args->structureOutput, &args->structureOutputSize );
6595 break;
6596
6597
6598 case kIOUCStructIStructO:
6599 err = shim_io_async_method_structureI_structureO( method, object,
6600 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6601 (char *)args->structureInput, args->structureInputSize,
6602 (char *) args->structureOutput, &args->structureOutputSize );
6603 break;
6604
6605 default:
6606 err = kIOReturnBadArgument;
6607 break;
6608 }
6609 } else {
6610 IOExternalMethod * method;
6611 object = NULL;
6612 if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
6613 return kIOReturnUnsupported;
6614 }
6615
6616 if (kIOUCForegroundOnly & method->flags) {
6617 if (task_is_gpu_denied(current_task())) {
6618 return kIOReturnNotPermitted;
6619 }
6620 }
6621
6622 switch (method->flags & kIOUCTypeMask) {
6623 case kIOUCScalarIStructI:
6624 err = shim_io_connect_method_scalarI_structureI( method, object,
6625 args->scalarInput, args->scalarInputCount,
6626 (char *) args->structureInput, args->structureInputSize );
6627 break;
6628
6629 case kIOUCScalarIScalarO:
6630 err = shim_io_connect_method_scalarI_scalarO( method, object,
6631 args->scalarInput, args->scalarInputCount,
6632 args->scalarOutput, &args->scalarOutputCount );
6633 break;
6634
6635 case kIOUCScalarIStructO:
6636 err = shim_io_connect_method_scalarI_structureO( method, object,
6637 args->scalarInput, args->scalarInputCount,
6638 (char *) args->structureOutput, &structureOutputSize );
6639 break;
6640
6641
6642 case kIOUCStructIStructO:
6643 err = shim_io_connect_method_structureI_structureO( method, object,
6644 (char *) args->structureInput, args->structureInputSize,
6645 (char *) args->structureOutput, &structureOutputSize );
6646 break;
6647
6648 default:
6649 err = kIOReturnBadArgument;
6650 break;
6651 }
6652 }
6653
6654 if (structureOutputSize > UINT_MAX) {
6655 structureOutputSize = 0;
6656 err = kIOReturnBadArgument;
6657 }
6658
6659 args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);
6660
6661 return err;
6662 }
6663
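/*
 * One-shot registration of the user-client filter callbacks: the global
 * callback pointer may only transition from NULL once, so a second
 * registration attempt returns kIOReturnBusy.
 */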
6664 IOReturn
6665 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6666 {
6667 if (size < sizeof(*callbacks)) {
6668 return kIOReturnBadArgument;
6669 }
6670 if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6671 return kIOReturnBusy;
6672 }
6673 return kIOReturnSuccess;
6674 }
6675
6676
6677 OSMetaClassDefineReservedUnused(IOUserClient, 0);
6678 OSMetaClassDefineReservedUnused(IOUserClient, 1);
6679 OSMetaClassDefineReservedUnused(IOUserClient, 2);
6680 OSMetaClassDefineReservedUnused(IOUserClient, 3);
6681 OSMetaClassDefineReservedUnused(IOUserClient, 4);
6682 OSMetaClassDefineReservedUnused(IOUserClient, 5);
6683 OSMetaClassDefineReservedUnused(IOUserClient, 6);
6684 OSMetaClassDefineReservedUnused(IOUserClient, 7);
6685 OSMetaClassDefineReservedUnused(IOUserClient, 8);
6686 OSMetaClassDefineReservedUnused(IOUserClient, 9);
6687 OSMetaClassDefineReservedUnused(IOUserClient, 10);
6688 OSMetaClassDefineReservedUnused(IOUserClient, 11);
6689 OSMetaClassDefineReservedUnused(IOUserClient, 12);
6690 OSMetaClassDefineReservedUnused(IOUserClient, 13);
6691 OSMetaClassDefineReservedUnused(IOUserClient, 14);
6692 OSMetaClassDefineReservedUnused(IOUserClient, 15);
6693
6694 OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
6695 OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
6696 OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
6697 OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6698