/*
 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <libkern/c++/OSKext.h>
#include <IOKit/IOKitServer.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOService.h>
#include <IOKit/IORegistryEntry.h>
#include <IOKit/IOCatalogue.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <IOKit/IOTimeStamp.h>
#include <IOKit/system.h>
#include <libkern/OSDebug.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/codesign.h>

#if CONFIG_MACF

extern "C" {
#include <security/mac_framework.h>
};
#include <sys/kauth.h>

#define IOMACF_LOG 0

#endif /* CONFIG_MACF */

#include <IOKit/assert.h>

#include "IOServicePrivate.h"
#include "IOKitKernelInternal.h"

#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
#define SCALAR32(x) ((uint32_t )x)
#define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))
#define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
#define REF32(x)    ((int)(x))

enum
{
    kIOUCAsync0Flags = 3ULL,
    kIOUCAsync64Flag = 1ULL
};

#if IOKITSTATS

#define IOStatisticsRegisterCounter() \
do { \
    reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

#define IOStatisticsUnregisterCounter() \
do { \
    if (reserved) \
        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

#define IOStatisticsClientCall() \
do { \
    IOStatistics::countUserClientCall(client); \
} while (0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// definitions we should get from osfmk

//typedef struct ipc_port * ipc_port_t;
typedef natural_t ipc_kobject_type_t;

#define IKOT_IOKIT_SPARE    27
#define IKOT_IOKIT_CONNECT  29
#define IKOT_IOKIT_OBJECT   30

extern "C" {

extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
    ipc_kobject_type_t type );

extern kern_return_t iokit_destroy_object_port( ipc_port_t port );

extern mach_port_name_t iokit_make_send_right( task_t task,
    io_object_t obj, ipc_kobject_type_t type );

extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );

extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);

extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);

extern ipc_port_t master_device_port;

extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );
extern void iokit_release_port_send( ipc_port_t port );

extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );

#include <mach/mach_traps.h>
#include <vm/vm_map.h>

} /* extern "C" */


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.

class IOMachPort : public OSObject
{
    OSDeclareDefaultStructors(IOMachPort)
public:
    OSObject *  object;
    ipc_port_t  port;
    UInt32      mscount;
    UInt8       holdDestroy;

    static IOMachPort * portForObject( OSObject * obj,
        ipc_kobject_type_t type );
    static bool noMoreSendersForObject( OSObject * obj,
        ipc_kobject_type_t type, mach_port_mscount_t * mscount );
    static void releasePortForObject( OSObject * obj,
        ipc_kobject_type_t type );
    static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

    static OSDictionary * dictForType( ipc_kobject_type_t type );

    static mach_port_name_t makeSendRightForTask( task_t task,
        io_object_t obj, ipc_kobject_type_t type );

    virtual void free() APPLE_KEXT_OVERRIDE;
};

#define super OSObject
OSDefineMetaClassAndStructors(IOMachPort, OSObject)

static IOLock * gIOObjectPortLock;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// not in dictForType() for debugging ease
static OSDictionary * gIOObjectPorts;
static OSDictionary * gIOConnectPorts;

OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
{
    OSDictionary ** dict;

    if( IKOT_IOKIT_OBJECT == type )
        dict = &gIOObjectPorts;
    else if( IKOT_IOKIT_CONNECT == type )
        dict = &gIOConnectPorts;
    else
        return( 0 );

    if( 0 == *dict)
        *dict = OSDictionary::withCapacity( 1 );

    return( *dict );
}

IOMachPort * IOMachPort::portForObject ( OSObject * obj,
    ipc_kobject_type_t type )
{
    IOMachPort * inst = 0;
    OSDictionary * dict;

    IOTakeLock( gIOObjectPortLock);

    do {

        dict = dictForType( type );
        if( !dict)
            continue;

        if( (inst = (IOMachPort *)
                dict->getObject( (const OSSymbol *) obj ))) {
            inst->mscount++;
            inst->retain();
            continue;
        }

        inst = new IOMachPort;
        if( inst && !inst->init()) {
            inst = 0;
            continue;
        }

        inst->port = iokit_alloc_object_port( obj, type );
        if( inst->port) {
            // retains obj
            dict->setObject( (const OSSymbol *) obj, inst );
            inst->mscount++;

        } else {
            inst->release();
            inst = 0;
        }

    } while( false );

    IOUnlock( gIOObjectPortLock);

    return( inst );
}

bool IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    OSDictionary * dict;
    IOMachPort * machPort;
    bool destroyed = true;

    IOTakeLock( gIOObjectPortLock);

    if( (dict = dictForType( type ))) {
        obj->retain();

        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort) {
            destroyed = (machPort->mscount <= *mscount);
            if( destroyed)
                dict->removeObject( (const OSSymbol *) obj );
            else
                *mscount = machPort->mscount;
        }
        obj->release();
    }

    IOUnlock( gIOObjectPortLock);

    return( destroyed );
}

void IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
    OSDictionary * dict;
    IOMachPort * machPort;

    IOTakeLock( gIOObjectPortLock);

    if( (dict = dictForType( type ))) {
        obj->retain();
        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort && !machPort->holdDestroy)
            dict->removeObject( (const OSSymbol *) obj );
        obj->release();
    }

    IOUnlock( gIOObjectPortLock);
}

void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
{
    OSDictionary * dict;
    IOMachPort * machPort;

    IOLockLock( gIOObjectPortLock );

    if( (dict = dictForType( type ))) {
        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort)
            machPort->holdDestroy = true;
    }

    IOLockUnlock( gIOObjectPortLock );
}

void IOUserClient::destroyUserReferences( OSObject * obj )
{
    IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

    // panther, 3160200
    // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

    OSDictionary * dict;

    IOTakeLock( gIOObjectPortLock);
    obj->retain();

    if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
    {
        IOMachPort * port;
        port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if (port)
        {
            IOUserClient * uc;
            if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
            {
                dict->setObject((const OSSymbol *) uc->mappings, port);
                iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);

                uc->mappings->release();
                uc->mappings = 0;
            }
            dict->removeObject( (const OSSymbol *) obj );
        }
    }
    obj->release();
    IOUnlock( gIOObjectPortLock);
}

mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
    io_object_t obj, ipc_kobject_type_t type )
{
    return( iokit_make_send_right( task, obj, type ));
}

void IOMachPort::free( void )
{
    if( port)
        iokit_destroy_object_port( port );
    super::free();
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class IOUserIterator : public OSIterator
{
    OSDeclareDefaultStructors(IOUserIterator)
public:
    OSObject *  userIteratorObject;
    IOLock *    lock;

    static IOUserIterator * withIterator(OSIterator * iter);
    virtual bool init( void ) APPLE_KEXT_OVERRIDE;
    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class IOUserNotification : public IOUserIterator
{
    OSDeclareDefaultStructors(IOUserNotification)

#define holdNotify userIteratorObject
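// holdNotify aliases the inherited userIteratorObject slot: IOUserNotification
// stores the IONotifier installed by setNotification() there instead of an iterator.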

public:

    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void setNotification( IONotifier * obj );

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )

IOUserIterator *
IOUserIterator::withIterator(OSIterator * iter)
{
    IOUserIterator * me;

    if (!iter) return (0);

    me = new IOUserIterator;
    if (me && !me->init())
    {
        me->release();
        me = 0;
    }
    if (!me) return me;
    me->userIteratorObject = iter;

    return (me);
}

bool
IOUserIterator::init( void )
{
    if (!OSObject::init()) return (false);

    lock = IOLockAlloc();
    if( !lock)
        return( false );

    return (true);
}

void
IOUserIterator::free()
{
    if (userIteratorObject) userIteratorObject->release();
    if (lock) IOLockFree(lock);
    OSObject::free();
}

void
IOUserIterator::reset()
{
    IOLockLock(lock);
    assert(OSDynamicCast(OSIterator, userIteratorObject));
    ((OSIterator *)userIteratorObject)->reset();
    IOLockUnlock(lock);
}

bool
IOUserIterator::isValid()
{
    bool ret;

    IOLockLock(lock);
    assert(OSDynamicCast(OSIterator, userIteratorObject));
    ret = ((OSIterator *)userIteratorObject)->isValid();
    IOLockUnlock(lock);

    return (ret);
}

OSObject *
IOUserIterator::getNextObject()
{
    OSObject * ret;

    IOLockLock(lock);
    assert(OSDynamicCast(OSIterator, userIteratorObject));
    ret = ((OSIterator *)userIteratorObject)->getNextObject();
    IOLockUnlock(lock);

    return (ret);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
extern "C" {

// functions called from osfmk/device/iokit_rpc.c

void
iokit_add_reference( io_object_t obj )
{
    if( obj)
        obj->retain();
}

void
iokit_remove_reference( io_object_t obj )
{
    if( obj)
        obj->release();
}

ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
    IOMachPort * machPort;
    ipc_port_t   port;

    if( (machPort = IOMachPort::portForObject( obj, type ))) {

        port = machPort->port;
        if( port)
            iokit_retain_port( port );

        machPort->release();

    } else
        port = NULL;

    return( port );
}

kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    IOUserClient *       client;
    IOMemoryMap *        map;
    IOUserNotification * notify;

    if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
        return( kIOReturnNotReady );

    if( IKOT_IOKIT_CONNECT == type)
    {
        if( (client = OSDynamicCast( IOUserClient, obj ))) {
            IOStatisticsClientCall();
            client->clientDied();
        }
    }
    else if( IKOT_IOKIT_OBJECT == type)
    {
        if( (map = OSDynamicCast( IOMemoryMap, obj )))
            map->taskDied();
        else if( (notify = OSDynamicCast( IOUserNotification, obj )))
            notify->setNotification( 0 );
    }

    return( kIOReturnSuccess );
}

};  /* extern "C" */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class IOServiceUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceUserNotification)

    struct PingMsg {
        mach_msg_header_t       msgHdr;
        OSNotificationHeader64  notifyHeader;
    };

    enum { kMaxOutstanding = 1024 };

    PingMsg  *  pingMsg;
    vm_size_t   msgSize;
    OSArray  *  newSet;
    OSObject *  lastEntry;
    bool        armed;

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
                       bool clientIs64 );
    virtual void free() APPLE_KEXT_OVERRIDE;

    static bool _handler( void * target,
                          void * ref, IOService * newService, IONotifier * notifier );
    virtual bool handler( void * ref, IOService * newService );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};

class IOServiceMessageUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceMessageUserNotification)

    struct PingMsg {
        mach_msg_header_t           msgHdr;
        mach_msg_body_t             msgBody;
        mach_msg_port_descriptor_t  ports[1];
        OSNotificationHeader64      notifyHeader __attribute__ ((packed));
    };

    PingMsg *   pingMsg;
    vm_size_t   msgSize;
    uint8_t     clientIs64;
    int         owningPID;

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
                       vm_size_t extraSize,
                       bool clientIs64 );

    virtual void free() APPLE_KEXT_OVERRIDE;

    static IOReturn _handler( void * target, void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );
    virtual IOReturn handler( void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOUserIterator
OSDefineMetaClass( IOUserNotification, IOUserIterator )
OSDefineAbstractStructors( IOUserNotification, IOUserIterator )

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOUserNotification::free( void )
{
    if (holdNotify)
    {
        assert(OSDynamicCast(IONotifier, holdNotify));
        ((IONotifier *)holdNotify)->remove();
        holdNotify = 0;
    }
    // can't be in handler now

    super::free();
}


void IOUserNotification::setNotification( IONotifier * notify )
{
    OSObject * previousNotify;

    IOLockLock( gIOObjectPortLock);

    previousNotify = holdNotify;
    holdNotify = notify;

    IOLockUnlock( gIOObjectPortLock);

    if( previousNotify)
    {
        assert(OSDynamicCast(IONotifier, previousNotify));
        ((IONotifier *)previousNotify)->remove();
    }
}

void IOUserNotification::reset()
{
    // ?
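    // presumably a no-op: there is no saved iteration position to rewind here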
}

bool IOUserNotification::isValid()
{
    return( true );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
    if( !super::init())
        return( false );

    newSet = OSArray::withCapacity( 1 );
    if( !newSet)
        return( false );

    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port = port;
    pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
        MACH_MSG_TYPE_COPY_SEND /*remote*/,
        MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size = msgSize;
    pingMsg->msgHdr.msgh_id   = kOSNotificationMessageID;

    pingMsg->notifyHeader.size = 0;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}

void IOServiceUserNotification::free( void )
{
    PingMsg  *  _pingMsg;
    vm_size_t   _msgSize;
    OSArray  *  _newSet;
    OSObject *  _lastEntry;

    _pingMsg   = pingMsg;
    _msgSize   = msgSize;
    _lastEntry = lastEntry;
    _newSet    = newSet;

    super::free();

    if( _pingMsg && _msgSize) {
        if (_pingMsg->msgHdr.msgh_remote_port) {
            iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
        }
        IOFree(_pingMsg, _msgSize);
    }

    if( _lastEntry)
        _lastEntry->release();

    if( _newSet)
        _newSet->release();
}

bool IOServiceUserNotification::_handler( void * target,
    void * ref, IOService * newService, IONotifier * notifier )
{
    return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
}

bool IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
    unsigned int  count;
    kern_return_t kr;
    ipc_port_t    port = NULL;
    bool          sendPing = false;

    IOTakeLock( lock );

    count = newSet->getCount();
    if( count < kMaxOutstanding) {

        newSet->setObject( newService );
        if( (sendPing = (armed && (0 == count))))
            armed = false;
    }

    IOUnlock( lock );

    if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
        IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );

    if( sendPing) {
        if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
            pingMsg->msgHdr.msgh_local_port = port;
        else
            pingMsg->msgHdr.msgh_local_port = NULL;

        kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
            pingMsg->msgHdr.msgh_size,
            (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
            0);
        if( port)
            iokit_release_port( port );

        if( KERN_SUCCESS != kr)
            IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
    }

    return( true );
}

OSObject * IOServiceUserNotification::getNextObject()
{
    unsigned int count;
    OSObject *   result;

    IOTakeLock( lock );

    if( lastEntry)
        lastEntry->release();

    count = newSet->getCount();
    if( count ) {
        result = newSet->getObject( count - 1 );
        result->retain();
        newSet->removeObject( count - 1);
    } else {
        result = 0;
        armed = true;
    }
    lastEntry = result;

    IOUnlock( lock );

    return( result );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, vm_size_t extraSize,
    bool client64 )
{
    if( !super::init())
        return( false );

    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    clientIs64 = client64;

    owningPID = proc_selfpid();

    extraSize += sizeof(IOServiceInterestContent64);
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port = port;
    pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
        | MACH_MSGH_BITS(
            MACH_MSG_TYPE_COPY_SEND /*remote*/,
            MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size = msgSize;
    pingMsg->msgHdr.msgh_id   = kOSNotificationMessageID;

    pingMsg->msgBody.msgh_descriptor_count = 1;

    pingMsg->ports[0].name        = 0;
    pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
    pingMsg->ports[0].type        = MACH_MSG_PORT_DESCRIPTOR;

    pingMsg->notifyHeader.size = extraSize;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}

void IOServiceMessageUserNotification::free( void )
{
    PingMsg *   _pingMsg;
    vm_size_t   _msgSize;

    _pingMsg = pingMsg;
    _msgSize = msgSize;

    super::free();

    if( _pingMsg && _msgSize) {
        if (_pingMsg->msgHdr.msgh_remote_port) {
            iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
        }
        IOFree( _pingMsg, _msgSize);
    }
}

IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
    UInt32 messageType, IOService * provider,
    void * argument, vm_size_t argSize )
{
    return( ((IOServiceMessageUserNotification *) target)->handler(
        ref, messageType, provider, argument, argSize));
}

IOReturn IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t argSize )
{
    kern_return_t                kr;
    ipc_port_t                   thisPort, providerPort;
    IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
        ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
    // == pingMsg->notifyHeader.content;

    if (kIOMessageCopyClientID == messageType)
    {
        *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
        return (kIOReturnSuccess);
    }

    data->messageType = messageType;

    if( argSize == 0)
    {
        data->messageArgument[0] = (io_user_reference_t) messageArgument;
        if (clientIs64)
            argSize = sizeof(data->messageArgument[0]);
        else
        {
            data->messageArgument[0] |= (data->messageArgument[0] << 32);
            argSize = sizeof(uint32_t);
        }
    }
    else
    {
        if( argSize > kIOUserNotifyMaxMessageSize)
            argSize = kIOUserNotifyMaxMessageSize;
        bcopy( messageArgument, data->messageArgument, argSize );
    }

    // adjust message size for ipc restrictions
    natural_t type;
    type = pingMsg->notifyHeader.type;
    type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
    type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
    pingMsg->notifyHeader.type = type;
    argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

    pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
        + sizeof( IOServiceInterestContent64 )
        - sizeof( data->messageArgument)
        + argSize;

    providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
    pingMsg->ports[0].name = providerPort;
    thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
    pingMsg->msgHdr.msgh_local_port = thisPort;
    kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
        pingMsg->msgHdr.msgh_size,
        (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
        0);
    if( thisPort)
        iokit_release_port( thisPort );
    if( providerPort)
        iokit_release_port( providerPort );

    if( KERN_SUCCESS != kr)
        IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );

    return( kIOReturnSuccess );
}

OSObject * IOServiceMessageUserNotification::getNextObject()
{
    return( 0 );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

void IOUserClient::initialize( void )
{
    gIOObjectPortLock = IOLockAlloc();

    assert( gIOObjectPortLock );
}

void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
        | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
}

void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
        | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}

void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
    setAsyncReference64(asyncRef, wakePort, callback, refcon);
    if (vm_map_is_64bit(get_task_map(task))) {
        asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
    }
}

static OSDictionary * CopyConsoleUser(UInt32 uid)
{
    OSArray * array;
    OSDictionary * user = 0;

    if ((array = OSDynamicCast(OSArray,
        IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
    {
        for (unsigned int idx = 0;
                (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
                idx++) {
            OSNumber * num;

            if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
              && (uid == num->unsigned32BitValue())) {
                user->retain();
                break;
            }
        }
        array->release();
    }
    return user;
}

static OSDictionary * CopyUserOnConsole(void)
{
    OSArray * array;
    OSDictionary * user = 0;

    if ((array = OSDynamicCast(OSArray,
        IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
    {
        for (unsigned int idx = 0;
                (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
                idx++)
        {
            if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
            {
                user->retain();
                break;
            }
        }
        array->release();
    }
    return (user);
}

IOReturn IOUserClient::clientHasAuthorization( task_t task,
    IOService * service )
{
    proc_t p;

    p = (proc_t) get_bsdtask_info(task);
    if (p)
    {
        uint64_t authorizationID;

        authorizationID = proc_uniqueid(p);
        if (authorizationID)
        {
            if (service->getAuthorizationID() == authorizationID)
            {
                return (kIOReturnSuccess);
            }
        }
    }

    return (kIOReturnNotPermitted);
}

IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
    kern_return_t          kr;
    security_token_t       token;
    mach_msg_type_number_t count;
    task_t                 task;
    OSDictionary *         user;
    bool                   secureConsole;


    if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
                 sizeof(kIOClientPrivilegeForeground)))
    {
        if (task_is_gpu_denied(current_task()))
            return (kIOReturnNotPrivileged);
        else
            return (kIOReturnSuccess);
    }

    if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
                 sizeof(kIOClientPrivilegeConsoleSession)))
    {
        kauth_cred_t cred;
        proc_t       p;

        task = (task_t) securityToken;
        if (!task)
            task = current_task();
        p = (proc_t) get_bsdtask_info(task);
        kr = kIOReturnNotPrivileged;

        if (p && (cred = kauth_cred_proc_ref(p)))
        {
            user = CopyUserOnConsole();
            if (user)
            {
                OSNumber * num;
                if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
                  && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
                {
                    kr = kIOReturnSuccess;
                }
                user->release();
            }
            kauth_cred_unref(&cred);
        }
        return (kr);
    }

    if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
            sizeof(kIOClientPrivilegeSecureConsoleProcess))))
        task = (task_t)((IOUCProcessToken *)securityToken)->token;
    else
        task = (task_t)securityToken;

    count = TASK_SECURITY_TOKEN_COUNT;
    kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

    if (KERN_SUCCESS != kr)
    {}
    else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
                      sizeof(kIOClientPrivilegeAdministrator))) {
        if (0 != token.val[0])
            kr = kIOReturnNotPrivileged;
    } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
                        sizeof(kIOClientPrivilegeLocalUser))) {
        user = CopyConsoleUser(token.val[0]);
        if ( user )
            user->release();
        else
            kr = kIOReturnNotPrivileged;
    } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
                                         sizeof(kIOClientPrivilegeConsoleUser))) {
        user = CopyConsoleUser(token.val[0]);
        if ( user ) {
            if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
                kr = kIOReturnNotPrivileged;
            else if ( secureConsole ) {
                OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
                if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
                    kr = kIOReturnNotPrivileged;
            }
            user->release();
        }
        else
            kr = kIOReturnNotPrivileged;
    } else
        kr = kIOReturnUnsupported;

    return (kr);
}

OSObject * IOUserClient::copyClientEntitlement( task_t task,
                                                const char * entitlement )
{
#define MAX_ENTITLEMENTS_LEN    (128 * 1024)

    proc_t p = NULL;
    pid_t pid = 0;
    char procname[MAXCOMLEN + 1] = "";
    size_t len = 0;
    void *entitlements_blob = NULL;
    char *entitlements_data = NULL;
    OSObject *entitlements_obj = NULL;
    OSDictionary *entitlements = NULL;
    OSString *errorString = NULL;
    OSObject *value = NULL;

    p = (proc_t)get_bsdtask_info(task);
    if (p == NULL)
        goto fail;
    pid = proc_pid(p);
    proc_name(pid, procname, (int)sizeof(procname));

    if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
        goto fail;

    if (len <= offsetof(CS_GenericBlob, data))
        goto fail;

    /*
     * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
     * we'll try to parse in the kernel.
     */
    len -= offsetof(CS_GenericBlob, data);
    if (len > MAX_ENTITLEMENTS_LEN) {
        IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
        goto fail;
    }

    /*
     * OSUnserializeXML() expects a nul-terminated string, but that isn't
     * what is stored in the entitlements blob. Copy the string and
     * terminate it.
     */
    entitlements_data = (char *)IOMalloc(len + 1);
    if (entitlements_data == NULL)
        goto fail;
    memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
    entitlements_data[len] = '\0';

    entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
    if (errorString != NULL) {
        IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
        goto fail;
    }
    if (entitlements_obj == NULL)
        goto fail;

    entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
    if (entitlements == NULL)
        goto fail;

    /* Fetch the entitlement value from the dictionary. */
    value = entitlements->getObject(entitlement);
    if (value != NULL)
        value->retain();

fail:
    if (entitlements_data != NULL)
        IOFree(entitlements_data, len + 1);
    if (entitlements_obj != NULL)
        entitlements_obj->release();
    if (errorString != NULL)
        errorString->release();
    return value;
}

bool IOUserClient::init()
{
    if (getPropertyTable() || super::init())
        return reserve();

    return false;
}

bool IOUserClient::init(OSDictionary * dictionary)
{
    if (getPropertyTable() || super::init(dictionary))
        return reserve();

    return false;
}

bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type )
{
    if (getPropertyTable() || super::init())
        return reserve();

    return false;
}

bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type,
                                OSDictionary * properties )
{
    bool ok;

    ok = super::init( properties );
    ok &= initWithTask( owningTask, securityID, type );

    return( ok );
}

bool IOUserClient::reserve()
{
    if(!reserved) {
        reserved = IONew(ExpansionData, 1);
        if (!reserved) {
            return false;
        }
    }
    setTerminateDefer(NULL, true);
    IOStatisticsRegisterCounter();

    return true;
}

void IOUserClient::free()
{
    if( mappings)
        mappings->release();

    IOStatisticsUnregisterCounter();

    if (reserved)
        IODelete(reserved, ExpansionData, 1);

    super::free();
}

IOReturn IOUserClient::clientDied( void )
{
    return( clientClose());
}

IOReturn IOUserClient::clientClose( void )
{
    return( kIOReturnUnsupported );
}

IOService * IOUserClient::getService( void )
{
    return( 0 );
}

IOReturn IOUserClient::registerNotificationPort(
    mach_port_t /* port */,
    UInt32      /* type */,
    UInt32      /* refCon */)
{
    return( kIOReturnUnsupported);
}

IOReturn IOUserClient::registerNotificationPort(
    mach_port_t port,
    UInt32      type,
    io_user_reference_t refCon)
{
    return (registerNotificationPort(port, type, (UInt32) refCon));
}

IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
                                                 semaphore_t * semaphore )
{
    return( kIOReturnUnsupported);
}

IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
{
    return( kIOReturnUnsupported);
}

IOReturn IOUserClient::clientMemoryForType( UInt32 type,
                                            IOOptionBits * options,
                                            IOMemoryDescriptor ** memory )
{
    return( kIOReturnUnsupported);
}

#if !__LP64__
IOMemoryMap * IOUserClient::mapClientMemory(
    IOOptionBits     type,
    task_t           task,
    IOOptionBits     mapFlags,
    IOVirtualAddress atAddress )
{
    return (NULL);
}
#endif

IOMemoryMap * IOUserClient::mapClientMemory64(
    IOOptionBits      type,
    task_t            task,
    IOOptionBits      mapFlags,
    mach_vm_address_t atAddress )
{
    IOReturn             err;
    IOOptionBits         options = 0;
    IOMemoryDescriptor * memory;
    IOMemoryMap *        map = 0;

    err = clientMemoryForType( (UInt32) type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

        options = (options & ~kIOMapUserOptionsMask)
                | (mapFlags & kIOMapUserOptionsMask);
        map = memory->createMappingInTask( task, atAddress, options );
        memory->release();
    }

    return( map );
}

IOReturn IOUserClient::exportObjectToClient(task_t task,
            OSObject *obj, io_object_t *clientObj)
{
    mach_port_name_t name;

    name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

    *(mach_port_name_t *)clientObj = name;
    return kIOReturnSuccess;
}

IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}

IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}

IOExternalMethod * IOUserClient::
getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
{
    IOExternalMethod *method = getExternalMethodForIndex(index);

    if (method)
        *targetP = (IOService *) method->object;

    return method;
}

IOExternalAsyncMethod * IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);

    if (method)
        *targetP = (IOService *) method->object;

    return method;
}

IOExternalTrap * IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
    return NULL;
}

IOExternalTrap * IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalTrap *trap = getExternalTrapForIndex(index);

    if (trap) {
        *targetP = trap->object;
    }

    return trap;
}

IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
{
    mach_port_t port;
    port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);

    if (MACH_PORT_NULL != port)
        iokit_release_port_send(port);

    return (kIOReturnSuccess);
}

IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
{
    if (MACH_PORT_NULL != port)
        iokit_release_port_send(port);

    return (kIOReturnSuccess);
}

IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
                                       IOReturn result, void *args[], UInt32 numArgs)
{
    OSAsyncReference64  reference64;
    io_user_reference_t args64[kMaxAsyncArgs];
    unsigned int        idx;

    if (numArgs > kMaxAsyncArgs)
        return kIOReturnMessageTooLarge;

    for (idx = 0; idx < kOSAsyncRef64Count; idx++)
        reference64[idx] = REF64(reference[idx]);

    for (idx = 0; idx < numArgs; idx++)
        args64[idx] = REF64(args[idx]);

    return (sendAsyncResult64(reference64, result, args64, numArgs));
}

IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
    return _sendAsyncResult64(reference, result, args, numArgs, options);
}

IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
    return _sendAsyncResult64(reference, result, args, numArgs, 0);
}

IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
    struct ReplyMsg
    {
        mach_msg_header_t msgHdr;
        union
        {
            struct
            {
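                // 32-bit client reply layout: notification header, completion content, then 32-bit args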
                OSNotificationHeader     notifyHdr;
                IOAsyncCompletionContent asyncContent;
                uint32_t                 args[kMaxAsyncArgs];
            } msg32;
            struct
            {
                OSNotificationHeader64   notifyHdr;
                IOAsyncCompletionContent asyncContent;
                io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
            } msg64;
        } m;
    };
    ReplyMsg      replyMsg;
    mach_port_t   replyPort;
    kern_return_t kr;

    // If no reply port, do nothing.
    replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
    if (replyPort == MACH_PORT_NULL)
        return kIOReturnSuccess;

    if (numArgs > kMaxAsyncArgs)
        return kIOReturnMessageTooLarge;

    replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                               0 /*local*/);
    replyMsg.msgHdr.msgh_remote_port = replyPort;
    replyMsg.msgHdr.msgh_local_port  = 0;
    replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
    if (kIOUCAsync64Flag & reference[0])
    {
        replyMsg.msgHdr.msgh_size =
            sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
            - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
        replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
            + numArgs * sizeof(io_user_reference_t);
        replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
        bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));

        replyMsg.m.msg64.asyncContent.result = result;
        if (numArgs)
            bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
    }
    else
    {
        unsigned int idx;

        replyMsg.msgHdr.msgh_size =
            sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
            - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

        replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
            + numArgs * sizeof(uint32_t);
        replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

        for (idx = 0; idx < kOSAsyncRefCount; idx++)
            replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);

        replyMsg.m.msg32.asyncContent.result = result;

        for (idx = 0; idx < numArgs; idx++)
            replyMsg.m.msg32.args[idx] = REF32(args[idx]);
    }

    if ((options & kIOUserNotifyOptionCanDrop) != 0) {
        kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
            replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
    } else {
        /* Fail on full queue. */
        kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
            replyMsg.msgHdr.msgh_size);
    }
    if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr))
        IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
    return kr;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" {

#define CHECK(cls,obj,out)                      \
    cls * out;                                  \
    if( !(out = OSDynamicCast( cls, obj)))      \
        return( kIOReturnBadArgument )

#define CHECKLOCKED(cls,obj,out)                                \
    IOUserIterator * oIter;                                     \
    cls * out;                                                  \
    if( !(oIter = OSDynamicCast(IOUserIterator, obj)))          \
        return (kIOReturnBadArgument);                          \
    if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
        return (kIOReturnBadArgument)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Create a vm_map_copy_t or kalloc'ed data for memory
// to be copied out. ipc will free after the copyout.

static kern_return_t copyoutkdata( const void * data, vm_size_t len,
                                   io_buf_ptr_t * buf )
{
    kern_return_t err;
    vm_map_copy_t copy;

    err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
                         false /* src_destroy */, &copy);

    assert( err == KERN_SUCCESS );
    if( err == KERN_SUCCESS )
        *buf = (char *) copy;

    return( err );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* Routine io_server_version */
kern_return_t is_io_server_version(
    mach_port_t master_port,
    uint64_t *version)
{
    *version = IOKIT_SERVER_VERSION;
    return (kIOReturnSuccess);
}

/* Routine io_object_get_class */
kern_return_t is_io_object_get_class(
    io_object_t object,
    io_name_t className )
{
    const OSMetaClass * my_obj = NULL;
    const char * my_class_name = NULL;

    if( !object)
        return( kIOReturnBadArgument );

    if ( !my_class_name ) {
        my_obj = object->getMetaClass();
        if (!my_obj) {
            return (kIOReturnNotFound);
        }

        my_class_name = my_obj->getClassName();
    }

    strlcpy( className, my_class_name, sizeof(io_name_t));

    return( kIOReturnSuccess );
}

/* Routine io_object_get_superclass */
kern_return_t is_io_object_get_superclass(
    mach_port_t master_port,
    io_name_t obj_name,
    io_name_t class_name)
{
    const OSMetaClass * my_obj = NULL;
    const OSMetaClass * superclass = NULL;
    const OSSymbol * my_name = NULL;
    const char * my_cstr = NULL;

    if (!obj_name || !class_name)
        return (kIOReturnBadArgument);

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    my_name = OSSymbol::withCString(obj_name);

    if (my_name) {
        my_obj = OSMetaClass::getMetaClassWithName(my_name);
        my_name->release();
    }
    if (my_obj) {
        superclass = my_obj->getSuperClass();
    }

    if (!superclass) {
        return( kIOReturnNotFound );
    }

    my_cstr = superclass->getClassName();

    if (my_cstr) {
        strlcpy(class_name, my_cstr, sizeof(io_name_t));
        return( kIOReturnSuccess );
    }
    return (kIOReturnNotFound);
}

/* Routine io_object_get_bundle_identifier */
kern_return_t is_io_object_get_bundle_identifier(
    mach_port_t master_port,
    io_name_t obj_name,
    io_name_t bundle_name)
{
    const OSMetaClass * my_obj = NULL;
    const OSSymbol * my_name = NULL;
    const OSSymbol * identifier = NULL;
    const char * my_cstr = NULL;

    if (!obj_name || !bundle_name)
        return (kIOReturnBadArgument);

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    my_name = OSSymbol::withCString(obj_name);

    if (my_name) {
        my_obj = OSMetaClass::getMetaClassWithName(my_name);
        my_name->release();
    }

    if (my_obj) {
        identifier = my_obj->getKmodName();
    }
    if (!identifier) {
        return( kIOReturnNotFound );
    }

    my_cstr = identifier->getCStringNoCopy();
    if (my_cstr) {
        strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
        return( kIOReturnSuccess );
    }

    return (kIOReturnBadArgument);
}

/* Routine io_object_conforms_to */
kern_return_t is_io_object_conforms_to(
    io_object_t object,
    io_name_t className,
    boolean_t *conforms )
{
    if( !object)
        return( kIOReturnBadArgument );

    *conforms = (0 != object->metaCast( className ));

    return( kIOReturnSuccess );
}

/* Routine io_object_get_retain_count */
kern_return_t is_io_object_get_retain_count(
    io_object_t object,
    uint32_t *retainCount )
{
    if( !object)
        return( kIOReturnBadArgument );

    *retainCount = object->getRetainCount();
    return( kIOReturnSuccess );
}

/* Routine io_iterator_next */
kern_return_t is_io_iterator_next(
    io_object_t iterator,
    io_object_t *object )
{
    IOReturn   ret;
    OSObject * obj;

    CHECK( OSIterator, iterator, iter );

    obj = iter->getNextObject();
    if( obj) {
        obj->retain();
        *object = obj;
        ret = kIOReturnSuccess;
    } else
        ret = kIOReturnNoDevice;

    return (ret);
}

/* Routine io_iterator_reset */
kern_return_t is_io_iterator_reset(
    io_object_t iterator )
{
    CHECK( OSIterator, iterator, iter );

    iter->reset();

    return( kIOReturnSuccess );
}

/* Routine io_iterator_is_valid */
kern_return_t is_io_iterator_is_valid(
    io_object_t iterator,
    boolean_t *is_valid )
{
    CHECK( OSIterator, iterator, iter );

    *is_valid = iter->isValid();

    return( kIOReturnSuccess );
}


static kern_return_t internal_io_service_match_property_table(
    io_service_t _service,
    const char * matching,
    mach_msg_type_number_t matching_size,
    boolean_t *matches)
{
    CHECK( IOService, _service, service );

    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    obj = matching_size ? OSUnserializeXML(matching, matching_size)
                        : OSUnserializeXML(matching);
    if( (dict = OSDynamicCast( OSDictionary, obj))) {

        *matches = service->passiveMatch( dict );
        kr = kIOReturnSuccess;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}

/* Routine io_service_match_property_table */
kern_return_t is_io_service_match_property_table(
    io_service_t service,
    io_string_t matching,
    boolean_t *matches )
{
    return (internal_io_service_match_property_table(service, matching, 0, matches));
}


/* Routine io_service_match_property_table_ool */
kern_return_t is_io_service_match_property_table_ool(
    io_object_t service,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    kern_return_t *result,
    boolean_t *matches )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        *result = internal_io_service_match_property_table(service,
                (const char *)data, matchingCnt, matches );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

/* Routine io_service_match_property_table_bin */
kern_return_t is_io_service_match_property_table_bin(
    io_object_t service,
    io_struct_inband_t matching,
    mach_msg_type_number_t matchingCnt,
    boolean_t *matches)
{
    return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
}

static kern_return_t internal_io_service_get_matching_services(
    mach_port_t master_port,
    const char * matching,
    mach_msg_type_number_t matching_size,
    io_iterator_t *existing )
{
    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    obj = matching_size ? OSUnserializeXML(matching, matching_size)
                        : OSUnserializeXML(matching);
    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
        kr = kIOReturnSuccess;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}

/* Routine io_service_get_matching_services */
kern_return_t is_io_service_get_matching_services(
    mach_port_t master_port,
    io_string_t matching,
    io_iterator_t *existing )
{
    return (internal_io_service_get_matching_services(master_port, matching, 0, existing));
}

/* Routine io_service_get_matching_services_ool */
kern_return_t is_io_service_get_matching_services_ool(
    mach_port_t master_port,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    kern_return_t *result,
    io_object_t *existing )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        *result = internal_io_service_get_matching_services(master_port,
                    (const char *) data, matchingCnt, existing);
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

/* Routine io_service_get_matching_services_bin */
kern_return_t is_io_service_get_matching_services_bin(
    mach_port_t master_port,
    io_struct_inband_t matching,
    mach_msg_type_number_t matchingCnt,
    io_object_t *existing)
{
    return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
}


static kern_return_t internal_io_service_get_matching_service(
    mach_port_t master_port,
    const char * matching,
    mach_msg_type_number_t matching_size,
    io_service_t *service )
{
    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    obj = matching_size ? OSUnserializeXML(matching, matching_size)
                        : OSUnserializeXML(matching);
    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *service = IOService::copyMatchingService( dict );
        kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}

/* Routine io_service_get_matching_service */
kern_return_t is_io_service_get_matching_service(
    mach_port_t master_port,
    io_string_t matching,
    io_service_t *service )
{
    return (internal_io_service_get_matching_service(master_port, matching, 0, service));
}

/* Routine io_service_get_matching_services_ool */
kern_return_t is_io_service_get_matching_service_ool(
    mach_port_t master_port,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    kern_return_t *result,
    io_object_t *service )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        *result = internal_io_service_get_matching_service(master_port,
                    (const char *) data, matchingCnt, service );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

/* Routine io_service_get_matching_service_bin */
kern_return_t is_io_service_get_matching_service_bin(
    mach_port_t master_port,
    io_struct_inband_t matching,
    mach_msg_type_number_t matchingCnt,
    io_object_t *service)
{
    return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
}

static kern_return_t internal_io_service_add_notification(
    mach_port_t master_port,
    io_name_t notification_type,
    const char * matching,
    size_t matching_size,
    mach_port_t port,
    void * reference,
    vm_size_t referenceSize,
    bool client64,
    io_object_t * notification )
{
    IOServiceUserNotification * userNotify = 0;
    IONotifier *                notify = 0;
    const OSSymbol *            sym;
    OSDictionary *              dict;
    IOReturn                    err;
    unsigned long int           userMsgType;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    do {
        err = kIOReturnNoResources;

        if( !(sym = OSSymbol::withCString( notification_type )))
            err = kIOReturnNoResources;

        if (matching_size)
        {
            dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
        }
        else
        {
            dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching));
        }

        if (!dict) {
            err = kIOReturnBadArgument;
            continue;
        }

        if( (sym == gIOPublishNotification)
         || (sym == gIOFirstPublishNotification))
            userMsgType = kIOServicePublishNotificationType;
        else if( (sym == gIOMatchedNotification)
              || (sym == gIOFirstMatchNotification))
            userMsgType = kIOServiceMatchedNotificationType;
        else if( sym == gIOTerminatedNotification)
            userMsgType = kIOServiceTerminatedNotificationType;
        else
            userMsgType = kLastIOKitNotificationType;

        userNotify = new IOServiceUserNotification;

        if( userNotify && !userNotify->init( port, userMsgType,
                                             reference, referenceSize, client64)) {
            iokit_release_port_send(port);
            userNotify->release();
            userNotify = 0;
        }
        if( !userNotify)
            continue;

        notify = IOService::addMatchingNotification( sym, dict,
                                                     &userNotify->_handler, userNotify );
        if( notify) {
            *notification = userNotify;
userNotify; 2090 userNotify->setNotification( notify ); 2091 err = kIOReturnSuccess; 2092 } else 2093 err = kIOReturnUnsupported; 2094 2095 } while( false ); 2096 2097 if( sym) 2098 sym->release(); 2099 if( dict) 2100 dict->release(); 2101 2102 return( err ); 2103 } 2104 2105 2106 /* Routine io_service_add_notification */ 2107 kern_return_t is_io_service_add_notification( 2108 mach_port_t master_port, 2109 io_name_t notification_type, 2110 io_string_t matching, 2111 mach_port_t port, 2112 io_async_ref_t reference, 2113 mach_msg_type_number_t referenceCnt, 2114 io_object_t * notification ) 2115 { 2116 return (internal_io_service_add_notification(master_port, notification_type, 2117 matching, 0, port, &reference[0], sizeof(io_async_ref_t), 2118 false, notification)); 2119 } 2120 2121 /* Routine io_service_add_notification_64 */ 2122 kern_return_t is_io_service_add_notification_64( 2123 mach_port_t master_port, 2124 io_name_t notification_type, 2125 io_string_t matching, 2126 mach_port_t wake_port, 2127 io_async_ref64_t reference, 2128 mach_msg_type_number_t referenceCnt, 2129 io_object_t *notification ) 2130 { 2131 return (internal_io_service_add_notification(master_port, notification_type, 2132 matching, 0, wake_port, &reference[0], sizeof(io_async_ref64_t), 2133 true, notification)); 2134 } 2135 2136 /* Routine io_service_add_notification_bin */ 2137 kern_return_t is_io_service_add_notification_bin 2138 ( 2139 mach_port_t master_port, 2140 io_name_t notification_type, 2141 io_struct_inband_t matching, 2142 mach_msg_type_number_t matchingCnt, 2143 mach_port_t wake_port, 2144 io_async_ref_t reference, 2145 mach_msg_type_number_t referenceCnt, 2146 io_object_t *notification) 2147 { 2148 return (internal_io_service_add_notification(master_port, notification_type, 2149 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t), 2150 false, notification)); 2151 } 2152 2153 /* Routine io_service_add_notification_bin_64 */ 2154 kern_return_t is_io_service_add_notification_bin_64 2155 ( 2156 mach_port_t master_port, 2157 io_name_t notification_type, 2158 io_struct_inband_t matching, 2159 mach_msg_type_number_t matchingCnt, 2160 mach_port_t wake_port, 2161 io_async_ref64_t reference, 2162 mach_msg_type_number_t referenceCnt, 2163 io_object_t *notification) 2164 { 2165 return (internal_io_service_add_notification(master_port, notification_type, 2166 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t), 2167 true, notification)); 2168 } 2169 2170 static kern_return_t internal_io_service_add_notification_ool( 2171 mach_port_t master_port, 2172 io_name_t notification_type, 2173 io_buf_ptr_t matching, 2174 mach_msg_type_number_t matchingCnt, 2175 mach_port_t wake_port, 2176 void * reference, 2177 vm_size_t referenceSize, 2178 bool client64, 2179 kern_return_t *result, 2180 io_object_t *notification ) 2181 { 2182 kern_return_t kr; 2183 vm_offset_t data; 2184 vm_map_offset_t map_data; 2185 2186 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching ); 2187 data = CAST_DOWN(vm_offset_t, map_data); 2188 2189 if( KERN_SUCCESS == kr) { 2190 // must return success after vm_map_copyout() succeeds 2191 *result = internal_io_service_add_notification( master_port, notification_type, 2192 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification ); 2193 vm_deallocate( kernel_map, data, matchingCnt ); 2194 } 2195 2196 return( kr ); 2197 } 2198 2199 /* Routine io_service_add_notification_ool */ 2200 kern_return_t 
is_io_service_add_notification_ool( 2201 mach_port_t master_port, 2202 io_name_t notification_type, 2203 io_buf_ptr_t matching, 2204 mach_msg_type_number_t matchingCnt, 2205 mach_port_t wake_port, 2206 io_async_ref_t reference, 2207 mach_msg_type_number_t referenceCnt, 2208 kern_return_t *result, 2209 io_object_t *notification ) 2210 { 2211 return (internal_io_service_add_notification_ool(master_port, notification_type, 2212 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t), 2213 false, result, notification)); 2214 } 2215 2216 /* Routine io_service_add_notification_ool_64 */ 2217 kern_return_t is_io_service_add_notification_ool_64( 2218 mach_port_t master_port, 2219 io_name_t notification_type, 2220 io_buf_ptr_t matching, 2221 mach_msg_type_number_t matchingCnt, 2222 mach_port_t wake_port, 2223 io_async_ref64_t reference, 2224 mach_msg_type_number_t referenceCnt, 2225 kern_return_t *result, 2226 io_object_t *notification ) 2227 { 2228 return (internal_io_service_add_notification_ool(master_port, notification_type, 2229 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t), 2230 true, result, notification)); 2231 } 2232 2233 /* Routine io_service_add_notification_old */ 2234 kern_return_t is_io_service_add_notification_old( 2235 mach_port_t master_port, 2236 io_name_t notification_type, 2237 io_string_t matching, 2238 mach_port_t port, 2239 // for binary compatibility reasons, this must be natural_t for ILP32 2240 natural_t ref, 2241 io_object_t * notification ) 2242 { 2243 return( is_io_service_add_notification( master_port, notification_type, 2244 matching, port, &ref, 1, notification )); 2245 } 2246 2247 2248 static kern_return_t internal_io_service_add_interest_notification( 2249 io_object_t _service, 2250 io_name_t type_of_interest, 2251 mach_port_t port, 2252 void * reference, 2253 vm_size_t referenceSize, 2254 bool client64, 2255 io_object_t * notification ) 2256 { 2257 2258 IOServiceMessageUserNotification * userNotify = 0; 2259 IONotifier * notify = 0; 2260 const OSSymbol * sym; 2261 IOReturn err; 2262 2263 CHECK( IOService, _service, service ); 2264 2265 err = kIOReturnNoResources; 2266 if( (sym = OSSymbol::withCString( type_of_interest ))) do { 2267 2268 userNotify = new IOServiceMessageUserNotification; 2269 2270 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType, 2271 reference, referenceSize, 2272 kIOUserNotifyMaxMessageSize, 2273 client64 )) { 2274 iokit_release_port_send(port); 2275 userNotify->release(); 2276 userNotify = 0; 2277 } 2278 if( !userNotify) 2279 continue; 2280 2281 notify = service->registerInterest( sym, 2282 &userNotify->_handler, userNotify ); 2283 if( notify) { 2284 *notification = userNotify; 2285 userNotify->setNotification( notify ); 2286 err = kIOReturnSuccess; 2287 } else 2288 err = kIOReturnUnsupported; 2289 2290 sym->release(); 2291 2292 } while( false ); 2293 2294 return( err ); 2295 } 2296 2297 /* Routine io_service_add_message_notification */ 2298 kern_return_t is_io_service_add_interest_notification( 2299 io_object_t service, 2300 io_name_t type_of_interest, 2301 mach_port_t port, 2302 io_async_ref_t reference, 2303 mach_msg_type_number_t referenceCnt, 2304 io_object_t * notification ) 2305 { 2306 return (internal_io_service_add_interest_notification(service, type_of_interest, 2307 port, &reference[0], sizeof(io_async_ref_t), false, notification)); 2308 } 2309 2310 /* Routine io_service_add_interest_notification_64 */ 2311 kern_return_t is_io_service_add_interest_notification_64( 
2312 io_object_t service, 2313 io_name_t type_of_interest, 2314 mach_port_t wake_port, 2315 io_async_ref64_t reference, 2316 mach_msg_type_number_t referenceCnt, 2317 io_object_t *notification ) 2318 { 2319 return (internal_io_service_add_interest_notification(service, type_of_interest, 2320 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification)); 2321 } 2322 2323 2324 /* Routine io_service_acknowledge_notification */ 2325 kern_return_t is_io_service_acknowledge_notification( 2326 io_object_t _service, 2327 natural_t notify_ref, 2328 natural_t response ) 2329 { 2330 CHECK( IOService, _service, service ); 2331 2332 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref, 2333 (IOOptionBits) response )); 2334 2335 } 2336 2337 /* Routine io_connect_get_semaphore */ 2338 kern_return_t is_io_connect_get_notification_semaphore( 2339 io_connect_t connection, 2340 natural_t notification_type, 2341 semaphore_t *semaphore ) 2342 { 2343 CHECK( IOUserClient, connection, client ); 2344 2345 IOStatisticsClientCall(); 2346 return( client->getNotificationSemaphore( (UInt32) notification_type, 2347 semaphore )); 2348 } 2349 2350 /* Routine io_registry_get_root_entry */ 2351 kern_return_t is_io_registry_get_root_entry( 2352 mach_port_t master_port, 2353 io_object_t *root ) 2354 { 2355 IORegistryEntry * entry; 2356 2357 if( master_port != master_device_port) 2358 return( kIOReturnNotPrivileged); 2359 2360 entry = IORegistryEntry::getRegistryRoot(); 2361 if( entry) 2362 entry->retain(); 2363 *root = entry; 2364 2365 return( kIOReturnSuccess ); 2366 } 2367 2368 /* Routine io_registry_create_iterator */ 2369 kern_return_t is_io_registry_create_iterator( 2370 mach_port_t master_port, 2371 io_name_t plane, 2372 uint32_t options, 2373 io_object_t *iterator ) 2374 { 2375 if( master_port != master_device_port) 2376 return( kIOReturnNotPrivileged); 2377 2378 *iterator = IOUserIterator::withIterator( 2379 IORegistryIterator::iterateOver( 2380 IORegistryEntry::getPlane( plane ), options )); 2381 2382 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument ); 2383 } 2384 2385 /* Routine io_registry_entry_create_iterator */ 2386 kern_return_t is_io_registry_entry_create_iterator( 2387 io_object_t registry_entry, 2388 io_name_t plane, 2389 uint32_t options, 2390 io_object_t *iterator ) 2391 { 2392 CHECK( IORegistryEntry, registry_entry, entry ); 2393 2394 *iterator = IOUserIterator::withIterator( 2395 IORegistryIterator::iterateOver( entry, 2396 IORegistryEntry::getPlane( plane ), options )); 2397 2398 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument ); 2399 } 2400 2401 /* Routine io_registry_iterator_enter */ 2402 kern_return_t is_io_registry_iterator_enter_entry( 2403 io_object_t iterator ) 2404 { 2405 CHECKLOCKED( IORegistryIterator, iterator, iter ); 2406 2407 IOLockLock(oIter->lock); 2408 iter->enterEntry(); 2409 IOLockUnlock(oIter->lock); 2410 2411 return( kIOReturnSuccess ); 2412 } 2413 2414 /* Routine io_registry_iterator_exit */ 2415 kern_return_t is_io_registry_iterator_exit_entry( 2416 io_object_t iterator ) 2417 { 2418 bool didIt; 2419 2420 CHECKLOCKED( IORegistryIterator, iterator, iter ); 2421 2422 IOLockLock(oIter->lock); 2423 didIt = iter->exitEntry(); 2424 IOLockUnlock(oIter->lock); 2425 2426 return( didIt ? 
kIOReturnSuccess : kIOReturnNoDevice ); 2427 } 2428 2429 /* Routine io_registry_entry_from_path */ 2430 kern_return_t is_io_registry_entry_from_path( 2431 mach_port_t master_port, 2432 io_string_t path, 2433 io_object_t *registry_entry ) 2434 { 2435 IORegistryEntry * entry; 2436 2437 if( master_port != master_device_port) 2438 return( kIOReturnNotPrivileged); 2439 2440 entry = IORegistryEntry::fromPath( path ); 2441 2442 *registry_entry = entry; 2443 2444 return( kIOReturnSuccess ); 2445 } 2446 2447 2448 /* Routine io_registry_entry_from_path */ 2449 kern_return_t is_io_registry_entry_from_path_ool( 2450 mach_port_t master_port, 2451 io_string_inband_t path, 2452 io_buf_ptr_t path_ool, 2453 mach_msg_type_number_t path_oolCnt, 2454 kern_return_t *result, 2455 io_object_t *registry_entry) 2456 { 2457 IORegistryEntry * entry; 2458 vm_map_offset_t map_data; 2459 const char * cpath; 2460 IOReturn res; 2461 kern_return_t err; 2462 2463 if (master_port != master_device_port) return(kIOReturnNotPrivileged); 2464 2465 map_data = 0; 2466 entry = 0; 2467 res = err = KERN_SUCCESS; 2468 if (path[0]) cpath = path; 2469 else 2470 { 2471 if (!path_oolCnt) return(kIOReturnBadArgument); 2472 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge); 2473 2474 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool); 2475 if (KERN_SUCCESS == err) 2476 { 2477 // must return success to mig after vm_map_copyout() succeeds, so result is actual 2478 cpath = CAST_DOWN(const char *, map_data); 2479 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument; 2480 } 2481 } 2482 2483 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) 2484 { 2485 entry = IORegistryEntry::fromPath(cpath); 2486 res = entry ? kIOReturnSuccess : kIOReturnNotFound; 2487 } 2488 2489 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt); 2490 2491 if (KERN_SUCCESS != err) res = err; 2492 *registry_entry = entry; 2493 *result = res; 2494 2495 return (err); 2496 } 2497 2498 2499 /* Routine io_registry_entry_in_plane */ 2500 kern_return_t is_io_registry_entry_in_plane( 2501 io_object_t registry_entry, 2502 io_name_t plane, 2503 boolean_t *inPlane ) 2504 { 2505 CHECK( IORegistryEntry, registry_entry, entry ); 2506 2507 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane )); 2508 2509 return( kIOReturnSuccess ); 2510 } 2511 2512 2513 /* Routine io_registry_entry_get_path */ 2514 kern_return_t is_io_registry_entry_get_path( 2515 io_object_t registry_entry, 2516 io_name_t plane, 2517 io_string_t path ) 2518 { 2519 int length; 2520 CHECK( IORegistryEntry, registry_entry, entry ); 2521 2522 length = sizeof( io_string_t); 2523 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) 2524 return( kIOReturnSuccess ); 2525 else 2526 return( kIOReturnBadArgument ); 2527 } 2528 2529 /* Routine io_registry_entry_get_path */ 2530 kern_return_t is_io_registry_entry_get_path_ool( 2531 io_object_t registry_entry, 2532 io_name_t plane, 2533 io_string_inband_t path, 2534 io_buf_ptr_t *path_ool, 2535 mach_msg_type_number_t *path_oolCnt) 2536 { 2537 enum { kMaxPath = 16384 }; 2538 IOReturn err; 2539 int length; 2540 char * buf; 2541 2542 CHECK( IORegistryEntry, registry_entry, entry ); 2543 2544 *path_ool = NULL; 2545 *path_oolCnt = 0; 2546 length = sizeof(io_string_inband_t); 2547 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess; 2548 else 2549 { 2550 length = kMaxPath; 2551 buf = IONew(char, length); 2552 if (!buf) err = kIOReturnNoMemory; 2553 else 
if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError; 2554 else 2555 { 2556 *path_oolCnt = length; 2557 err = copyoutkdata(buf, length, path_ool); 2558 } 2559 if (buf) IODelete(buf, char, kMaxPath); 2560 } 2561 2562 return (err); 2563 } 2564 2565 2566 /* Routine io_registry_entry_get_name */ 2567 kern_return_t is_io_registry_entry_get_name( 2568 io_object_t registry_entry, 2569 io_name_t name ) 2570 { 2571 CHECK( IORegistryEntry, registry_entry, entry ); 2572 2573 strncpy( name, entry->getName(), sizeof( io_name_t)); 2574 2575 return( kIOReturnSuccess ); 2576 } 2577 2578 /* Routine io_registry_entry_get_name_in_plane */ 2579 kern_return_t is_io_registry_entry_get_name_in_plane( 2580 io_object_t registry_entry, 2581 io_name_t planeName, 2582 io_name_t name ) 2583 { 2584 const IORegistryPlane * plane; 2585 CHECK( IORegistryEntry, registry_entry, entry ); 2586 2587 if( planeName[0]) 2588 plane = IORegistryEntry::getPlane( planeName ); 2589 else 2590 plane = 0; 2591 2592 strncpy( name, entry->getName( plane), sizeof( io_name_t)); 2593 2594 return( kIOReturnSuccess ); 2595 } 2596 2597 /* Routine io_registry_entry_get_location_in_plane */ 2598 kern_return_t is_io_registry_entry_get_location_in_plane( 2599 io_object_t registry_entry, 2600 io_name_t planeName, 2601 io_name_t location ) 2602 { 2603 const IORegistryPlane * plane; 2604 CHECK( IORegistryEntry, registry_entry, entry ); 2605 2606 if( planeName[0]) 2607 plane = IORegistryEntry::getPlane( planeName ); 2608 else 2609 plane = 0; 2610 2611 const char * cstr = entry->getLocation( plane ); 2612 2613 if( cstr) { 2614 strncpy( location, cstr, sizeof( io_name_t)); 2615 return( kIOReturnSuccess ); 2616 } else 2617 return( kIOReturnNotFound ); 2618 } 2619 2620 /* Routine io_registry_entry_get_registry_entry_id */ 2621 kern_return_t is_io_registry_entry_get_registry_entry_id( 2622 io_object_t registry_entry, 2623 uint64_t *entry_id ) 2624 { 2625 CHECK( IORegistryEntry, registry_entry, entry ); 2626 2627 *entry_id = entry->getRegistryEntryID(); 2628 2629 return (kIOReturnSuccess); 2630 } 2631 2632 /* Routine io_registry_entry_get_property */ 2633 kern_return_t is_io_registry_entry_get_property_bytes( 2634 io_object_t registry_entry, 2635 io_name_t property_name, 2636 io_struct_inband_t buf, 2637 mach_msg_type_number_t *dataCnt ) 2638 { 2639 OSObject * obj; 2640 OSData * data; 2641 OSString * str; 2642 OSBoolean * boo; 2643 OSNumber * off; 2644 UInt64 offsetBytes; 2645 unsigned int len = 0; 2646 const void * bytes = 0; 2647 IOReturn ret = kIOReturnSuccess; 2648 2649 CHECK( IORegistryEntry, registry_entry, entry ); 2650 2651 #if CONFIG_MACF 2652 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) 2653 return kIOReturnNotPermitted; 2654 #endif 2655 2656 obj = entry->copyProperty(property_name); 2657 if( !obj) 2658 return( kIOReturnNoResources ); 2659 2660 // One day OSData will be a common container base class 2661 // until then... 2662 if( (data = OSDynamicCast( OSData, obj ))) { 2663 len = data->getLength(); 2664 bytes = data->getBytesNoCopy(); 2665 2666 } else if( (str = OSDynamicCast( OSString, obj ))) { 2667 len = str->getLength() + 1; 2668 bytes = str->getCStringNoCopy(); 2669 2670 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) { 2671 len = boo->isTrue() ? sizeof("Yes") : sizeof("No"); 2672 bytes = boo->isTrue() ? 
"Yes" : "No"; 2673 2674 } else if( (off = OSDynamicCast( OSNumber, obj ))) { 2675 offsetBytes = off->unsigned64BitValue(); 2676 len = off->numberOfBytes(); 2677 bytes = &offsetBytes; 2678 #ifdef __BIG_ENDIAN__ 2679 bytes = (const void *) 2680 (((UInt32) bytes) + (sizeof( UInt64) - len)); 2681 #endif 2682 2683 } else 2684 ret = kIOReturnBadArgument; 2685 2686 if( bytes) { 2687 if( *dataCnt < len) 2688 ret = kIOReturnIPCError; 2689 else { 2690 *dataCnt = len; 2691 bcopy( bytes, buf, len ); 2692 } 2693 } 2694 obj->release(); 2695 2696 return( ret ); 2697 } 2698 2699 2700 /* Routine io_registry_entry_get_property */ 2701 kern_return_t is_io_registry_entry_get_property( 2702 io_object_t registry_entry, 2703 io_name_t property_name, 2704 io_buf_ptr_t *properties, 2705 mach_msg_type_number_t *propertiesCnt ) 2706 { 2707 kern_return_t err; 2708 vm_size_t len; 2709 OSObject * obj; 2710 2711 CHECK( IORegistryEntry, registry_entry, entry ); 2712 2713 #if CONFIG_MACF 2714 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) 2715 return kIOReturnNotPermitted; 2716 #endif 2717 2718 obj = entry->copyProperty(property_name); 2719 if( !obj) 2720 return( kIOReturnNotFound ); 2721 2722 OSSerialize * s = OSSerialize::withCapacity(4096); 2723 if( !s) { 2724 obj->release(); 2725 return( kIOReturnNoMemory ); 2726 } 2727 2728 if( obj->serialize( s )) { 2729 len = s->getLength(); 2730 *propertiesCnt = len; 2731 err = copyoutkdata( s->text(), len, properties ); 2732 2733 } else 2734 err = kIOReturnUnsupported; 2735 2736 s->release(); 2737 obj->release(); 2738 2739 return( err ); 2740 } 2741 2742 /* Routine io_registry_entry_get_property_recursively */ 2743 kern_return_t is_io_registry_entry_get_property_recursively( 2744 io_object_t registry_entry, 2745 io_name_t plane, 2746 io_name_t property_name, 2747 uint32_t options, 2748 io_buf_ptr_t *properties, 2749 mach_msg_type_number_t *propertiesCnt ) 2750 { 2751 kern_return_t err; 2752 vm_size_t len; 2753 OSObject * obj; 2754 2755 CHECK( IORegistryEntry, registry_entry, entry ); 2756 2757 #if CONFIG_MACF 2758 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) 2759 return kIOReturnNotPermitted; 2760 #endif 2761 2762 obj = entry->copyProperty( property_name, 2763 IORegistryEntry::getPlane( plane ), options); 2764 if( !obj) 2765 return( kIOReturnNotFound ); 2766 2767 OSSerialize * s = OSSerialize::withCapacity(4096); 2768 if( !s) { 2769 obj->release(); 2770 return( kIOReturnNoMemory ); 2771 } 2772 2773 if( obj->serialize( s )) { 2774 len = s->getLength(); 2775 *propertiesCnt = len; 2776 err = copyoutkdata( s->text(), len, properties ); 2777 2778 } else 2779 err = kIOReturnUnsupported; 2780 2781 s->release(); 2782 obj->release(); 2783 2784 return( err ); 2785 } 2786 2787 #if CONFIG_MACF 2788 2789 static kern_return_t 2790 filteredProperties(IORegistryEntry *entry, OSDictionary *properties, OSDictionary **filteredp) 2791 { 2792 kern_return_t err = 0; 2793 OSDictionary *filtered = NULL; 2794 OSCollectionIterator *iter = NULL; 2795 OSSymbol *key; 2796 OSObject *p; 2797 kauth_cred_t cred = kauth_cred_get(); 2798 2799 if (properties == NULL) 2800 return kIOReturnUnsupported; 2801 2802 if ((iter = OSCollectionIterator::withCollection(properties)) == NULL || 2803 (filtered = OSDictionary::withCapacity(properties->getCapacity())) == NULL) { 2804 err = kIOReturnNoMemory; 2805 goto out; 2806 } 2807 2808 while ((p = iter->getNextObject()) != NULL) { 2809 if ((key = OSDynamicCast(OSSymbol, p)) == NULL || 2810 
mac_iokit_check_get_property(cred, entry, key->getCStringNoCopy()) != 0) 2811 continue; 2812 filtered->setObject(key, properties->getObject(key)); 2813 } 2814 2815 out: 2816 if (iter != NULL) 2817 iter->release(); 2818 *filteredp = filtered; 2819 return err; 2820 } 2821 2822 #endif 2823 2824 /* Routine io_registry_entry_get_properties */ 2825 kern_return_t is_io_registry_entry_get_properties( 2826 io_object_t registry_entry, 2827 io_buf_ptr_t *properties, 2828 mach_msg_type_number_t *propertiesCnt ) 2829 { 2830 kern_return_t err = 0; 2831 vm_size_t len; 2832 2833 CHECK( IORegistryEntry, registry_entry, entry ); 2834 2835 OSSerialize * s = OSSerialize::withCapacity(4096); 2836 if( !s) 2837 return( kIOReturnNoMemory ); 2838 2839 if (!entry->serializeProperties(s)) 2840 err = kIOReturnUnsupported; 2841 2842 #if CONFIG_MACF 2843 if (!err && mac_iokit_check_filter_properties(kauth_cred_get(), entry)) { 2844 OSObject *propobj = OSUnserializeXML(s->text(), s->getLength()); 2845 OSDictionary *filteredprops = NULL; 2846 err = filteredProperties(entry, OSDynamicCast(OSDictionary, propobj), &filteredprops); 2847 if (propobj) propobj->release(); 2848 2849 if (!err) { 2850 s->clearText(); 2851 if (!filteredprops->serialize(s)) 2852 err = kIOReturnUnsupported; 2853 } 2854 if (filteredprops != NULL) 2855 filteredprops->release(); 2856 } 2857 #endif /* CONFIG_MACF */ 2858 2859 if (!err) { 2860 len = s->getLength(); 2861 *propertiesCnt = len; 2862 err = copyoutkdata( s->text(), len, properties ); 2863 } 2864 2865 s->release(); 2866 return( err ); 2867 } 2868 2869 #if CONFIG_MACF 2870 2871 struct GetPropertiesEditorRef 2872 { 2873 kauth_cred_t cred; 2874 IORegistryEntry * entry; 2875 OSCollection * root; 2876 }; 2877 2878 static const OSMetaClassBase * 2879 GetPropertiesEditor(void * reference, 2880 OSSerialize * s, 2881 OSCollection * container, 2882 const OSSymbol * name, 2883 const OSMetaClassBase * value) 2884 { 2885 GetPropertiesEditorRef * ref = (typeof(ref)) reference; 2886 2887 if (!ref->root) ref->root = container; 2888 if (ref->root == container) 2889 { 2890 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) 2891 { 2892 value = 0; 2893 } 2894 } 2895 if (value) value->retain(); 2896 return (value); 2897 } 2898 2899 #endif /* CONFIG_MACF */ 2900 2901 /* Routine io_registry_entry_get_properties */ 2902 kern_return_t is_io_registry_entry_get_properties_bin( 2903 io_object_t registry_entry, 2904 io_buf_ptr_t *properties, 2905 mach_msg_type_number_t *propertiesCnt) 2906 { 2907 kern_return_t err = kIOReturnSuccess; 2908 vm_size_t len; 2909 OSSerialize * s; 2910 OSSerialize::Editor editor = 0; 2911 void * editRef = 0; 2912 2913 CHECK(IORegistryEntry, registry_entry, entry); 2914 2915 #if CONFIG_MACF 2916 GetPropertiesEditorRef ref; 2917 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) 2918 { 2919 editor = &GetPropertiesEditor; 2920 editRef = &ref; 2921 ref.cred = kauth_cred_get(); 2922 ref.entry = entry; 2923 ref.root = 0; 2924 } 2925 #endif 2926 2927 s = OSSerialize::binaryWithCapacity(4096, editor, editRef); 2928 if (!s) return (kIOReturnNoMemory); 2929 2930 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported; 2931 2932 if (kIOReturnSuccess == err) 2933 { 2934 len = s->getLength(); 2935 *propertiesCnt = len; 2936 err = copyoutkdata(s->text(), len, properties); 2937 } 2938 s->release(); 2939 2940 return (err); 2941 } 2942 2943 /* Routine io_registry_entry_get_property_bin */ 2944 kern_return_t is_io_registry_entry_get_property_bin( 2945 
io_object_t registry_entry, 2946 io_name_t plane, 2947 io_name_t property_name, 2948 uint32_t options, 2949 io_buf_ptr_t *properties, 2950 mach_msg_type_number_t *propertiesCnt ) 2951 { 2952 kern_return_t err; 2953 vm_size_t len; 2954 OSObject * obj; 2955 const OSSymbol * sym; 2956 2957 CHECK( IORegistryEntry, registry_entry, entry ); 2958 2959 #if CONFIG_MACF 2960 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) 2961 return kIOReturnNotPermitted; 2962 #endif 2963 2964 if ((kIORegistryIterateRecursively & options) && plane[0]) 2965 { 2966 obj = entry->copyProperty(property_name, 2967 IORegistryEntry::getPlane(plane), options); 2968 } 2969 else 2970 { 2971 obj = entry->copyProperty(property_name); 2972 } 2973 2974 if( !obj) 2975 return( kIOReturnNotFound ); 2976 2977 sym = OSSymbol::withCString(property_name); 2978 if (sym) 2979 { 2980 if (gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym); 2981 sym->release(); 2982 } 2983 2984 OSSerialize * s = OSSerialize::binaryWithCapacity(4096); 2985 if( !s) { 2986 obj->release(); 2987 return( kIOReturnNoMemory ); 2988 } 2989 2990 if( obj->serialize( s )) { 2991 len = s->getLength(); 2992 *propertiesCnt = len; 2993 err = copyoutkdata( s->text(), len, properties ); 2994 2995 } else err = kIOReturnUnsupported; 2996 2997 s->release(); 2998 obj->release(); 2999 3000 return( err ); 3001 } 3002 3003 3004 /* Routine io_registry_entry_set_properties */ 3005 kern_return_t is_io_registry_entry_set_properties 3006 ( 3007 io_object_t registry_entry, 3008 io_buf_ptr_t properties, 3009 mach_msg_type_number_t propertiesCnt, 3010 kern_return_t * result) 3011 { 3012 OSObject * obj; 3013 kern_return_t err; 3014 IOReturn res; 3015 vm_offset_t data; 3016 vm_map_offset_t map_data; 3017 3018 CHECK( IORegistryEntry, registry_entry, entry ); 3019 3020 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024) 3021 return( kIOReturnMessageTooLarge); 3022 3023 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); 3024 data = CAST_DOWN(vm_offset_t, map_data); 3025 3026 if( KERN_SUCCESS == err) { 3027 3028 // must return success after vm_map_copyout() succeeds 3029 obj = OSUnserializeXML( (const char *) data, propertiesCnt ); 3030 vm_deallocate( kernel_map, data, propertiesCnt ); 3031 3032 if (!obj) 3033 res = kIOReturnBadArgument; 3034 #if CONFIG_MACF 3035 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(), 3036 registry_entry, obj)) 3037 { 3038 res = kIOReturnNotPermitted; 3039 } 3040 #endif 3041 else 3042 { 3043 res = entry->setProperties( obj ); 3044 } 3045 3046 if (obj) 3047 obj->release(); 3048 } else 3049 res = err; 3050 3051 *result = res; 3052 return( err ); 3053 } 3054 3055 /* Routine io_registry_entry_get_child_iterator */ 3056 kern_return_t is_io_registry_entry_get_child_iterator( 3057 io_object_t registry_entry, 3058 io_name_t plane, 3059 io_object_t *iterator ) 3060 { 3061 CHECK( IORegistryEntry, registry_entry, entry ); 3062 3063 *iterator = entry->getChildIterator( 3064 IORegistryEntry::getPlane( plane )); 3065 3066 return( kIOReturnSuccess ); 3067 } 3068 3069 /* Routine io_registry_entry_get_parent_iterator */ 3070 kern_return_t is_io_registry_entry_get_parent_iterator( 3071 io_object_t registry_entry, 3072 io_name_t plane, 3073 io_object_t *iterator) 3074 { 3075 CHECK( IORegistryEntry, registry_entry, entry ); 3076 3077 *iterator = entry->getParentIterator( 3078 IORegistryEntry::getPlane( plane )); 3079 3080 return( kIOReturnSuccess ); 3081 } 3082 3083 /* Routine 
io_service_get_busy_state */ 3084 kern_return_t is_io_service_get_busy_state( 3085 io_object_t _service, 3086 uint32_t *busyState ) 3087 { 3088 CHECK( IOService, _service, service ); 3089 3090 *busyState = service->getBusyState(); 3091 3092 return( kIOReturnSuccess ); 3093 } 3094 3095 /* Routine io_service_get_state */ 3096 kern_return_t is_io_service_get_state( 3097 io_object_t _service, 3098 uint64_t *state, 3099 uint32_t *busy_state, 3100 uint64_t *accumulated_busy_time ) 3101 { 3102 CHECK( IOService, _service, service ); 3103 3104 *state = service->getState(); 3105 *busy_state = service->getBusyState(); 3106 *accumulated_busy_time = service->getAccumulatedBusyTime(); 3107 3108 return( kIOReturnSuccess ); 3109 } 3110 3111 /* Routine io_service_wait_quiet */ 3112 kern_return_t is_io_service_wait_quiet( 3113 io_object_t _service, 3114 mach_timespec_t wait_time ) 3115 { 3116 uint64_t timeoutNS; 3117 3118 CHECK( IOService, _service, service ); 3119 3120 timeoutNS = wait_time.tv_sec; 3121 timeoutNS *= kSecondScale; 3122 timeoutNS += wait_time.tv_nsec; 3123 3124 return( service->waitQuiet(timeoutNS) ); 3125 } 3126 3127 /* Routine io_service_request_probe */ 3128 kern_return_t is_io_service_request_probe( 3129 io_object_t _service, 3130 uint32_t options ) 3131 { 3132 CHECK( IOService, _service, service ); 3133 3134 return( service->requestProbe( options )); 3135 } 3136 3137 /* Routine io_service_get_authorization_id */ 3138 kern_return_t is_io_service_get_authorization_id( 3139 io_object_t _service, 3140 uint64_t *authorization_id ) 3141 { 3142 kern_return_t kr; 3143 3144 CHECK( IOService, _service, service ); 3145 3146 kr = IOUserClient::clientHasPrivilege( (void *) current_task(), 3147 kIOClientPrivilegeAdministrator ); 3148 if( kIOReturnSuccess != kr) 3149 return( kr ); 3150 3151 *authorization_id = service->getAuthorizationID(); 3152 3153 return( kr ); 3154 } 3155 3156 /* Routine io_service_set_authorization_id */ 3157 kern_return_t is_io_service_set_authorization_id( 3158 io_object_t _service, 3159 uint64_t authorization_id ) 3160 { 3161 CHECK( IOService, _service, service ); 3162 3163 return( service->setAuthorizationID( authorization_id ) ); 3164 } 3165 3166 /* Routine io_service_open_ndr */ 3167 kern_return_t is_io_service_open_extended( 3168 io_object_t _service, 3169 task_t owningTask, 3170 uint32_t connect_type, 3171 NDR_record_t ndr, 3172 io_buf_ptr_t properties, 3173 mach_msg_type_number_t propertiesCnt, 3174 kern_return_t * result, 3175 io_object_t *connection ) 3176 { 3177 IOUserClient * client = 0; 3178 kern_return_t err = KERN_SUCCESS; 3179 IOReturn res = kIOReturnSuccess; 3180 OSDictionary * propertiesDict = 0; 3181 bool crossEndian; 3182 bool disallowAccess; 3183 3184 CHECK( IOService, _service, service ); 3185 3186 if (!owningTask) return (kIOReturnBadArgument); 3187 3188 do 3189 { 3190 if (properties) 3191 { 3192 OSObject * obj; 3193 vm_offset_t data; 3194 vm_map_offset_t map_data; 3195 3196 if( propertiesCnt > sizeof(io_struct_inband_t)) 3197 return( kIOReturnMessageTooLarge); 3198 3199 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); 3200 res = err; 3201 data = CAST_DOWN(vm_offset_t, map_data); 3202 if (KERN_SUCCESS == err) 3203 { 3204 // must return success after vm_map_copyout() succeeds 3205 obj = OSUnserializeXML( (const char *) data, propertiesCnt ); 3206 vm_deallocate( kernel_map, data, propertiesCnt ); 3207 propertiesDict = OSDynamicCast(OSDictionary, obj); 3208 if (!propertiesDict) 3209 { 3210 res = kIOReturnBadArgument; 3211 if (obj) 
3212 obj->release(); 3213 } 3214 } 3215 if (kIOReturnSuccess != res) 3216 break; 3217 } 3218 3219 crossEndian = (ndr.int_rep != NDR_record.int_rep); 3220 if (crossEndian) 3221 { 3222 if (!propertiesDict) 3223 propertiesDict = OSDictionary::withCapacity(4); 3224 OSData * data = OSData::withBytes(&ndr, sizeof(ndr)); 3225 if (data) 3226 { 3227 if (propertiesDict) 3228 propertiesDict->setObject(kIOUserClientCrossEndianKey, data); 3229 data->release(); 3230 } 3231 } 3232 3233 res = service->newUserClient( owningTask, (void *) owningTask, 3234 connect_type, propertiesDict, &client ); 3235 3236 if (propertiesDict) 3237 propertiesDict->release(); 3238 3239 if (res == kIOReturnSuccess) 3240 { 3241 assert( OSDynamicCast(IOUserClient, client) ); 3242 3243 disallowAccess = (crossEndian 3244 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey)) 3245 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey))); 3246 if (disallowAccess) res = kIOReturnUnsupported; 3247 #if CONFIG_MACF 3248 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) 3249 res = kIOReturnNotPermitted; 3250 #endif 3251 if (kIOReturnSuccess != res) 3252 { 3253 IOStatisticsClientCall(); 3254 client->clientClose(); 3255 client->release(); 3256 client = 0; 3257 break; 3258 } 3259 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey)); 3260 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid()); 3261 if (creatorName) 3262 { 3263 client->setProperty(kIOUserClientCreatorKey, creatorName); 3264 creatorName->release(); 3265 } 3266 client->setTerminateDefer(service, false); 3267 } 3268 } 3269 while (false); 3270 3271 *connection = client; 3272 *result = res; 3273 3274 return (err); 3275 } 3276 3277 /* Routine io_service_close */ 3278 kern_return_t is_io_service_close( 3279 io_object_t connection ) 3280 { 3281 OSSet * mappings; 3282 if ((mappings = OSDynamicCast(OSSet, connection))) 3283 return( kIOReturnSuccess ); 3284 3285 CHECK( IOUserClient, connection, client ); 3286 3287 IOStatisticsClientCall(); 3288 client->clientClose(); 3289 3290 return( kIOReturnSuccess ); 3291 } 3292 3293 /* Routine io_connect_get_service */ 3294 kern_return_t is_io_connect_get_service( 3295 io_object_t connection, 3296 io_object_t *service ) 3297 { 3298 IOService * theService; 3299 3300 CHECK( IOUserClient, connection, client ); 3301 3302 theService = client->getService(); 3303 if( theService) 3304 theService->retain(); 3305 3306 *service = theService; 3307 3308 return( theService ? 
kIOReturnSuccess : kIOReturnUnsupported ); 3309 } 3310 3311 /* Routine io_connect_set_notification_port */ 3312 kern_return_t is_io_connect_set_notification_port( 3313 io_object_t connection, 3314 uint32_t notification_type, 3315 mach_port_t port, 3316 uint32_t reference) 3317 { 3318 CHECK( IOUserClient, connection, client ); 3319 3320 IOStatisticsClientCall(); 3321 return( client->registerNotificationPort( port, notification_type, 3322 (io_user_reference_t) reference )); 3323 } 3324 3325 /* Routine io_connect_set_notification_port */ 3326 kern_return_t is_io_connect_set_notification_port_64( 3327 io_object_t connection, 3328 uint32_t notification_type, 3329 mach_port_t port, 3330 io_user_reference_t reference) 3331 { 3332 CHECK( IOUserClient, connection, client ); 3333 3334 IOStatisticsClientCall(); 3335 return( client->registerNotificationPort( port, notification_type, 3336 reference )); 3337 } 3338 3339 /* Routine io_connect_map_memory_into_task */ 3340 kern_return_t is_io_connect_map_memory_into_task 3341 ( 3342 io_connect_t connection, 3343 uint32_t memory_type, 3344 task_t into_task, 3345 mach_vm_address_t *address, 3346 mach_vm_size_t *size, 3347 uint32_t flags 3348 ) 3349 { 3350 IOReturn err; 3351 IOMemoryMap * map; 3352 3353 CHECK( IOUserClient, connection, client ); 3354 3355 if (!into_task) return (kIOReturnBadArgument); 3356 3357 IOStatisticsClientCall(); 3358 map = client->mapClientMemory64( memory_type, into_task, flags, *address ); 3359 3360 if( map) { 3361 *address = map->getAddress(); 3362 if( size) 3363 *size = map->getSize(); 3364 3365 if( client->sharedInstance 3366 || (into_task != current_task())) { 3367 // push a name out to the task owning the map, 3368 // so we can clean up maps 3369 mach_port_name_t name __unused = 3370 IOMachPort::makeSendRightForTask( 3371 into_task, map, IKOT_IOKIT_OBJECT ); 3372 3373 } else { 3374 // keep it with the user client 3375 IOLockLock( gIOObjectPortLock); 3376 if( 0 == client->mappings) 3377 client->mappings = OSSet::withCapacity(2); 3378 if( client->mappings) 3379 client->mappings->setObject( map); 3380 IOLockUnlock( gIOObjectPortLock); 3381 map->release(); 3382 } 3383 err = kIOReturnSuccess; 3384 3385 } else 3386 err = kIOReturnBadArgument; 3387 3388 return( err ); 3389 } 3390 3391 /* Routine is_io_connect_map_memory */ 3392 kern_return_t is_io_connect_map_memory( 3393 io_object_t connect, 3394 uint32_t type, 3395 task_t task, 3396 uint32_t * mapAddr, 3397 uint32_t * mapSize, 3398 uint32_t flags ) 3399 { 3400 IOReturn err; 3401 mach_vm_address_t address; 3402 mach_vm_size_t size; 3403 3404 address = SCALAR64(*mapAddr); 3405 size = SCALAR64(*mapSize); 3406 3407 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags); 3408 3409 *mapAddr = SCALAR32(address); 3410 *mapSize = SCALAR32(size); 3411 3412 return (err); 3413 } 3414 3415 } /* extern "C" */ 3416 3417 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem) 3418 { 3419 OSIterator * iter; 3420 IOMemoryMap * map = 0; 3421 3422 IOLockLock(gIOObjectPortLock); 3423 3424 iter = OSCollectionIterator::withCollection(mappings); 3425 if(iter) 3426 { 3427 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) 3428 { 3429 if(mem == map->getMemoryDescriptor()) 3430 { 3431 map->retain(); 3432 mappings->removeObject(map); 3433 break; 3434 } 3435 } 3436 iter->release(); 3437 } 3438 3439 IOLockUnlock(gIOObjectPortLock); 3440 3441 return (map); 3442 } 3443 3444 extern "C" { 3445 3446 /* Routine io_connect_unmap_memory_from_task */ 
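/*
 * The routine below undoes is_io_connect_map_memory_into_task: it asks the
 * user client for the memory descriptor backing memory_type, creates a
 * kIOMapReference mapping in from_task to locate the existing IOMemoryMap,
 * drops that map from the client's retained mappings set, and then either
 * explicitly unmaps and drops the send right pushed to from_task (cross-task
 * case) or releases the map's port and the map itself (same-task case).
 *
 * A minimal user-space sketch of the map/unmap pairing, assuming the usual
 * IOKitLib wrappers; illustrative only, and kMyMemoryType is a placeholder
 * memory-type selector that is not defined in this file:
 *
 *   mach_vm_address_t addr = 0;
 *   mach_vm_size_t    size = 0;
 *   IOConnectMapMemory64(connect, kMyMemoryType, mach_task_self(),
 *                        &addr, &size, kIOMapAnywhere);
 *   // ... use the shared buffer ...
 *   IOConnectUnmapMemory64(connect, kMyMemoryType, mach_task_self(), addr);
 */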
3447 kern_return_t is_io_connect_unmap_memory_from_task 3448 ( 3449 io_connect_t connection, 3450 uint32_t memory_type, 3451 task_t from_task, 3452 mach_vm_address_t address) 3453 { 3454 IOReturn err; 3455 IOOptionBits options = 0; 3456 IOMemoryDescriptor * memory; 3457 IOMemoryMap * map; 3458 3459 CHECK( IOUserClient, connection, client ); 3460 3461 if (!from_task) return (kIOReturnBadArgument); 3462 3463 IOStatisticsClientCall(); 3464 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory ); 3465 3466 if( memory && (kIOReturnSuccess == err)) { 3467 3468 options = (options & ~kIOMapUserOptionsMask) 3469 | kIOMapAnywhere | kIOMapReference; 3470 3471 map = memory->createMappingInTask( from_task, address, options ); 3472 memory->release(); 3473 if( map) 3474 { 3475 IOLockLock( gIOObjectPortLock); 3476 if( client->mappings) 3477 client->mappings->removeObject( map); 3478 IOLockUnlock( gIOObjectPortLock); 3479 3480 mach_port_name_t name = 0; 3481 if (from_task != current_task()) 3482 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT ); 3483 if (name) 3484 { 3485 map->userClientUnmap(); 3486 err = iokit_mod_send_right( from_task, name, -2 ); 3487 err = kIOReturnSuccess; 3488 } 3489 else 3490 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT ); 3491 if (from_task == current_task()) 3492 map->release(); 3493 } 3494 else 3495 err = kIOReturnBadArgument; 3496 } 3497 3498 return( err ); 3499 } 3500 3501 kern_return_t is_io_connect_unmap_memory( 3502 io_object_t connect, 3503 uint32_t type, 3504 task_t task, 3505 uint32_t mapAddr ) 3506 { 3507 IOReturn err; 3508 mach_vm_address_t address; 3509 3510 address = SCALAR64(mapAddr); 3511 3512 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr); 3513 3514 return (err); 3515 } 3516 3517 3518 /* Routine io_connect_add_client */ 3519 kern_return_t is_io_connect_add_client( 3520 io_object_t connection, 3521 io_object_t connect_to) 3522 { 3523 CHECK( IOUserClient, connection, client ); 3524 CHECK( IOUserClient, connect_to, to ); 3525 3526 IOStatisticsClientCall(); 3527 return( client->connectClient( to ) ); 3528 } 3529 3530 3531 /* Routine io_connect_set_properties */ 3532 kern_return_t is_io_connect_set_properties( 3533 io_object_t connection, 3534 io_buf_ptr_t properties, 3535 mach_msg_type_number_t propertiesCnt, 3536 kern_return_t * result) 3537 { 3538 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result )); 3539 } 3540 3541 /* Routine io_user_client_method */ 3542 kern_return_t is_io_connect_method_var_output 3543 ( 3544 io_connect_t connection, 3545 uint32_t selector, 3546 io_scalar_inband64_t scalar_input, 3547 mach_msg_type_number_t scalar_inputCnt, 3548 io_struct_inband_t inband_input, 3549 mach_msg_type_number_t inband_inputCnt, 3550 mach_vm_address_t ool_input, 3551 mach_vm_size_t ool_input_size, 3552 io_struct_inband_t inband_output, 3553 mach_msg_type_number_t *inband_outputCnt, 3554 io_scalar_inband64_t scalar_output, 3555 mach_msg_type_number_t *scalar_outputCnt, 3556 io_buf_ptr_t *var_output, 3557 mach_msg_type_number_t *var_outputCnt 3558 ) 3559 { 3560 CHECK( IOUserClient, connection, client ); 3561 3562 IOExternalMethodArguments args; 3563 IOReturn ret; 3564 IOMemoryDescriptor * inputMD = 0; 3565 OSObject * structureVariableOutputData = 0; 3566 3567 bzero(&args.__reserved[0], sizeof(args.__reserved)); 3568 args.version = kIOExternalMethodArgumentsCurrentVersion; 3569 3570 args.selector = selector; 3571 3572 args.asyncWakePort = 
MACH_PORT_NULL; 3573 args.asyncReference = 0; 3574 args.asyncReferenceCount = 0; 3575 args.structureVariableOutputData = &structureVariableOutputData; 3576 3577 args.scalarInput = scalar_input; 3578 args.scalarInputCount = scalar_inputCnt; 3579 args.structureInput = inband_input; 3580 args.structureInputSize = inband_inputCnt; 3581 3582 if (ool_input) 3583 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, 3584 kIODirectionOut, current_task()); 3585 3586 args.structureInputDescriptor = inputMD; 3587 3588 args.scalarOutput = scalar_output; 3589 args.scalarOutputCount = *scalar_outputCnt; 3590 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0])); 3591 args.structureOutput = inband_output; 3592 args.structureOutputSize = *inband_outputCnt; 3593 args.structureOutputDescriptor = NULL; 3594 args.structureOutputDescriptorSize = 0; 3595 3596 IOStatisticsClientCall(); 3597 ret = client->externalMethod( selector, &args ); 3598 3599 *scalar_outputCnt = args.scalarOutputCount; 3600 *inband_outputCnt = args.structureOutputSize; 3601 3602 if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) 3603 { 3604 OSSerialize * serialize; 3605 OSData * data; 3606 vm_size_t len; 3607 3608 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) 3609 { 3610 len = serialize->getLength(); 3611 *var_outputCnt = len; 3612 ret = copyoutkdata(serialize->text(), len, var_output); 3613 } 3614 else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) 3615 { 3616 len = data->getLength(); 3617 *var_outputCnt = len; 3618 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output); 3619 } 3620 else 3621 { 3622 ret = kIOReturnUnderrun; 3623 } 3624 } 3625 3626 if (inputMD) 3627 inputMD->release(); 3628 if (structureVariableOutputData) 3629 structureVariableOutputData->release(); 3630 3631 return (ret); 3632 } 3633 3634 /* Routine io_user_client_method */ 3635 kern_return_t is_io_connect_method 3636 ( 3637 io_connect_t connection, 3638 uint32_t selector, 3639 io_scalar_inband64_t scalar_input, 3640 mach_msg_type_number_t scalar_inputCnt, 3641 io_struct_inband_t inband_input, 3642 mach_msg_type_number_t inband_inputCnt, 3643 mach_vm_address_t ool_input, 3644 mach_vm_size_t ool_input_size, 3645 io_struct_inband_t inband_output, 3646 mach_msg_type_number_t *inband_outputCnt, 3647 io_scalar_inband64_t scalar_output, 3648 mach_msg_type_number_t *scalar_outputCnt, 3649 mach_vm_address_t ool_output, 3650 mach_vm_size_t *ool_output_size 3651 ) 3652 { 3653 CHECK( IOUserClient, connection, client ); 3654 3655 IOExternalMethodArguments args; 3656 IOReturn ret; 3657 IOMemoryDescriptor * inputMD = 0; 3658 IOMemoryDescriptor * outputMD = 0; 3659 3660 bzero(&args.__reserved[0], sizeof(args.__reserved)); 3661 args.version = kIOExternalMethodArgumentsCurrentVersion; 3662 3663 args.selector = selector; 3664 3665 args.asyncWakePort = MACH_PORT_NULL; 3666 args.asyncReference = 0; 3667 args.asyncReferenceCount = 0; 3668 args.structureVariableOutputData = 0; 3669 3670 args.scalarInput = scalar_input; 3671 args.scalarInputCount = scalar_inputCnt; 3672 args.structureInput = inband_input; 3673 args.structureInputSize = inband_inputCnt; 3674 3675 if (ool_input) 3676 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, 3677 kIODirectionOut, current_task()); 3678 3679 args.structureInputDescriptor = inputMD; 3680 3681 args.scalarOutput = scalar_output; 3682 args.scalarOutputCount = *scalar_outputCnt; 3683 bzero(&scalar_output[0], *scalar_outputCnt * 
sizeof(scalar_output[0])); 3684 args.structureOutput = inband_output; 3685 args.structureOutputSize = *inband_outputCnt; 3686 3687 if (ool_output && ool_output_size) 3688 { 3689 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, 3690 kIODirectionIn, current_task()); 3691 } 3692 3693 args.structureOutputDescriptor = outputMD; 3694 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0; 3695 3696 IOStatisticsClientCall(); 3697 ret = client->externalMethod( selector, &args ); 3698 3699 *scalar_outputCnt = args.scalarOutputCount; 3700 *inband_outputCnt = args.structureOutputSize; 3701 *ool_output_size = args.structureOutputDescriptorSize; 3702 3703 if (inputMD) 3704 inputMD->release(); 3705 if (outputMD) 3706 outputMD->release(); 3707 3708 return (ret); 3709 } 3710 3711 /* Routine io_async_user_client_method */ 3712 kern_return_t is_io_connect_async_method 3713 ( 3714 io_connect_t connection, 3715 mach_port_t wake_port, 3716 io_async_ref64_t reference, 3717 mach_msg_type_number_t referenceCnt, 3718 uint32_t selector, 3719 io_scalar_inband64_t scalar_input, 3720 mach_msg_type_number_t scalar_inputCnt, 3721 io_struct_inband_t inband_input, 3722 mach_msg_type_number_t inband_inputCnt, 3723 mach_vm_address_t ool_input, 3724 mach_vm_size_t ool_input_size, 3725 io_struct_inband_t inband_output, 3726 mach_msg_type_number_t *inband_outputCnt, 3727 io_scalar_inband64_t scalar_output, 3728 mach_msg_type_number_t *scalar_outputCnt, 3729 mach_vm_address_t ool_output, 3730 mach_vm_size_t * ool_output_size 3731 ) 3732 { 3733 CHECK( IOUserClient, connection, client ); 3734 3735 IOExternalMethodArguments args; 3736 IOReturn ret; 3737 IOMemoryDescriptor * inputMD = 0; 3738 IOMemoryDescriptor * outputMD = 0; 3739 3740 bzero(&args.__reserved[0], sizeof(args.__reserved)); 3741 args.version = kIOExternalMethodArgumentsCurrentVersion; 3742 3743 reference[0] = (io_user_reference_t) wake_port; 3744 if (vm_map_is_64bit(get_task_map(current_task()))) 3745 reference[0] |= kIOUCAsync64Flag; 3746 3747 args.selector = selector; 3748 3749 args.asyncWakePort = wake_port; 3750 args.asyncReference = reference; 3751 args.asyncReferenceCount = referenceCnt; 3752 3753 args.scalarInput = scalar_input; 3754 args.scalarInputCount = scalar_inputCnt; 3755 args.structureInput = inband_input; 3756 args.structureInputSize = inband_inputCnt; 3757 3758 if (ool_input) 3759 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, 3760 kIODirectionOut, current_task()); 3761 3762 args.structureInputDescriptor = inputMD; 3763 3764 args.scalarOutput = scalar_output; 3765 args.scalarOutputCount = *scalar_outputCnt; 3766 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0])); 3767 args.structureOutput = inband_output; 3768 args.structureOutputSize = *inband_outputCnt; 3769 3770 if (ool_output) 3771 { 3772 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, 3773 kIODirectionIn, current_task()); 3774 } 3775 3776 args.structureOutputDescriptor = outputMD; 3777 args.structureOutputDescriptorSize = *ool_output_size; 3778 3779 IOStatisticsClientCall(); 3780 ret = client->externalMethod( selector, &args ); 3781 3782 *inband_outputCnt = args.structureOutputSize; 3783 *ool_output_size = args.structureOutputDescriptorSize; 3784 3785 if (inputMD) 3786 inputMD->release(); 3787 if (outputMD) 3788 outputMD->release(); 3789 3790 return (ret); 3791 } 3792 3793 /* Routine io_connect_method_scalarI_scalarO */ 3794 kern_return_t 
is_io_connect_method_scalarI_scalarO( 3795 io_object_t connect, 3796 uint32_t index, 3797 io_scalar_inband_t input, 3798 mach_msg_type_number_t inputCount, 3799 io_scalar_inband_t output, 3800 mach_msg_type_number_t * outputCount ) 3801 { 3802 IOReturn err; 3803 uint32_t i; 3804 io_scalar_inband64_t _input; 3805 io_scalar_inband64_t _output; 3806 3807 mach_msg_type_number_t struct_outputCnt = 0; 3808 mach_vm_size_t ool_output_size = 0; 3809 3810 bzero(&_output[0], sizeof(_output)); 3811 for (i = 0; i < inputCount; i++) 3812 _input[i] = SCALAR64(input[i]); 3813 3814 err = is_io_connect_method(connect, index, 3815 _input, inputCount, 3816 NULL, 0, 3817 0, 0, 3818 NULL, &struct_outputCnt, 3819 _output, outputCount, 3820 0, &ool_output_size); 3821 3822 for (i = 0; i < *outputCount; i++) 3823 output[i] = SCALAR32(_output[i]); 3824 3825 return (err); 3826 } 3827 3828 kern_return_t shim_io_connect_method_scalarI_scalarO( 3829 IOExternalMethod * method, 3830 IOService * object, 3831 const io_user_scalar_t * input, 3832 mach_msg_type_number_t inputCount, 3833 io_user_scalar_t * output, 3834 mach_msg_type_number_t * outputCount ) 3835 { 3836 IOMethod func; 3837 io_scalar_inband_t _output; 3838 IOReturn err; 3839 err = kIOReturnBadArgument; 3840 3841 bzero(&_output[0], sizeof(_output)); 3842 do { 3843 3844 if( inputCount != method->count0) 3845 { 3846 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 3847 continue; 3848 } 3849 if( *outputCount != method->count1) 3850 { 3851 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 3852 continue; 3853 } 3854 3855 func = method->func; 3856 3857 switch( inputCount) { 3858 3859 case 6: 3860 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3861 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) ); 3862 break; 3863 case 5: 3864 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3865 ARG32(input[3]), ARG32(input[4]), 3866 &_output[0] ); 3867 break; 3868 case 4: 3869 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3870 ARG32(input[3]), 3871 &_output[0], &_output[1] ); 3872 break; 3873 case 3: 3874 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3875 &_output[0], &_output[1], &_output[2] ); 3876 break; 3877 case 2: 3878 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), 3879 &_output[0], &_output[1], &_output[2], 3880 &_output[3] ); 3881 break; 3882 case 1: 3883 err = (object->*func)( ARG32(input[0]), 3884 &_output[0], &_output[1], &_output[2], 3885 &_output[3], &_output[4] ); 3886 break; 3887 case 0: 3888 err = (object->*func)( &_output[0], &_output[1], &_output[2], 3889 &_output[3], &_output[4], &_output[5] ); 3890 break; 3891 3892 default: 3893 IOLog("%s: Bad method table\n", object->getName()); 3894 } 3895 } 3896 while( false); 3897 3898 uint32_t i; 3899 for (i = 0; i < *outputCount; i++) 3900 output[i] = SCALAR32(_output[i]); 3901 3902 return( err); 3903 } 3904 3905 /* Routine io_async_method_scalarI_scalarO */ 3906 kern_return_t is_io_async_method_scalarI_scalarO( 3907 io_object_t connect, 3908 mach_port_t wake_port, 3909 io_async_ref_t reference, 3910 mach_msg_type_number_t referenceCnt, 3911 uint32_t index, 3912 io_scalar_inband_t input, 3913 mach_msg_type_number_t inputCount, 3914 io_scalar_inband_t output, 3915 mach_msg_type_number_t * outputCount ) 3916 { 3917 IOReturn err; 3918 uint32_t i; 3919 io_scalar_inband64_t _input; 3920 io_scalar_inband64_t _output; 3921 io_async_ref64_t _reference; 3922 3923 
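/*
 * Legacy 32-bit async shim: widen the async reference words and scalar
 * inputs to their 64-bit forms (REF64 / SCALAR64) before forwarding to
 * is_io_connect_async_method, then truncate the scalar outputs back to
 * 32 bits (SCALAR32) on the way out.
 */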
bzero(&_output[0], sizeof(_output)); 3924 for (i = 0; i < referenceCnt; i++) 3925 _reference[i] = REF64(reference[i]); 3926 3927 mach_msg_type_number_t struct_outputCnt = 0; 3928 mach_vm_size_t ool_output_size = 0; 3929 3930 for (i = 0; i < inputCount; i++) 3931 _input[i] = SCALAR64(input[i]); 3932 3933 err = is_io_connect_async_method(connect, 3934 wake_port, _reference, referenceCnt, 3935 index, 3936 _input, inputCount, 3937 NULL, 0, 3938 0, 0, 3939 NULL, &struct_outputCnt, 3940 _output, outputCount, 3941 0, &ool_output_size); 3942 3943 for (i = 0; i < *outputCount; i++) 3944 output[i] = SCALAR32(_output[i]); 3945 3946 return (err); 3947 } 3948 /* Routine io_async_method_scalarI_structureO */ 3949 kern_return_t is_io_async_method_scalarI_structureO( 3950 io_object_t connect, 3951 mach_port_t wake_port, 3952 io_async_ref_t reference, 3953 mach_msg_type_number_t referenceCnt, 3954 uint32_t index, 3955 io_scalar_inband_t input, 3956 mach_msg_type_number_t inputCount, 3957 io_struct_inband_t output, 3958 mach_msg_type_number_t * outputCount ) 3959 { 3960 uint32_t i; 3961 io_scalar_inband64_t _input; 3962 io_async_ref64_t _reference; 3963 3964 for (i = 0; i < referenceCnt; i++) 3965 _reference[i] = REF64(reference[i]); 3966 3967 mach_msg_type_number_t scalar_outputCnt = 0; 3968 mach_vm_size_t ool_output_size = 0; 3969 3970 for (i = 0; i < inputCount; i++) 3971 _input[i] = SCALAR64(input[i]); 3972 3973 return (is_io_connect_async_method(connect, 3974 wake_port, _reference, referenceCnt, 3975 index, 3976 _input, inputCount, 3977 NULL, 0, 3978 0, 0, 3979 output, outputCount, 3980 NULL, &scalar_outputCnt, 3981 0, &ool_output_size)); 3982 } 3983 3984 /* Routine io_async_method_scalarI_structureI */ 3985 kern_return_t is_io_async_method_scalarI_structureI( 3986 io_connect_t connect, 3987 mach_port_t wake_port, 3988 io_async_ref_t reference, 3989 mach_msg_type_number_t referenceCnt, 3990 uint32_t index, 3991 io_scalar_inband_t input, 3992 mach_msg_type_number_t inputCount, 3993 io_struct_inband_t inputStruct, 3994 mach_msg_type_number_t inputStructCount ) 3995 { 3996 uint32_t i; 3997 io_scalar_inband64_t _input; 3998 io_async_ref64_t _reference; 3999 4000 for (i = 0; i < referenceCnt; i++) 4001 _reference[i] = REF64(reference[i]); 4002 4003 mach_msg_type_number_t scalar_outputCnt = 0; 4004 mach_msg_type_number_t inband_outputCnt = 0; 4005 mach_vm_size_t ool_output_size = 0; 4006 4007 for (i = 0; i < inputCount; i++) 4008 _input[i] = SCALAR64(input[i]); 4009 4010 return (is_io_connect_async_method(connect, 4011 wake_port, _reference, referenceCnt, 4012 index, 4013 _input, inputCount, 4014 inputStruct, inputStructCount, 4015 0, 0, 4016 NULL, &inband_outputCnt, 4017 NULL, &scalar_outputCnt, 4018 0, &ool_output_size)); 4019 } 4020 4021 /* Routine io_async_method_structureI_structureO */ 4022 kern_return_t is_io_async_method_structureI_structureO( 4023 io_object_t connect, 4024 mach_port_t wake_port, 4025 io_async_ref_t reference, 4026 mach_msg_type_number_t referenceCnt, 4027 uint32_t index, 4028 io_struct_inband_t input, 4029 mach_msg_type_number_t inputCount, 4030 io_struct_inband_t output, 4031 mach_msg_type_number_t * outputCount ) 4032 { 4033 uint32_t i; 4034 mach_msg_type_number_t scalar_outputCnt = 0; 4035 mach_vm_size_t ool_output_size = 0; 4036 io_async_ref64_t _reference; 4037 4038 for (i = 0; i < referenceCnt; i++) 4039 _reference[i] = REF64(reference[i]); 4040 4041 return (is_io_connect_async_method(connect, 4042 wake_port, _reference, referenceCnt, 4043 index, 4044 NULL, 0, 4045 input, 
inputCount, 4046 0, 0, 4047 output, outputCount, 4048 NULL, &scalar_outputCnt, 4049 0, &ool_output_size)); 4050 } 4051 4052 4053 kern_return_t shim_io_async_method_scalarI_scalarO( 4054 IOExternalAsyncMethod * method, 4055 IOService * object, 4056 mach_port_t asyncWakePort, 4057 io_user_reference_t * asyncReference, 4058 uint32_t asyncReferenceCount, 4059 const io_user_scalar_t * input, 4060 mach_msg_type_number_t inputCount, 4061 io_user_scalar_t * output, 4062 mach_msg_type_number_t * outputCount ) 4063 { 4064 IOAsyncMethod func; 4065 uint32_t i; 4066 io_scalar_inband_t _output; 4067 IOReturn err; 4068 io_async_ref_t reference; 4069 4070 bzero(&_output[0], sizeof(_output)); 4071 for (i = 0; i < asyncReferenceCount; i++) 4072 reference[i] = REF32(asyncReference[i]); 4073 4074 err = kIOReturnBadArgument; 4075 4076 do { 4077 4078 if( inputCount != method->count0) 4079 { 4080 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 4081 continue; 4082 } 4083 if( *outputCount != method->count1) 4084 { 4085 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 4086 continue; 4087 } 4088 4089 func = method->func; 4090 4091 switch( inputCount) { 4092 4093 case 6: 4094 err = (object->*func)( reference, 4095 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 4096 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) ); 4097 break; 4098 case 5: 4099 err = (object->*func)( reference, 4100 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 4101 ARG32(input[3]), ARG32(input[4]), 4102 &_output[0] ); 4103 break; 4104 case 4: 4105 err = (object->*func)( reference, 4106 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 4107 ARG32(input[3]), 4108 &_output[0], &_output[1] ); 4109 break; 4110 case 3: 4111 err = (object->*func)( reference, 4112 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 4113 &_output[0], &_output[1], &_output[2] ); 4114 break; 4115 case 2: 4116 err = (object->*func)( reference, 4117 ARG32(input[0]), ARG32(input[1]), 4118 &_output[0], &_output[1], &_output[2], 4119 &_output[3] ); 4120 break; 4121 case 1: 4122 err = (object->*func)( reference, 4123 ARG32(input[0]), 4124 &_output[0], &_output[1], &_output[2], 4125 &_output[3], &_output[4] ); 4126 break; 4127 case 0: 4128 err = (object->*func)( reference, 4129 &_output[0], &_output[1], &_output[2], 4130 &_output[3], &_output[4], &_output[5] ); 4131 break; 4132 4133 default: 4134 IOLog("%s: Bad method table\n", object->getName()); 4135 } 4136 } 4137 while( false); 4138 4139 for (i = 0; i < *outputCount; i++) 4140 output[i] = SCALAR32(_output[i]); 4141 4142 return( err); 4143 } 4144 4145 4146 /* Routine io_connect_method_scalarI_structureO */ 4147 kern_return_t is_io_connect_method_scalarI_structureO( 4148 io_object_t connect, 4149 uint32_t index, 4150 io_scalar_inband_t input, 4151 mach_msg_type_number_t inputCount, 4152 io_struct_inband_t output, 4153 mach_msg_type_number_t * outputCount ) 4154 { 4155 uint32_t i; 4156 io_scalar_inband64_t _input; 4157 4158 mach_msg_type_number_t scalar_outputCnt = 0; 4159 mach_vm_size_t ool_output_size = 0; 4160 4161 for (i = 0; i < inputCount; i++) 4162 _input[i] = SCALAR64(input[i]); 4163 4164 return (is_io_connect_method(connect, index, 4165 _input, inputCount, 4166 NULL, 0, 4167 0, 0, 4168 output, outputCount, 4169 NULL, &scalar_outputCnt, 4170 0, &ool_output_size)); 4171 } 4172 4173 kern_return_t shim_io_connect_method_scalarI_structureO( 4174 4175 IOExternalMethod * method, 4176 IOService * object, 4177 const io_user_scalar_t * input, 4178 mach_msg_type_number_t 
kern_return_t shim_io_connect_method_scalarI_structureO(

        IOExternalMethod * method,
        IOService * object,
        const io_user_scalar_t * input,
        mach_msg_type_number_t inputCount,
        io_struct_inband_t output,
        IOByteCount * outputCount )
{
    IOMethod func;
    IOReturn err;

    err = kIOReturnBadArgument;

    do {
        if( inputCount != method->count0)
        {
            IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        ARG32(input[3]), ARG32(input[4]),
                        output );
                break;
            case 4:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        ARG32(input[3]),
                        output, (void *)outputCount );
                break;
            case 3:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        output, (void *)outputCount, 0 );
                break;
            case 2:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
                        output, (void *)outputCount, 0, 0 );
                break;
            case 1:
                err = (object->*func)( ARG32(input[0]),
                        output, (void *)outputCount, 0, 0, 0 );
                break;
            case 0:
                err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    return( err);
}


kern_return_t shim_io_async_method_scalarI_structureO(
        IOExternalAsyncMethod * method,
        IOService * object,
        mach_port_t asyncWakePort,
        io_user_reference_t * asyncReference,
        uint32_t asyncReferenceCount,
        const io_user_scalar_t * input,
        mach_msg_type_number_t inputCount,
        io_struct_inband_t output,
        mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod func;
    uint32_t i;
    IOReturn err;
    io_async_ref_t reference;

    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;
    do {
        if( inputCount != method->count0)
        {
            IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)( reference,
                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        ARG32(input[3]), ARG32(input[4]),
                        output );
                break;
            case 4:
                err = (object->*func)( reference,
                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        ARG32(input[3]),
                        output, (void *)outputCount );
                break;
            case 3:
                err = (object->*func)( reference,
                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        output, (void *)outputCount, 0 );
                break;
            case 2:
                err = (object->*func)( reference,
                        ARG32(input[0]), ARG32(input[1]),
                        output, (void *)outputCount, 0, 0 );
                break;
            case 1:
                err = (object->*func)( reference,
                        ARG32(input[0]),
                        output, (void *)outputCount, 0, 0, 0 );
                break;
            case 0:
                err = (object->*func)( reference,
                        output, (void *)outputCount, 0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    return( err);
}

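/*
 * Note (descriptive comment, not part of the original source): the
 * is_io_connect_method_* and is_io_async_method_* MIG routines in this file
 * are thin compatibility wrappers.  They widen the 32-bit inband scalars and
 * async references to their 64-bit forms and forward everything to
 * is_io_connect_method() / is_io_connect_async_method(), passing zero-length
 * placeholders for the argument classes a given call shape does not use.
 */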
table\n", object->getName()); 4311 } 4312 } 4313 while( false); 4314 4315 return( err); 4316 } 4317 4318 /* Routine io_connect_method_scalarI_structureI */ 4319 kern_return_t is_io_connect_method_scalarI_structureI( 4320 io_connect_t connect, 4321 uint32_t index, 4322 io_scalar_inband_t input, 4323 mach_msg_type_number_t inputCount, 4324 io_struct_inband_t inputStruct, 4325 mach_msg_type_number_t inputStructCount ) 4326 { 4327 uint32_t i; 4328 io_scalar_inband64_t _input; 4329 4330 mach_msg_type_number_t scalar_outputCnt = 0; 4331 mach_msg_type_number_t inband_outputCnt = 0; 4332 mach_vm_size_t ool_output_size = 0; 4333 4334 for (i = 0; i < inputCount; i++) 4335 _input[i] = SCALAR64(input[i]); 4336 4337 return (is_io_connect_method(connect, index, 4338 _input, inputCount, 4339 inputStruct, inputStructCount, 4340 0, 0, 4341 NULL, &inband_outputCnt, 4342 NULL, &scalar_outputCnt, 4343 0, &ool_output_size)); 4344 } 4345 4346 kern_return_t shim_io_connect_method_scalarI_structureI( 4347 IOExternalMethod * method, 4348 IOService * object, 4349 const io_user_scalar_t * input, 4350 mach_msg_type_number_t inputCount, 4351 io_struct_inband_t inputStruct, 4352 mach_msg_type_number_t inputStructCount ) 4353 { 4354 IOMethod func; 4355 IOReturn err = kIOReturnBadArgument; 4356 4357 do 4358 { 4359 if (inputCount != method->count0) 4360 { 4361 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 4362 continue; 4363 } 4364 if( (kIOUCVariableStructureSize != method->count1) 4365 && (inputStructCount != method->count1)) 4366 { 4367 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 4368 continue; 4369 } 4370 4371 func = method->func; 4372 4373 switch( inputCount) { 4374 4375 case 5: 4376 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 4377 ARG32(input[3]), ARG32(input[4]), 4378 inputStruct ); 4379 break; 4380 case 4: 4381 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2], 4382 ARG32(input[3]), 4383 inputStruct, (void *)(uintptr_t)inputStructCount ); 4384 break; 4385 case 3: 4386 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 4387 inputStruct, (void *)(uintptr_t)inputStructCount, 4388 0 ); 4389 break; 4390 case 2: 4391 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), 4392 inputStruct, (void *)(uintptr_t)inputStructCount, 4393 0, 0 ); 4394 break; 4395 case 1: 4396 err = (object->*func)( ARG32(input[0]), 4397 inputStruct, (void *)(uintptr_t)inputStructCount, 4398 0, 0, 0 ); 4399 break; 4400 case 0: 4401 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount, 4402 0, 0, 0, 0 ); 4403 break; 4404 4405 default: 4406 IOLog("%s: Bad method table\n", object->getName()); 4407 } 4408 } 4409 while (false); 4410 4411 return( err); 4412 } 4413 4414 kern_return_t shim_io_async_method_scalarI_structureI( 4415 IOExternalAsyncMethod * method, 4416 IOService * object, 4417 mach_port_t asyncWakePort, 4418 io_user_reference_t * asyncReference, 4419 uint32_t asyncReferenceCount, 4420 const io_user_scalar_t * input, 4421 mach_msg_type_number_t inputCount, 4422 io_struct_inband_t inputStruct, 4423 mach_msg_type_number_t inputStructCount ) 4424 { 4425 IOAsyncMethod func; 4426 uint32_t i; 4427 IOReturn err = kIOReturnBadArgument; 4428 io_async_ref_t reference; 4429 4430 for (i = 0; i < asyncReferenceCount; i++) 4431 reference[i] = REF32(asyncReference[i]); 4432 4433 do 4434 { 4435 if (inputCount != method->count0) 4436 { 4437 IOLog("%s: IOUserClient inputCount count mismatch\n", 
kern_return_t shim_io_async_method_scalarI_structureI(
        IOExternalAsyncMethod * method,
        IOService * object,
        mach_port_t asyncWakePort,
        io_user_reference_t * asyncReference,
        uint32_t asyncReferenceCount,
        const io_user_scalar_t * input,
        mach_msg_type_number_t inputCount,
        io_struct_inband_t inputStruct,
        mach_msg_type_number_t inputStructCount )
{
    IOAsyncMethod func;
    uint32_t i;
    IOReturn err = kIOReturnBadArgument;
    io_async_ref_t reference;

    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    do
    {
        if (inputCount != method->count0)
        {
            IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
            && (inputStructCount != method->count1))
        {
            IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)( reference,
                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        ARG32(input[3]), ARG32(input[4]),
                        inputStruct );
                break;
            case 4:
                err = (object->*func)( reference,
                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        ARG32(input[3]),
                        inputStruct, (void *)(uintptr_t)inputStructCount );
                break;
            case 3:
                err = (object->*func)( reference,
                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                        inputStruct, (void *)(uintptr_t)inputStructCount,
                        0 );
                break;
            case 2:
                err = (object->*func)( reference,
                        ARG32(input[0]), ARG32(input[1]),
                        inputStruct, (void *)(uintptr_t)inputStructCount,
                        0, 0 );
                break;
            case 1:
                err = (object->*func)( reference,
                        ARG32(input[0]),
                        inputStruct, (void *)(uintptr_t)inputStructCount,
                        0, 0, 0 );
                break;
            case 0:
                err = (object->*func)( reference,
                        inputStruct, (void *)(uintptr_t)inputStructCount,
                        0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while (false);

    return( err);
}

/* Routine io_connect_method_structureI_structureO */
kern_return_t is_io_connect_method_structureI_structureO(
        io_object_t connect,
        uint32_t index,
        io_struct_inband_t input,
        mach_msg_type_number_t inputCount,
        io_struct_inband_t output,
        mach_msg_type_number_t * outputCount )
{
    mach_msg_type_number_t scalar_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;

    return (is_io_connect_method(connect, index,
                NULL, 0,
                input, inputCount,
                0, 0,
                output, outputCount,
                NULL, &scalar_outputCnt,
                0, &ool_output_size));
}

kern_return_t shim_io_connect_method_structureI_structureO(
        IOExternalMethod * method,
        IOService * object,
        io_struct_inband_t input,
        mach_msg_type_number_t inputCount,
        io_struct_inband_t output,
        IOByteCount * outputCount )
{
    IOMethod func;
    IOReturn err = kIOReturnBadArgument;

    do
    {
        if( (kIOUCVariableStructureSize != method->count0)
            && (inputCount != method->count0))
        {
            IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
            continue;
        }

        func = method->func;

        if( method->count1) {
            if( method->count0) {
                err = (object->*func)( input, output,
                        (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
            } else {
                err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
            }
        } else {
            err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
        }
    }
    while( false);


    return( err);
}

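/*
 * Note (descriptive comment, not part of the original source): for the
 * struct-in/struct-out shape there is no positional unpacking; count0/count1
 * only decide whether the input buffer, the output buffer, or both are passed,
 * and the byte counts travel through the remaining generic void * parameters.
 */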
kern_return_t shim_io_async_method_structureI_structureO(
        IOExternalAsyncMethod * method,
        IOService * object,
        mach_port_t asyncWakePort,
        io_user_reference_t * asyncReference,
        uint32_t asyncReferenceCount,
        io_struct_inband_t input,
        mach_msg_type_number_t inputCount,
        io_struct_inband_t output,
        mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod func;
    uint32_t i;
    IOReturn err;
    io_async_ref_t reference;

    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;
    do
    {
        if( (kIOUCVariableStructureSize != method->count0)
            && (inputCount != method->count0))
        {
            IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
            continue;
        }

        func = method->func;

        if( method->count1) {
            if( method->count0) {
                err = (object->*func)( reference,
                        input, output,
                        (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
            } else {
                err = (object->*func)( reference,
                        output, outputCount, 0, 0, 0, 0 );
            }
        } else {
            err = (object->*func)( reference,
                        input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
        }
    }
    while( false);

    return( err);
}

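/*
 * Note (descriptive comment, not part of the original source): the
 * io_catalog_* routines below service IOCatalogue requests (from kextd and the
 * IOCatalogue* calls in IOKitLib) made against the master device port.
 * is_io_catalog_send_data() receives an out-of-line, XML-serialized OSArray or
 * OSDictionary; once vm_map_copyout() has consumed the caller's copy object,
 * any failure must be reported through *result while the MIG routine itself
 * still returns KERN_SUCCESS.
 */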
/* Routine io_catalog_send_data */
kern_return_t is_io_catalog_send_data(
        mach_port_t             master_port,
        uint32_t                flag,
        io_buf_ptr_t            inData,
        mach_msg_type_number_t  inDataCount,
        kern_return_t *         result)
{
    OSObject * obj = 0;
    vm_offset_t data;
    kern_return_t kr = kIOReturnError;

    //printf("io_catalog_send_data called. flag: %d\n", flag);

    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    if( (flag != kIOCatalogRemoveKernelLinker &&
            flag != kIOCatalogKextdActive &&
            flag != kIOCatalogKextdFinishedLaunching) &&
        ( !inData || !inDataCount) )
    {
        return kIOReturnBadArgument;
    }

    if (inData) {
        vm_map_offset_t map_data;

        if( inDataCount > sizeof(io_struct_inband_t) * 1024)
            return( kIOReturnMessageTooLarge);

        kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
        data = CAST_DOWN(vm_offset_t, map_data);

        if( kr != KERN_SUCCESS)
            return kr;

        // must return success after vm_map_copyout() succeeds

        if( inDataCount ) {
            obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
            vm_deallocate( kernel_map, data, inDataCount );
            if( !obj) {
                *result = kIOReturnNoMemory;
                return( KERN_SUCCESS);
            }
        }
    }

    switch ( flag ) {
        case kIOCatalogResetDrivers:
        case kIOCatalogResetDriversNoMatch: {
                OSArray * array;

                array = OSDynamicCast(OSArray, obj);
                if (array) {
                    if ( !gIOCatalogue->resetAndAddDrivers(array,
                        flag == kIOCatalogResetDrivers) ) {

                        kr = kIOReturnError;
                    }
                } else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogAddDrivers:
        case kIOCatalogAddDriversNoMatch: {
                OSArray * array;

                array = OSDynamicCast(OSArray, obj);
                if ( array ) {
                    if ( !gIOCatalogue->addDrivers( array,
                        flag == kIOCatalogAddDrivers) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogRemoveDrivers:
        case kIOCatalogRemoveDriversNoMatch: {
                OSDictionary * dict;

                dict = OSDynamicCast(OSDictionary, obj);
                if ( dict ) {
                    if ( !gIOCatalogue->removeDrivers( dict,
                        flag == kIOCatalogRemoveDrivers ) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogStartMatching: {
                OSDictionary * dict;

                dict = OSDynamicCast(OSDictionary, obj);
                if ( dict ) {
                    if ( !gIOCatalogue->startMatching( dict ) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogRemoveKernelLinker:
            kr = KERN_NOT_SUPPORTED;
            break;

        case kIOCatalogKextdActive:
#if !NO_KEXTD
            IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
            OSKext::setKextdActive();

            /* Dump all nonloaded startup extensions; kextd will now send them
             * down on request.
             */
            OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
#endif
            kr = kIOReturnSuccess;
            break;

        case kIOCatalogKextdFinishedLaunching: {
#if !NO_KEXTD
                static bool clearedBusy = false;

                if (!clearedBusy) {
                    IOService * serviceRoot = IOService::getServiceRoot();
                    if (serviceRoot) {
                        IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
                        serviceRoot->adjustBusy(-1);
                        clearedBusy = true;
                    }
                }
#endif
                kr = kIOReturnSuccess;
            }
            break;

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    if (obj) obj->release();

    *result = kr;
    return( KERN_SUCCESS);
}

/* Routine io_catalog_terminate */
kern_return_t is_io_catalog_terminate(
        mach_port_t master_port,
        uint32_t flag,
        io_name_t name )
{
    kern_return_t kr;

    if( master_port != master_device_port )
        return kIOReturnNotPrivileged;

    kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
                                           kIOClientPrivilegeAdministrator );
    if( kIOReturnSuccess != kr)
        return( kr );

    switch ( flag ) {
#if !defined(SECURE_KERNEL)
        case kIOCatalogServiceTerminate:
            OSIterator * iter;
            IOService *  service;

            iter = IORegistryIterator::iterateOver(gIOServicePlane,
                                        kIORegistryIterateRecursively);
            if ( !iter )
                return kIOReturnNoMemory;

            do {
                iter->reset();
                while( (service = (IOService *)iter->getNextObject()) ) {
                    if( service->metaCast(name)) {
                        if ( !service->terminate( kIOServiceRequired
                                                | kIOServiceSynchronous) ) {
                            kr = kIOReturnUnsupported;
                            break;
                        }
                    }
                }
            } while( !service && !iter->isValid());
            iter->release();
            break;

        case kIOCatalogModuleUnload:
        case kIOCatalogModuleTerminate:
            kr = gIOCatalogue->terminateDriversForModule(name,
                                                flag == kIOCatalogModuleUnload);
            break;
#endif

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    return( kr );
}

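/*
 * Note (descriptive comment, not part of the original source):
 * is_io_catalog_get_data() serializes the requested catalogue lists, copies
 * the text into a temporary kernel allocation, and turns it into an
 * out-of-line vm_map_copy_t for the reply.  Because vm_map_copyin() is called
 * with src_destroy == true, the temporary allocation is consumed by the copy
 * object, which MIG then moves to the caller.
 */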
flag: %d\n", flag); 4850 4851 s = OSSerialize::withCapacity(4096); 4852 if ( !s ) 4853 return kIOReturnNoMemory; 4854 4855 kr = gIOCatalogue->serializeData(flag, s); 4856 4857 if ( kr == kIOReturnSuccess ) { 4858 vm_offset_t data; 4859 vm_map_copy_t copy; 4860 vm_size_t size; 4861 4862 size = s->getLength(); 4863 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE); 4864 if ( kr == kIOReturnSuccess ) { 4865 bcopy(s->text(), (void *)data, size); 4866 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data, 4867 (vm_map_size_t)size, true, ©); 4868 *outData = (char *)copy; 4869 *outDataCount = size; 4870 } 4871 } 4872 4873 s->release(); 4874 4875 return kr; 4876 } 4877 4878 /* Routine io_catalog_get_gen_count */ 4879 kern_return_t is_io_catalog_get_gen_count( 4880 mach_port_t master_port, 4881 uint32_t *genCount) 4882 { 4883 if( master_port != master_device_port) 4884 return kIOReturnNotPrivileged; 4885 4886 //printf("io_catalog_get_gen_count called.\n"); 4887 4888 if ( !genCount ) 4889 return kIOReturnBadArgument; 4890 4891 *genCount = gIOCatalogue->getGenerationCount(); 4892 4893 return kIOReturnSuccess; 4894 } 4895 4896 /* Routine io_catalog_module_loaded. 4897 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used. 4898 */ 4899 kern_return_t is_io_catalog_module_loaded( 4900 mach_port_t master_port, 4901 io_name_t name) 4902 { 4903 if( master_port != master_device_port) 4904 return kIOReturnNotPrivileged; 4905 4906 //printf("io_catalog_module_loaded called. name %s\n", name); 4907 4908 if ( !name ) 4909 return kIOReturnBadArgument; 4910 4911 gIOCatalogue->moduleHasLoaded(name); 4912 4913 return kIOReturnSuccess; 4914 } 4915 4916 kern_return_t is_io_catalog_reset( 4917 mach_port_t master_port, 4918 uint32_t flag) 4919 { 4920 if( master_port != master_device_port) 4921 return kIOReturnNotPrivileged; 4922 4923 switch ( flag ) { 4924 case kIOCatalogResetDefault: 4925 gIOCatalogue->reset(); 4926 break; 4927 4928 default: 4929 return kIOReturnBadArgument; 4930 } 4931 4932 return kIOReturnSuccess; 4933 } 4934 4935 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args) 4936 { 4937 kern_return_t result = kIOReturnBadArgument; 4938 IOUserClient *userClient; 4939 4940 if ((userClient = OSDynamicCast(IOUserClient, 4941 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) { 4942 IOExternalTrap *trap; 4943 IOService *target = NULL; 4944 4945 trap = userClient->getTargetAndTrapForIndex(&target, args->index); 4946 4947 if (trap && target) { 4948 IOTrap func; 4949 4950 func = trap->func; 4951 4952 if (func) { 4953 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6); 4954 } 4955 } 4956 4957 userClient->release(); 4958 } 4959 4960 return result; 4961 } 4962 4963 } /* extern "C" */ 4964 4965 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args, 4966 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference ) 4967 { 4968 IOReturn err; 4969 IOService * object; 4970 IOByteCount structureOutputSize; 4971 4972 if (dispatch) 4973 { 4974 uint32_t count; 4975 count = dispatch->checkScalarInputCount; 4976 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) 4977 { 4978 return (kIOReturnBadArgument); 4979 } 4980 4981 count = dispatch->checkStructureInputSize; 4982 if ((kIOUCVariableStructureSize != count) 4983 && (count != ((args->structureInputDescriptor) 4984 ? 
IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
                                       IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
    IOReturn    err;
    IOService * object;
    IOByteCount structureOutputSize;

    if (dispatch)
    {
        uint32_t count;
        count = dispatch->checkScalarInputCount;
        if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkStructureInputSize;
        if ((kIOUCVariableStructureSize != count)
            && (count != ((args->structureInputDescriptor)
                            ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkScalarOutputCount;
        if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkStructureOutputSize;
        if ((kIOUCVariableStructureSize != count)
            && (count != ((args->structureOutputDescriptor)
                            ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
        {
            return (kIOReturnBadArgument);
        }

        if (dispatch->function)
            err = (*dispatch->function)(target, reference, args);
        else
            err = kIOReturnNoCompletion;        /* implementor can dispatch */

        return (err);
    }


    // pre-Leopard API's don't do ool structs
    if (args->structureInputDescriptor || args->structureOutputDescriptor)
    {
        err = kIOReturnIPCError;
        return (err);
    }

    structureOutputSize = args->structureOutputSize;

    if (args->asyncWakePort)
    {
        IOExternalAsyncMethod * method;
        object = 0;
        if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
            return (kIOReturnUnsupported);

        if (kIOUCForegroundOnly & method->flags)
        {
            if (task_is_gpu_denied(current_task()))
                return (kIOReturnNotPermitted);
        }

        switch (method->flags & kIOUCTypeMask)
        {
            case kIOUCScalarIStructI:
                err = shim_io_async_method_scalarI_structureI( method, object,
                        args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                        args->scalarInput, args->scalarInputCount,
                        (char *)args->structureInput, args->structureInputSize );
                break;

            case kIOUCScalarIScalarO:
                err = shim_io_async_method_scalarI_scalarO( method, object,
                        args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                        args->scalarInput, args->scalarInputCount,
                        args->scalarOutput, &args->scalarOutputCount );
                break;

            case kIOUCScalarIStructO:
                err = shim_io_async_method_scalarI_structureO( method, object,
                        args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                        args->scalarInput, args->scalarInputCount,
                        (char *) args->structureOutput, &args->structureOutputSize );
                break;


            case kIOUCStructIStructO:
                err = shim_io_async_method_structureI_structureO( method, object,
                        args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                        (char *)args->structureInput, args->structureInputSize,
                        (char *) args->structureOutput, &args->structureOutputSize );
                break;

            default:
                err = kIOReturnBadArgument;
                break;
        }
    }
    else
    {
        IOExternalMethod * method;
        object = 0;
        if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
            return (kIOReturnUnsupported);

        if (kIOUCForegroundOnly & method->flags)
        {
            if (task_is_gpu_denied(current_task()))
                return (kIOReturnNotPermitted);
        }

        switch (method->flags & kIOUCTypeMask)
        {
            case kIOUCScalarIStructI:
                err = shim_io_connect_method_scalarI_structureI( method, object,
                        args->scalarInput, args->scalarInputCount,
                        (char *) args->structureInput, args->structureInputSize );
                break;

            case kIOUCScalarIScalarO:
                err = shim_io_connect_method_scalarI_scalarO( method, object,
                        args->scalarInput, args->scalarInputCount,
                        args->scalarOutput, &args->scalarOutputCount );
                break;

            case kIOUCScalarIStructO:
                err = shim_io_connect_method_scalarI_structureO( method, object,
                        args->scalarInput, args->scalarInputCount,
                        (char *) args->structureOutput, &structureOutputSize );
                break;


            case kIOUCStructIStructO:
                err = shim_io_connect_method_structureI_structureO( method, object,
                        (char *) args->structureInput, args->structureInputSize,
                        (char *) args->structureOutput, &structureOutputSize );
                break;

            default:
                err = kIOReturnBadArgument;
                break;
        }
    }

    args->structureOutputSize = structureOutputSize;

    return (err);
}

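/*
 * Note (descriptive comment, not part of the original source): the reserved
 * OSMetaClass slots below pad IOUserClient's virtual table for binary
 * compatibility.  On LP64 the first two pads are still unused, while 32-bit
 * builds have already spent them on virtual methods added after the original
 * 32-bit ABI was frozen (see the corresponding declarations in IOUserClient.h).
 */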

#if __LP64__
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);
