/*
 * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <libkern/c++/OSKext.h>
#include <IOKit/IOKitServer.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOService.h>
#include <IOKit/IORegistryEntry.h>
#include <IOKit/IOCatalogue.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <IOKit/IOTimeStamp.h>
#include <libkern/OSDebug.h>
#include <sys/proc.h>
#include <sys/kauth.h>

#if CONFIG_MACF

extern "C" {
#include <security/mac_framework.h>
};
#include <sys/kauth.h>

#define IOMACF_LOG 0

#endif /* CONFIG_MACF */

#include <IOKit/assert.h>

#include "IOServicePrivate.h"
#include "IOKitKernelInternal.h"

#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
#define SCALAR32(x) ((uint32_t )x)
#define ARG32(x)    ((void *)SCALAR32(x))
#define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
#define REF32(x)    ((int)(x))

enum
{
    kIOUCAsync0Flags = 3ULL,
    kIOUCAsync64Flag = 1ULL
};

#if IOKITSTATS

#define IOStatisticsRegisterCounter() \
do { \
    reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

#define IOStatisticsUnregisterCounter() \
do { \
    if (reserved) \
        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

#define IOStatisticsClientCall() \
do { \
    IOStatistics::countUserClientCall(client); \
} while (0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// definitions we should get from osfmk

//typedef struct ipc_port * ipc_port_t;
typedef natural_t ipc_kobject_type_t;

#define IKOT_IOKIT_SPARE    27
#define IKOT_IOKIT_CONNECT  29
#define IKOT_IOKIT_OBJECT   30

extern "C" {

extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
                        ipc_kobject_type_t type );

extern kern_return_t iokit_destroy_object_port( ipc_port_t port );

extern mach_port_name_t iokit_make_send_right( task_t task,
                        io_object_t obj, ipc_kobject_type_t type );

extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );

extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);

extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);

extern ipc_port_t master_device_port;

extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );
extern void iokit_release_port_send( ipc_port_t port );

extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );

#include <mach/mach_traps.h>
#include <vm/vm_map.h>

} /* extern "C" */


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.

class IOMachPort : public OSObject
{
    OSDeclareDefaultStructors(IOMachPort)
public:
    OSObject *  object;
    ipc_port_t  port;
    UInt32      mscount;
    UInt8       holdDestroy;

    static IOMachPort * portForObject( OSObject * obj,
                            ipc_kobject_type_t type );
    static bool noMoreSendersForObject( OSObject * obj,
                            ipc_kobject_type_t type, mach_port_mscount_t * mscount );
    static void releasePortForObject( OSObject * obj,
                            ipc_kobject_type_t type );
    static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

    static OSDictionary * dictForType( ipc_kobject_type_t type );

    static mach_port_name_t makeSendRightForTask( task_t task,
                            io_object_t obj, ipc_kobject_type_t type );

    virtual void free();
};

#define super OSObject
OSDefineMetaClassAndStructors(IOMachPort, OSObject)

static IOLock * gIOObjectPortLock;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// not in dictForType() for debugging ease
static OSDictionary * gIOObjectPorts;
static OSDictionary * gIOConnectPorts;

OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
{
    OSDictionary ** dict;

    if( IKOT_IOKIT_OBJECT == type )
        dict = &gIOObjectPorts;
    else if( IKOT_IOKIT_CONNECT == type )
        dict = &gIOConnectPorts;
    else
        return( 0 );

    if( 0 == *dict)
        *dict = OSDictionary::withCapacity( 1 );

    return( *dict );
}

IOMachPort * IOMachPort::portForObject ( OSObject * obj,
                ipc_kobject_type_t type )
{
    IOMachPort *    inst = 0;
    OSDictionary *  dict;

    IOTakeLock( gIOObjectPortLock);

    do {

        dict = dictForType( type );
        if( !dict)
            continue;

        if( (inst = (IOMachPort *)
                dict->getObject( (const OSSymbol *) obj ))) {
            inst->mscount++;
            inst->retain();
            continue;
        }

        inst = new IOMachPort;
        if( inst && !inst->init()) {
            inst = 0;
            continue;
        }

        inst->port = iokit_alloc_object_port( obj, type );
        if( inst->port) {
            // retains obj
            dict->setObject( (const OSSymbol *) obj, inst );
            inst->mscount++;

        } else {
            inst->release();
            inst = 0;
        }

    } while( false );

    IOUnlock( gIOObjectPortLock);

    return( inst );
}

bool IOMachPort::noMoreSendersForObject( OSObject * obj,
                ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    OSDictionary *  dict;
    IOMachPort *    machPort;
    bool            destroyed = true;

    IOTakeLock( gIOObjectPortLock);

    if( (dict = dictForType( type ))) {
        obj->retain();

        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort) {
            destroyed = (machPort->mscount <= *mscount);
            if( destroyed)
                dict->removeObject( (const OSSymbol *) obj );
            else
                *mscount = machPort->mscount;
        }
        obj->release();
    }

    IOUnlock( gIOObjectPortLock);

    return( destroyed );
}

void IOMachPort::releasePortForObject( OSObject * obj,
                ipc_kobject_type_t type )
{
    OSDictionary *  dict;
    IOMachPort *    machPort;

    IOTakeLock( gIOObjectPortLock);

    if( (dict = dictForType( type ))) {
        obj->retain();
        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort && !machPort->holdDestroy)
            dict->removeObject( (const OSSymbol *) obj );
        obj->release();
    }

    IOUnlock( gIOObjectPortLock);
}

void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
{
    OSDictionary *  dict;
    IOMachPort *    machPort;

    IOLockLock( gIOObjectPortLock );

    if( (dict = dictForType( type ))) {
        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort)
            machPort->holdDestroy = true;
    }

    IOLockUnlock( gIOObjectPortLock );
}

void IOUserClient::destroyUserReferences( OSObject * obj )
{
    IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

    // panther, 3160200
    // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

    OSDictionary * dict;

    IOTakeLock( gIOObjectPortLock);
    obj->retain();

    if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
    {
        IOMachPort * port;
        port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if (port)
        {
            IOUserClient * uc;
            if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
            {
                dict->setObject((const OSSymbol *) uc->mappings, port);
                iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);

                uc->mappings->release();
                uc->mappings = 0;
            }
            dict->removeObject( (const OSSymbol *) obj );
        }
    }
    obj->release();
    IOUnlock( gIOObjectPortLock);
}

mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
                    io_object_t obj, ipc_kobject_type_t type )
{
    return( iokit_make_send_right( task, obj, type ));
}

void IOMachPort::free( void )
{
    if( port)
        iokit_destroy_object_port( port );
    super::free();
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class IOUserNotification : public OSIterator
{
    OSDeclareDefaultStructors(IOUserNotification)

    IONotifier *    holdNotify;
    IOLock *        lock;

public:

    virtual bool init( void );
    virtual void free();

    virtual void setNotification( IONotifier * obj );

    virtual void reset();
    virtual bool isValid();
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" {

// functions called from osfmk/device/iokit_rpc.c

void
iokit_add_reference( io_object_t obj )
{
    if( obj)
        obj->retain();
}

void
iokit_remove_reference( io_object_t obj )
{
    if( obj)
        obj->release();
}

ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
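    // The returned send port carries an extra reference taken with
    // iokit_retain_port() below; callers in this file (e.g. the notification
    // handlers) drop it again with iokit_release_port().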
    IOMachPort *    machPort;
    ipc_port_t      port;

    if( (machPort = IOMachPort::portForObject( obj, type ))) {

        port = machPort->port;
        if( port)
            iokit_retain_port( port );

        machPort->release();

    } else
        port = NULL;

    return( port );
}

kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
                    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    IOUserClient *          client;
    IOMemoryMap *           map;
    IOUserNotification *    notify;

    if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
        return( kIOReturnNotReady );

    if( IKOT_IOKIT_CONNECT == type)
    {
        if( (client = OSDynamicCast( IOUserClient, obj ))) {
            IOStatisticsClientCall();
            client->clientDied();
        }
    }
    else if( IKOT_IOKIT_OBJECT == type)
    {
        if( (map = OSDynamicCast( IOMemoryMap, obj )))
            map->taskDied();
        else if( (notify = OSDynamicCast( IOUserNotification, obj )))
            notify->setNotification( 0 );
    }

    return( kIOReturnSuccess );
}

};  /* extern "C" */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class IOServiceUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceUserNotification)

    struct PingMsg {
        mach_msg_header_t       msgHdr;
        OSNotificationHeader64  notifyHeader;
    };

    enum { kMaxOutstanding = 1024 };

    PingMsg  *  pingMsg;
    vm_size_t   msgSize;
    OSArray  *  newSet;
    OSObject *  lastEntry;
    bool        armed;

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
                       bool clientIs64 );
    virtual void free();

    static bool _handler( void * target,
                          void * ref, IOService * newService, IONotifier * notifier );
    virtual bool handler( void * ref, IOService * newService );

    virtual OSObject * getNextObject();
};

class IOServiceMessageUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceMessageUserNotification)

    struct PingMsg {
        mach_msg_header_t           msgHdr;
        mach_msg_body_t             msgBody;
        mach_msg_port_descriptor_t  ports[1];
        OSNotificationHeader64      notifyHeader __attribute__ ((packed));
    };

    PingMsg *   pingMsg;
    vm_size_t   msgSize;
    uint8_t     clientIs64;
    int         owningPID;

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
                       vm_size_t extraSize,
                       bool clientIs64 );

    virtual void free();

    static IOReturn _handler( void * target, void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );
    virtual IOReturn handler( void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );

    virtual OSObject * getNextObject();
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSIterator
OSDefineMetaClass( IOUserNotification, OSIterator )
OSDefineAbstractStructors( IOUserNotification, OSIterator )

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOUserNotification::init( void )
{
    if( !super::init())
        return( false );

    lock = IOLockAlloc();
    if( !lock)
        return( false );

    return( true );
}

void IOUserNotification::free( void )
{
    if( holdNotify)
        holdNotify->remove();
    // can't be in handler now

    if( lock)
        IOLockFree( lock );

    super::free();
}


void IOUserNotification::setNotification( IONotifier * notify )
{
    IONotifier * previousNotify;

    IOLockLock( gIOObjectPortLock);

    previousNotify = holdNotify;
    holdNotify = notify;

    IOLockUnlock( gIOObjectPortLock);

    if( previousNotify)
        previousNotify->remove();
}

void IOUserNotification::reset()
{
    // ?
}

bool IOUserNotification::isValid()
{
    return( true );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
                                      void * reference, vm_size_t referenceSize,
                                      bool clientIs64 )
{
    newSet = OSArray::withCapacity( 1 );
    if( !newSet)
        return( false );

    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port = port;
    pingMsg->msgHdr.msgh_bits        = MACH_MSGH_BITS(
                                        MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                        MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size        = msgSize;
    pingMsg->msgHdr.msgh_id          = kOSNotificationMessageID;

    pingMsg->notifyHeader.size = 0;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( super::init() );
}

void IOServiceUserNotification::free( void )
{
    PingMsg  *  _pingMsg;
    vm_size_t   _msgSize;
    OSArray  *  _newSet;
    OSObject *  _lastEntry;

    _pingMsg   = pingMsg;
    _msgSize   = msgSize;
    _lastEntry = lastEntry;
    _newSet    = newSet;

    super::free();

    if( _pingMsg && _msgSize)
        IOFree( _pingMsg, _msgSize);

    if( _lastEntry)
        _lastEntry->release();

    if( _newSet)
        _newSet->release();
}

bool IOServiceUserNotification::_handler( void * target,
                    void * ref, IOService * newService, IONotifier * notifier )
{
    return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
}

bool IOServiceUserNotification::handler( void * ref,
                    IOService * newService )
{
    unsigned int    count;
    kern_return_t   kr;
    ipc_port_t      port = NULL;
    bool            sendPing = false;

    IOTakeLock( lock );

    count = newSet->getCount();
    if( count < kMaxOutstanding) {

        newSet->setObject( newService );
        if( (sendPing = (armed && (0 == count))))
            armed = false;
    }

    IOUnlock( lock );

    if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
        IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );

    if( sendPing) {
        if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
            pingMsg->msgHdr.msgh_local_port = port;
        else
            pingMsg->msgHdr.msgh_local_port = NULL;

        kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr,
                                               pingMsg->msgHdr.msgh_size);
        if( port)
            iokit_release_port( port );

        if( KERN_SUCCESS != kr)
            IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
    }

    return( true );
}

OSObject * IOServiceUserNotification::getNextObject()
{
    unsigned int    count;
    OSObject *      result;

    IOTakeLock( lock );

    if( lastEntry)
        lastEntry->release();

    count = newSet->getCount();
    if( count ) {
        result = newSet->getObject( count - 1 );
        result->retain();
        newSet->removeObject( count - 1);
    } else {
        result = 0;
        armed = true;
    }
    lastEntry = result;

    IOUnlock( lock );

    return( result );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
                void * reference, vm_size_t referenceSize, vm_size_t extraSize,
                bool client64 )
{

    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    clientIs64 = client64;

    owningPID = proc_selfpid();

    extraSize += sizeof(IOServiceInterestContent64);
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port = port;
    pingMsg->msgHdr.msgh_bits        = MACH_MSGH_BITS_COMPLEX
                                     | MACH_MSGH_BITS(
                                        MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                        MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size        = msgSize;
    pingMsg->msgHdr.msgh_id          = kOSNotificationMessageID;

    pingMsg->msgBody.msgh_descriptor_count = 1;

    pingMsg->ports[0].name        = 0;
    pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
    pingMsg->ports[0].type        = MACH_MSG_PORT_DESCRIPTOR;

    pingMsg->notifyHeader.size = extraSize;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( super::init() );
}

void IOServiceMessageUserNotification::free( void )
{
    PingMsg *   _pingMsg;
    vm_size_t   _msgSize;

    _pingMsg = pingMsg;
    _msgSize = msgSize;

    super::free();

    if( _pingMsg && _msgSize)
        IOFree( _pingMsg, _msgSize);
}

IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
                UInt32 messageType, IOService * provider,
                void * argument, vm_size_t argSize )
{
    return( ((IOServiceMessageUserNotification *) target)->handler(
                ref, messageType, provider, argument, argSize));
}

IOReturn IOServiceMessageUserNotification::handler( void * ref,
                UInt32 messageType, IOService * provider,
                void * messageArgument, vm_size_t argSize )
{
    kern_return_t                kr;
    ipc_port_t                   thisPort, providerPort;
    IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
        ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
        // == pingMsg->notifyHeader.content;

    if (kIOMessageCopyClientID == messageType)
    {
        *((void **) messageArgument) = IOCopyLogNameForPID(owningPID);
        return (kIOReturnSuccess);
    }

    data->messageType = messageType;

    if( argSize == 0)
    {
        data->messageArgument[0] = (io_user_reference_t) messageArgument;
        if (clientIs64)
            argSize = sizeof(data->messageArgument[0]);
        else
        {
            data->messageArgument[0] |= (data->messageArgument[0] << 32);
            argSize = sizeof(uint32_t);
        }
    }
    else
    {
        if( argSize > kIOUserNotifyMaxMessageSize)
            argSize = kIOUserNotifyMaxMessageSize;
        bcopy( messageArgument, data->messageArgument, argSize );
    }
    pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
        + sizeof( IOServiceInterestContent64 )
        - sizeof( data->messageArgument)
        + argSize;

    providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
    pingMsg->ports[0].name = providerPort;
    thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
    pingMsg->msgHdr.msgh_local_port = thisPort;
    kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr,
                                           pingMsg->msgHdr.msgh_size);
    if( thisPort)
        iokit_release_port( thisPort );
    if( providerPort)
        iokit_release_port( providerPort );

    if( KERN_SUCCESS != kr)
        IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );

    return( kIOReturnSuccess );
}

OSObject * IOServiceMessageUserNotification::getNextObject()
{
    return( 0 );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

void IOUserClient::initialize( void )
{
    gIOObjectPortLock = IOLockAlloc();

    assert( gIOObjectPortLock );
}

void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
                                     mach_port_t wakePort,
                                     void *callback, void *refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
                                         | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
}

void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
                                       mach_port_t wakePort,
                                       mach_vm_address_t callback, io_user_reference_t refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
                                         | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}

static OSDictionary * CopyConsoleUser(UInt32 uid)
{
    OSArray *      array;
    OSDictionary * user = 0;

    if ((array = OSDynamicCast(OSArray,
        IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
    {
        for (unsigned int idx = 0;
             (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
             idx++) {
            OSNumber * num;

            if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
                && (uid == num->unsigned32BitValue())) {
                user->retain();
                break;
            }
        }
        array->release();
    }
    return user;
}

static OSDictionary * CopyUserOnConsole(void)
{
    OSArray *      array;
    OSDictionary * user = 0;

    if ((array = OSDynamicCast(OSArray,
        IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
    {
        for (unsigned int idx = 0;
             (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
             idx++)
        {
            if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
            {
                user->retain();
                break;
            }
        }
        array->release();
    }
    return (user);
}

IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
                                           const char * privilegeName )
{
    kern_return_t           kr;
    security_token_t        token;
    mach_msg_type_number_t  count;
    task_t                  task;
    OSDictionary *          user;
    bool                    secureConsole;


    if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
                 sizeof(kIOClientPrivilegeForeground)))
    {
        /* is graphics access denied for current task? */
        if (proc_get_task_selfgpuacc_deny() != 0)
            return (kIOReturnNotPrivileged);
        else
            return (kIOReturnSuccess);
    }

    if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
                 sizeof(kIOClientPrivilegeConsoleSession)))
    {
        kauth_cred_t cred;
        proc_t       p;

        task = (task_t) securityToken;
        if (!task)
            task = current_task();
        p = (proc_t) get_bsdtask_info(task);
        kr = kIOReturnNotPrivileged;

        if (p && (cred = kauth_cred_proc_ref(p)))
        {
            user = CopyUserOnConsole();
            if (user)
            {
                OSNumber * num;
                if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
                    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
                {
                    kr = kIOReturnSuccess;
                }
                user->release();
            }
            kauth_cred_unref(&cred);
        }
        return (kr);
    }

    if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
                                  sizeof(kIOClientPrivilegeSecureConsoleProcess))))
        task = (task_t)((IOUCProcessToken *)securityToken)->token;
    else
        task = (task_t)securityToken;

    count = TASK_SECURITY_TOKEN_COUNT;
    kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

    if (KERN_SUCCESS != kr)
    {}
    else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
                      sizeof(kIOClientPrivilegeAdministrator))) {
        if (0 != token.val[0])
            kr = kIOReturnNotPrivileged;
    } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
                        sizeof(kIOClientPrivilegeLocalUser))) {
        user = CopyConsoleUser(token.val[0]);
        if ( user )
            user->release();
        else
            kr = kIOReturnNotPrivileged;
    } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
                                         sizeof(kIOClientPrivilegeConsoleUser))) {
        user = CopyConsoleUser(token.val[0]);
        if ( user ) {
            if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
                kr = kIOReturnNotPrivileged;
            else if ( secureConsole ) {
                OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
                if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
                    kr = kIOReturnNotPrivileged;
            }
            user->release();
        }
        else
            kr = kIOReturnNotPrivileged;
    } else
        kr = kIOReturnUnsupported;

    return (kr);
}

bool IOUserClient::init()
{
    if (getPropertyTable() || super::init())
        return reserve();

    return false;
}

bool IOUserClient::init(OSDictionary * dictionary)
{
    if (getPropertyTable() || super::init(dictionary))
        return reserve();

    return false;
}

bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type )
{
    if (getPropertyTable() || super::init())
        return reserve();

    return false;
}

bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type,
                                OSDictionary * properties )
{
    bool ok;

    ok = super::init( properties );
    ok &= initWithTask( owningTask, securityID, type );

    return( ok );
}

bool IOUserClient::reserve()
{
    if(!reserved) {
        reserved = IONew(ExpansionData, 1);
        if (!reserved) {
            return false;
        }
    }

    IOStatisticsRegisterCounter();

    return true;
}

void IOUserClient::free()
{
    if( mappings)
        mappings->release();

    IOStatisticsUnregisterCounter();

    if (reserved)
        IODelete(reserved, ExpansionData, 1);

    super::free();
}

IOReturn IOUserClient::clientDied( void )
{
    return( clientClose());
}

IOReturn IOUserClient::clientClose( void )
{
    return( kIOReturnUnsupported );
}

IOService * IOUserClient::getService( void )
{
    return( 0 );
}

IOReturn IOUserClient::registerNotificationPort(
        mach_port_t /* port */,
        UInt32      /* type */,
        UInt32      /* refCon */)
{
    return( kIOReturnUnsupported);
}

IOReturn IOUserClient::registerNotificationPort(
        mach_port_t port,
        UInt32      type,
        io_user_reference_t refCon)
{
    return (registerNotificationPort(port, type, (UInt32) refCon));
}

IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
                                                 semaphore_t * semaphore )
{
    return( kIOReturnUnsupported);
}

IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
{
    return( kIOReturnUnsupported);
}

IOReturn IOUserClient::clientMemoryForType( UInt32 type,
                                            IOOptionBits * options,
                                            IOMemoryDescriptor ** memory )
{
    return( kIOReturnUnsupported);
}

#if !__LP64__
IOMemoryMap * IOUserClient::mapClientMemory(
        IOOptionBits        type,
        task_t              task,
        IOOptionBits        mapFlags,
        IOVirtualAddress    atAddress )
{
    return (NULL);
}
#endif

IOMemoryMap * IOUserClient::mapClientMemory64(
        IOOptionBits        type,
        task_t              task,
        IOOptionBits        mapFlags,
        mach_vm_address_t   atAddress )
{
    IOReturn             err;
    IOOptionBits         options = 0;
    IOMemoryDescriptor * memory;
    IOMemoryMap *        map = 0;

    err = clientMemoryForType( (UInt32) type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

        options = (options & ~kIOMapUserOptionsMask)
                | (mapFlags & kIOMapUserOptionsMask);
        map = memory->createMappingInTask( task, atAddress, options );
        memory->release();
    }

    return( map );
}

IOReturn IOUserClient::exportObjectToClient(task_t task,
                                            OSObject *obj, io_object_t *clientObj)
{
    mach_port_name_t name;

    name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
    assert( name );

    *(mach_port_name_t *)clientObj = name;
    return kIOReturnSuccess;
}

IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}

IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}

IOExternalMethod * IOUserClient::
getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
{
    IOExternalMethod *method = getExternalMethodForIndex(index);

    if (method)
        *targetP = (IOService *) method->object;

    return method;
}

IOExternalAsyncMethod * IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);

    if (method)
        *targetP = (IOService *) method->object;

    return method;
}

IOExternalTrap * IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
    return NULL;
}

IOExternalTrap * IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalTrap *trap = getExternalTrapForIndex(index);

    if (trap) {
        *targetP = trap->object;
    }

    return trap;
}

IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
{
    mach_port_t port;
    port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);

    if (MACH_PORT_NULL != port)
        iokit_release_port_send(port);

    return (kIOReturnSuccess);
}

IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
{
    if (MACH_PORT_NULL != port)
        iokit_release_port_send(port);

    return (kIOReturnSuccess);
}

IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
                                       IOReturn result, void *args[], UInt32 numArgs)
{
    OSAsyncReference64  reference64;
    io_user_reference_t args64[kMaxAsyncArgs];
    unsigned int        idx;

    if (numArgs > kMaxAsyncArgs)
        return kIOReturnMessageTooLarge;

    for (idx = 0; idx < kOSAsyncRef64Count; idx++)
        reference64[idx] = REF64(reference[idx]);

    for (idx = 0; idx < numArgs; idx++)
        args64[idx] = REF64(args[idx]);

    return (sendAsyncResult64(reference64, result, args64, numArgs));
}

IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
                                         IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
    struct ReplyMsg
    {
        mach_msg_header_t msgHdr;
        union
        {
            struct
            {
                OSNotificationHeader     notifyHdr;
                IOAsyncCompletionContent asyncContent;
                uint32_t                 args[kMaxAsyncArgs];
            } msg32;
            struct
            {
                OSNotificationHeader64   notifyHdr;
                IOAsyncCompletionContent asyncContent;
                io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
            } msg64;
        } m;
    };
    ReplyMsg      replyMsg;
    mach_port_t   replyPort;
    kern_return_t kr;

    // If no reply port, do nothing.
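    // The async reference's reserved slot holds the wake port (set up by
    // setAsyncReference64() above) with its low bits used as flags, so mask off
    // kIOUCAsync0Flags to recover the port; kIOUCAsync64Flag selects the 64-bit
    // reply layout below.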
    replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
    if (replyPort == MACH_PORT_NULL)
        return kIOReturnSuccess;

    if (numArgs > kMaxAsyncArgs)
        return kIOReturnMessageTooLarge;

    replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                               0 /*local*/);
    replyMsg.msgHdr.msgh_remote_port = replyPort;
    replyMsg.msgHdr.msgh_local_port  = 0;
    replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
    if (kIOUCAsync64Flag & reference[0])
    {
        replyMsg.msgHdr.msgh_size =
            sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
            - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
        replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
                                        + numArgs * sizeof(io_user_reference_t);
        replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
        bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));

        replyMsg.m.msg64.asyncContent.result = result;
        if (numArgs)
            bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
    }
    else
    {
        unsigned int idx;

        replyMsg.msgHdr.msgh_size =
            sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
            - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

        replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
                                        + numArgs * sizeof(uint32_t);
        replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

        for (idx = 0; idx < kOSAsyncRefCount; idx++)
            replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);

        replyMsg.m.msg32.asyncContent.result = result;

        for (idx = 0; idx < numArgs; idx++)
            replyMsg.m.msg32.args[idx] = REF32(args[idx]);
    }

    kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
                                           replyMsg.msgHdr.msgh_size);
    if( KERN_SUCCESS != kr)
        IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
    return kr;
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" {

#define CHECK(cls,obj,out)                      \
    cls * out;                                  \
    if( !(out = OSDynamicCast( cls, obj)))      \
        return( kIOReturnBadArgument )

/* Routine io_object_get_class */
kern_return_t is_io_object_get_class(
    io_object_t object,
    io_name_t className )
{
    const OSMetaClass* my_obj = NULL;

    if( !object)
        return( kIOReturnBadArgument );

    my_obj = object->getMetaClass();
    if (!my_obj) {
        return (kIOReturnNotFound);
    }

    strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
    return( kIOReturnSuccess );
}

/* Routine io_object_get_superclass */
kern_return_t is_io_object_get_superclass(
    mach_port_t master_port,
    io_name_t obj_name,
    io_name_t class_name)
{
    const OSMetaClass* my_obj = NULL;
    const OSMetaClass* superclass = NULL;
    const OSSymbol *my_name = NULL;
    const char *my_cstr = NULL;

    if (!obj_name || !class_name)
        return (kIOReturnBadArgument);

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    my_name = OSSymbol::withCString(obj_name);

    if (my_name) {
        my_obj = OSMetaClass::getMetaClassWithName(my_name);
        my_name->release();
    }
    if (my_obj) {
        superclass = my_obj->getSuperClass();
    }

    if (!superclass) {
        return( kIOReturnNotFound );
    }

    my_cstr = superclass->getClassName();

    if (my_cstr) {
        strlcpy(class_name, my_cstr, sizeof(io_name_t));
        return( kIOReturnSuccess );
    }
    return (kIOReturnNotFound);
}

/* Routine io_object_get_bundle_identifier */
kern_return_t is_io_object_get_bundle_identifier(
    mach_port_t master_port,
    io_name_t obj_name,
    io_name_t bundle_name)
{
    const OSMetaClass* my_obj = NULL;
    const OSSymbol *my_name = NULL;
    const OSSymbol *identifier = NULL;
    const char *my_cstr = NULL;

    if (!obj_name || !bundle_name)
        return (kIOReturnBadArgument);

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    my_name = OSSymbol::withCString(obj_name);

    if (my_name) {
        my_obj = OSMetaClass::getMetaClassWithName(my_name);
        my_name->release();
    }

    if (my_obj) {
        identifier = my_obj->getKmodName();
    }
    if (!identifier) {
        return( kIOReturnNotFound );
    }

    my_cstr = identifier->getCStringNoCopy();
    if (my_cstr) {
        strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
        return( kIOReturnSuccess );
    }

    return (kIOReturnBadArgument);
}

/* Routine io_object_conforms_to */
kern_return_t is_io_object_conforms_to(
    io_object_t object,
    io_name_t className,
    boolean_t *conforms )
{
    if( !object)
        return( kIOReturnBadArgument );

    *conforms = (0 != object->metaCast( className ));
    return( kIOReturnSuccess );
}

/* Routine io_object_get_retain_count */
kern_return_t is_io_object_get_retain_count(
    io_object_t object,
    uint32_t *retainCount )
{
    if( !object)
        return( kIOReturnBadArgument );

    *retainCount = object->getRetainCount();
    return( kIOReturnSuccess );
}

/* Routine io_iterator_next */
kern_return_t is_io_iterator_next(
    io_object_t iterator,
    io_object_t *object )
{
    OSObject * obj;

    CHECK( OSIterator, iterator, iter );

    obj = iter->getNextObject();
    if( obj) {
        obj->retain();
        *object = obj;
        return( kIOReturnSuccess );
    } else
        return( kIOReturnNoDevice );
}

/* Routine io_iterator_reset */
kern_return_t is_io_iterator_reset(
    io_object_t iterator )
{
    CHECK( OSIterator, iterator, iter );

    iter->reset();

    return( kIOReturnSuccess );
}

/* Routine io_iterator_is_valid */
kern_return_t is_io_iterator_is_valid(
    io_object_t iterator,
    boolean_t *is_valid )
{
    CHECK( OSIterator, iterator, iter );

    *is_valid = iter->isValid();

    return( kIOReturnSuccess );
}

/* Routine io_service_match_property_table */
kern_return_t is_io_service_match_property_table(
    io_service_t _service,
    io_string_t matching,
    boolean_t *matches )
{
    CHECK( IOService, _service, service );

    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    obj = OSUnserializeXML( matching );

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *matches = service->passiveMatch( dict );
        kr = kIOReturnSuccess;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}

/* Routine io_service_match_property_table_ool */
kern_return_t is_io_service_match_property_table_ool(
    io_object_t service,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    kern_return_t *result,
    boolean_t *matches )
{
    kern_return_t    kr;
    vm_offset_t      data;
    vm_map_offset_t  map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        *result = is_io_service_match_property_table( service,
                    (char *) data, matches );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

/* Routine io_service_get_matching_services */
kern_return_t is_io_service_get_matching_services(
    mach_port_t master_port,
    io_string_t matching,
    io_iterator_t *existing )
{
    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    obj = OSUnserializeXML( matching );

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *existing = IOService::getMatchingServices( dict );
        kr = kIOReturnSuccess;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}

/* Routine io_service_get_matching_services_ool */
kern_return_t is_io_service_get_matching_services_ool(
    mach_port_t master_port,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    kern_return_t *result,
    io_object_t *existing )
{
    kern_return_t    kr;
    vm_offset_t      data;
    vm_map_offset_t  map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        *result = is_io_service_get_matching_services( master_port,
                    (char *) data, existing );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

static kern_return_t internal_io_service_add_notification(
    mach_port_t master_port,
    io_name_t notification_type,
    io_string_t matching,
    mach_port_t port,
    void * reference,
    vm_size_t referenceSize,
    bool client64,
    io_object_t * notification )
{
    IOServiceUserNotification * userNotify = 0;
    IONotifier *                notify = 0;
    const OSSymbol *            sym;
    OSDictionary *              dict;
    IOReturn                    err;
    unsigned long int           userMsgType;


    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    do {
        err = kIOReturnNoResources;

        if( !(sym = OSSymbol::withCString( notification_type )))
            err = kIOReturnNoResources;

        if( !(dict = OSDynamicCast( OSDictionary,
                    OSUnserializeXML( matching )))) {
            err = kIOReturnBadArgument;
            continue;
        }

        if( (sym == gIOPublishNotification)
         || (sym == gIOFirstPublishNotification))
            userMsgType = kIOServicePublishNotificationType;
        else if( (sym == gIOMatchedNotification)
              || (sym == gIOFirstMatchNotification))
            userMsgType = kIOServiceMatchedNotificationType;
        else if( sym == gIOTerminatedNotification)
            userMsgType = kIOServiceTerminatedNotificationType;
        else
            userMsgType = kLastIOKitNotificationType;

        userNotify = new IOServiceUserNotification;

        if( userNotify && !userNotify->init( port, userMsgType,
                                             reference, referenceSize, client64)) {
            userNotify->release();
            userNotify = 0;
        }
        if( !userNotify)
            continue;

        notify = IOService::addMatchingNotification( sym, dict,
                    &userNotify->_handler, userNotify );
        if( notify) {
            *notification = userNotify;
            userNotify->setNotification( notify );
            err = kIOReturnSuccess;
        } else
            err = kIOReturnUnsupported;

    } while( false );

    if( sym)
        sym->release();
    if( dict)
        dict->release();

    return( err );
}


/* Routine io_service_add_notification */
kern_return_t is_io_service_add_notification(
    mach_port_t master_port,
    io_name_t notification_type,
    io_string_t matching,
    mach_port_t port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t * notification )
{
    return (internal_io_service_add_notification(master_port, notification_type,
                matching, port, &reference[0], sizeof(io_async_ref_t),
                false, notification));
}

/* Routine io_service_add_notification_64 */
kern_return_t is_io_service_add_notification_64(
    mach_port_t master_port,
    io_name_t notification_type,
    io_string_t matching,
    mach_port_t wake_port,
    io_async_ref64_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t *notification )
{
    return (internal_io_service_add_notification(master_port, notification_type,
                matching, wake_port, &reference[0], sizeof(io_async_ref64_t),
                true, notification));
}


static kern_return_t internal_io_service_add_notification_ool(
    mach_port_t master_port,
    io_name_t notification_type,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    mach_port_t wake_port,
    void * reference,
    vm_size_t referenceSize,
    bool client64,
    kern_return_t *result,
    io_object_t *notification )
{
    kern_return_t    kr;
    vm_offset_t      data;
    vm_map_offset_t  map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        *result = internal_io_service_add_notification( master_port, notification_type,
                    (char *) data, wake_port, reference, referenceSize, client64, notification );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

/* Routine io_service_add_notification_ool */
kern_return_t is_io_service_add_notification_ool(
    mach_port_t master_port,
    io_name_t notification_type,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    mach_port_t wake_port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    kern_return_t *result,
    io_object_t *notification )
{
    return (internal_io_service_add_notification_ool(master_port, notification_type,
                matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
                false, result, notification));
}

/* Routine io_service_add_notification_ool_64 */
kern_return_t is_io_service_add_notification_ool_64(
    mach_port_t master_port,
    io_name_t notification_type,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    mach_port_t wake_port,
    io_async_ref64_t reference,
    mach_msg_type_number_t referenceCnt,
    kern_return_t *result,
    io_object_t *notification )
{
    return (internal_io_service_add_notification_ool(master_port, notification_type,
                matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
                true, result, notification));
}

/* Routine io_service_add_notification_old */
kern_return_t is_io_service_add_notification_old(
    mach_port_t master_port,
    io_name_t notification_type,
    io_string_t matching,
    mach_port_t port,
    // for binary compatibility reasons, this must be natural_t for ILP32
    natural_t ref,
    io_object_t * notification )
{
    return( is_io_service_add_notification( master_port, notification_type,
                matching, port, &ref, 1, notification ));
}


static kern_return_t internal_io_service_add_interest_notification(
    io_object_t _service,
    io_name_t type_of_interest,
    mach_port_t port,
    void * reference,
    vm_size_t referenceSize,
    bool client64,
    io_object_t * notification )
{

    IOServiceMessageUserNotification * userNotify = 0;
    IONotifier *                       notify = 0;
    const OSSymbol *                   sym;
    IOReturn                           err;

    CHECK( IOService, _service, service );

    err = kIOReturnNoResources;
    if( (sym = OSSymbol::withCString( type_of_interest ))) do {

        userNotify = new IOServiceMessageUserNotification;

        if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
                                             reference, referenceSize,
                                             kIOUserNotifyMaxMessageSize,
                                             client64 )) {
            userNotify->release();
            userNotify = 0;
        }
        if( !userNotify)
            continue;

        notify = service->registerInterest( sym,
                    &userNotify->_handler, userNotify );
        if( notify) {
            *notification = userNotify;
            userNotify->setNotification( notify );
            err = kIOReturnSuccess;
        } else
            err = kIOReturnUnsupported;

        sym->release();

    } while( false );

    return( err );
}

/* Routine io_service_add_message_notification */
kern_return_t is_io_service_add_interest_notification(
    io_object_t service,
    io_name_t type_of_interest,
    mach_port_t port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t * notification )
{
    return (internal_io_service_add_interest_notification(service, type_of_interest,
                port, &reference[0], sizeof(io_async_ref_t), false, notification));
}

/* Routine io_service_add_interest_notification_64 */
kern_return_t is_io_service_add_interest_notification_64(
    io_object_t service,
    io_name_t type_of_interest,
    mach_port_t wake_port,
    io_async_ref64_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t *notification )
{
    return (internal_io_service_add_interest_notification(service, type_of_interest,
                wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
}


/* Routine io_service_acknowledge_notification */
kern_return_t is_io_service_acknowledge_notification(
    io_object_t _service,
    natural_t notify_ref,
    natural_t response )
{
    CHECK( IOService, _service, service );

    return( service->acknowledgeNotification( (IONotificationRef) notify_ref,
                (IOOptionBits) response ));

}

/* Routine io_connect_get_semaphore */
kern_return_t is_io_connect_get_notification_semaphore(
    io_connect_t connection,
    natural_t notification_type,
    semaphore_t *semaphore )
{
    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();
    return( client->getNotificationSemaphore( (UInt32) notification_type,
                semaphore ));
}

/* Routine io_registry_get_root_entry */
kern_return_t is_io_registry_get_root_entry(
    mach_port_t master_port,
    io_object_t *root )
{
    IORegistryEntry * entry;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    entry = IORegistryEntry::getRegistryRoot();
    if( entry)
        entry->retain();
    *root = entry;

    return( kIOReturnSuccess );
}

/* Routine io_registry_create_iterator */
kern_return_t is_io_registry_create_iterator(
    mach_port_t master_port,
    io_name_t plane,
    uint32_t options,
    io_object_t *iterator )
{
    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    *iterator = IORegistryIterator::iterateOver(
                    IORegistryEntry::getPlane( plane ), options );

    return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
}

/* Routine io_registry_entry_create_iterator */
kern_return_t is_io_registry_entry_create_iterator(
    io_object_t registry_entry,
    io_name_t plane,
    uint32_t options,
    io_object_t *iterator )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *iterator = IORegistryIterator::iterateOver( entry,
                    IORegistryEntry::getPlane( plane ), options );

    return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
}

/* Routine io_registry_iterator_enter */
kern_return_t is_io_registry_iterator_enter_entry(
    io_object_t iterator )
{
    CHECK( IORegistryIterator, iterator, iter );

    iter->enterEntry();

    return( kIOReturnSuccess );
}

/* Routine io_registry_iterator_exit */
kern_return_t is_io_registry_iterator_exit_entry(
    io_object_t iterator )
{
    bool didIt;

    CHECK( IORegistryIterator, iterator, iter );

    didIt = iter->exitEntry();

    return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
}

/* Routine io_registry_entry_from_path */
kern_return_t is_io_registry_entry_from_path(
    mach_port_t master_port,
    io_string_t path,
    io_object_t *registry_entry )
{
    IORegistryEntry * entry;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    entry = IORegistryEntry::fromPath( path );

    *registry_entry = entry;

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_in_plane */
kern_return_t is_io_registry_entry_in_plane(
    io_object_t registry_entry,
    io_name_t plane,
    boolean_t *inPlane )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));

    return( kIOReturnSuccess );
}


/* Routine io_registry_entry_get_path */
kern_return_t is_io_registry_entry_get_path(
    io_object_t registry_entry,
    io_name_t plane,
    io_string_t path )
{
    int length;
    CHECK( IORegistryEntry, registry_entry, entry );

    length = sizeof( io_string_t);
    if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
        return( kIOReturnSuccess );
    else
        return( kIOReturnBadArgument );
}


/* Routine io_registry_entry_get_name */
kern_return_t is_io_registry_entry_get_name(
    io_object_t registry_entry,
    io_name_t name )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    strncpy( name, entry->getName(), sizeof( io_name_t));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_name_in_plane */
kern_return_t is_io_registry_entry_get_name_in_plane(
    io_object_t registry_entry,
    io_name_t planeName,
    io_name_t name )
{
    const IORegistryPlane * plane;
    CHECK( IORegistryEntry, registry_entry, entry );

    if( planeName[0])
        plane = IORegistryEntry::getPlane( planeName );
    else
        plane = 0;

    strncpy( name, entry->getName( plane), sizeof( io_name_t));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_location_in_plane */
kern_return_t is_io_registry_entry_get_location_in_plane(
    io_object_t registry_entry,
    io_name_t planeName,
    io_name_t location )
{
    const IORegistryPlane * plane;
    CHECK( IORegistryEntry, registry_entry, entry );

    if( planeName[0])
        plane = IORegistryEntry::getPlane( planeName );
    else
        plane = 0;

    const char * cstr = entry->getLocation( plane );

    if( cstr) {
        strncpy( location, cstr, sizeof( io_name_t));
        return( kIOReturnSuccess );
    } else
        return( kIOReturnNotFound );
}

/* Routine io_registry_entry_get_registry_entry_id */
kern_return_t is_io_registry_entry_get_registry_entry_id(
    io_object_t registry_entry,
    uint64_t *entry_id )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *entry_id = entry->getRegistryEntryID();

    return (kIOReturnSuccess);
}

// Create a vm_map_copy_t or kalloc'ed data for memory
// to be copied out. ipc will free after the copyout.
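// Note: vm_map_copyin() below is called with src_destroy == false, so the
// caller's source buffer (e.g. the OSSerialize text in the property routines
// that follow) is left intact and is still released by the caller.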
static kern_return_t copyoutkdata( void * data, vm_size_t len,
                                    io_buf_ptr_t * buf )
{
    kern_return_t err;
    vm_map_copy_t copy;

    err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
                    false /* src_destroy */, &copy);

    assert( err == KERN_SUCCESS );
    if( err == KERN_SUCCESS )
        *buf = (char *) copy;

    return( err );
}

/* Routine io_registry_entry_get_property_bytes */
kern_return_t is_io_registry_entry_get_property_bytes(
        io_object_t registry_entry,
        io_name_t property_name,
        io_struct_inband_t buf,
        mach_msg_type_number_t *dataCnt )
{
    OSObject *   obj;
    OSData *     data;
    OSString *   str;
    OSBoolean *  boo;
    OSNumber *   off;
    UInt64       offsetBytes;
    unsigned int len = 0;
    const void * bytes = 0;
    IOReturn     ret = kIOReturnSuccess;

    CHECK( IORegistryEntry, registry_entry, entry );

    obj = entry->copyProperty(property_name);
    if( !obj)
        return( kIOReturnNoResources );

    // One day OSData will be a common container base class
    // until then...
    if( (data = OSDynamicCast( OSData, obj ))) {
        len = data->getLength();
        bytes = data->getBytesNoCopy();

    } else if( (str = OSDynamicCast( OSString, obj ))) {
        len = str->getLength() + 1;
        bytes = str->getCStringNoCopy();

    } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
        len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
        bytes = boo->isTrue() ? "Yes" : "No";

    } else if( (off = OSDynamicCast( OSNumber, obj ))) {
        offsetBytes = off->unsigned64BitValue();
        len = off->numberOfBytes();
        bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
        bytes = (const void *)
                (((UInt32) bytes) + (sizeof( UInt64) - len));
#endif

    } else
        ret = kIOReturnBadArgument;

    if( bytes) {
        if( *dataCnt < len)
            ret = kIOReturnIPCError;
        else {
            *dataCnt = len;
            bcopy( bytes, buf, len );
        }
    }
    obj->release();

    return( ret );
}


/* Routine io_registry_entry_get_property */
kern_return_t is_io_registry_entry_get_property(
        io_object_t registry_entry,
        io_name_t property_name,
        io_buf_ptr_t *properties,
        mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t     len;
    OSObject *    obj;

    CHECK( IORegistryEntry, registry_entry, entry );

    obj = entry->copyProperty(property_name);
    if( !obj)
        return( kIOReturnNotFound );

    OSSerialize * s = OSSerialize::withCapacity(4096);
    if( !s) {
        obj->release();
        return( kIOReturnNoMemory );
    }
    s->clearText();

    if( obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata( s->text(), len, properties );

    } else
        err = kIOReturnUnsupported;

    s->release();
    obj->release();

    return( err );
}

/* Routine io_registry_entry_get_property_recursively */
kern_return_t is_io_registry_entry_get_property_recursively(
        io_object_t registry_entry,
        io_name_t plane,
        io_name_t property_name,
        uint32_t options,
        io_buf_ptr_t *properties,
        mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t     len;
    OSObject *    obj;

    CHECK( IORegistryEntry, registry_entry, entry );

    obj = entry->copyProperty( property_name,
                               IORegistryEntry::getPlane( plane ), options);
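    // The plane/options variant of copyProperty() may search beyond this entry's
    // own property table (kIORegistryIterateRecursively, optionally combined with
    // kIORegistryIterateParents, walks the given plane); whatever is found is then
    // serialized and copied out exactly as in the non-recursive routine above.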
if( !obj) 2232 return( kIOReturnNotFound ); 2233 2234 OSSerialize * s = OSSerialize::withCapacity(4096); 2235 if( !s) { 2236 obj->release(); 2237 return( kIOReturnNoMemory ); 2238 } 2239 2240 s->clearText(); 2241 2242 if( obj->serialize( s )) { 2243 len = s->getLength(); 2244 *propertiesCnt = len; 2245 err = copyoutkdata( s->text(), len, properties ); 2246 2247 } else 2248 err = kIOReturnUnsupported; 2249 2250 s->release(); 2251 obj->release(); 2252 2253 return( err ); 2254 } 2255 2256 /* Routine io_registry_entry_get_properties */ 2257 kern_return_t is_io_registry_entry_get_properties( 2258 io_object_t registry_entry, 2259 io_buf_ptr_t *properties, 2260 mach_msg_type_number_t *propertiesCnt ) 2261 { 2262 kern_return_t err; 2263 vm_size_t len; 2264 2265 CHECK( IORegistryEntry, registry_entry, entry ); 2266 2267 OSSerialize * s = OSSerialize::withCapacity(4096); 2268 if( !s) 2269 return( kIOReturnNoMemory ); 2270 2271 s->clearText(); 2272 2273 if( entry->serializeProperties( s )) { 2274 len = s->getLength(); 2275 *propertiesCnt = len; 2276 err = copyoutkdata( s->text(), len, properties ); 2277 2278 } else 2279 err = kIOReturnUnsupported; 2280 2281 s->release(); 2282 2283 return( err ); 2284 } 2285 2286 /* Routine io_registry_entry_set_properties */ 2287 kern_return_t is_io_registry_entry_set_properties 2288 ( 2289 io_object_t registry_entry, 2290 io_buf_ptr_t properties, 2291 mach_msg_type_number_t propertiesCnt, 2292 kern_return_t * result) 2293 { 2294 OSObject * obj; 2295 kern_return_t err; 2296 IOReturn res; 2297 vm_offset_t data; 2298 vm_map_offset_t map_data; 2299 2300 CHECK( IORegistryEntry, registry_entry, entry ); 2301 2302 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); 2303 data = CAST_DOWN(vm_offset_t, map_data); 2304 2305 if( KERN_SUCCESS == err) { 2306 2307 // must return success after vm_map_copyout() succeeds 2308 obj = OSUnserializeXML( (const char *) data ); 2309 vm_deallocate( kernel_map, data, propertiesCnt ); 2310 2311 if (!obj) 2312 res = kIOReturnBadArgument; 2313 #if CONFIG_MACF 2314 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(), 2315 registry_entry, obj)) 2316 res = kIOReturnNotPermitted; 2317 #endif 2318 else 2319 res = entry->setProperties( obj ); 2320 if (obj) 2321 obj->release(); 2322 } else 2323 res = err; 2324 2325 *result = res; 2326 return( err ); 2327 } 2328 2329 /* Routine io_registry_entry_get_child_iterator */ 2330 kern_return_t is_io_registry_entry_get_child_iterator( 2331 io_object_t registry_entry, 2332 io_name_t plane, 2333 io_object_t *iterator ) 2334 { 2335 CHECK( IORegistryEntry, registry_entry, entry ); 2336 2337 *iterator = entry->getChildIterator( 2338 IORegistryEntry::getPlane( plane )); 2339 2340 return( kIOReturnSuccess ); 2341 } 2342 2343 /* Routine io_registry_entry_get_parent_iterator */ 2344 kern_return_t is_io_registry_entry_get_parent_iterator( 2345 io_object_t registry_entry, 2346 io_name_t plane, 2347 io_object_t *iterator) 2348 { 2349 CHECK( IORegistryEntry, registry_entry, entry ); 2350 2351 *iterator = entry->getParentIterator( 2352 IORegistryEntry::getPlane( plane )); 2353 2354 return( kIOReturnSuccess ); 2355 } 2356 2357 /* Routine io_service_get_busy_state */ 2358 kern_return_t is_io_service_get_busy_state( 2359 io_object_t _service, 2360 uint32_t *busyState ) 2361 { 2362 CHECK( IOService, _service, service ); 2363 2364 *busyState = service->getBusyState(); 2365 2366 return( kIOReturnSuccess ); 2367 } 2368 2369 /* Routine io_service_get_state */ 2370 kern_return_t 
is_io_service_get_state( 2371 io_object_t _service, 2372 uint64_t *state, 2373 uint32_t *busy_state, 2374 uint64_t *accumulated_busy_time ) 2375 { 2376 CHECK( IOService, _service, service ); 2377 2378 *state = service->getState(); 2379 *busy_state = service->getBusyState(); 2380 *accumulated_busy_time = service->getAccumulatedBusyTime(); 2381 2382 return( kIOReturnSuccess ); 2383 } 2384 2385 /* Routine io_service_wait_quiet */ 2386 kern_return_t is_io_service_wait_quiet( 2387 io_object_t _service, 2388 mach_timespec_t wait_time ) 2389 { 2390 uint64_t timeoutNS; 2391 2392 CHECK( IOService, _service, service ); 2393 2394 timeoutNS = wait_time.tv_sec; 2395 timeoutNS *= kSecondScale; 2396 timeoutNS += wait_time.tv_nsec; 2397 2398 return( service->waitQuiet(timeoutNS) ); 2399 } 2400 2401 /* Routine io_service_request_probe */ 2402 kern_return_t is_io_service_request_probe( 2403 io_object_t _service, 2404 uint32_t options ) 2405 { 2406 CHECK( IOService, _service, service ); 2407 2408 return( service->requestProbe( options )); 2409 } 2410 2411 /* Routine io_service_open_ndr */ 2412 kern_return_t is_io_service_open_extended( 2413 io_object_t _service, 2414 task_t owningTask, 2415 uint32_t connect_type, 2416 NDR_record_t ndr, 2417 io_buf_ptr_t properties, 2418 mach_msg_type_number_t propertiesCnt, 2419 kern_return_t * result, 2420 io_object_t *connection ) 2421 { 2422 IOUserClient * client = 0; 2423 kern_return_t err = KERN_SUCCESS; 2424 IOReturn res = kIOReturnSuccess; 2425 OSDictionary * propertiesDict = 0; 2426 bool crossEndian; 2427 bool disallowAccess; 2428 2429 CHECK( IOService, _service, service ); 2430 2431 do 2432 { 2433 if (properties) 2434 { 2435 OSObject * obj; 2436 vm_offset_t data; 2437 vm_map_offset_t map_data; 2438 2439 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); 2440 res = err; 2441 data = CAST_DOWN(vm_offset_t, map_data); 2442 if (KERN_SUCCESS == err) 2443 { 2444 // must return success after vm_map_copyout() succeeds 2445 obj = OSUnserializeXML( (const char *) data ); 2446 vm_deallocate( kernel_map, data, propertiesCnt ); 2447 propertiesDict = OSDynamicCast(OSDictionary, obj); 2448 if (!propertiesDict) 2449 { 2450 res = kIOReturnBadArgument; 2451 if (obj) 2452 obj->release(); 2453 } 2454 } 2455 if (kIOReturnSuccess != res) 2456 break; 2457 } 2458 2459 crossEndian = (ndr.int_rep != NDR_record.int_rep); 2460 if (crossEndian) 2461 { 2462 if (!propertiesDict) 2463 propertiesDict = OSDictionary::withCapacity(4); 2464 OSData * data = OSData::withBytes(&ndr, sizeof(ndr)); 2465 if (data) 2466 { 2467 if (propertiesDict) 2468 propertiesDict->setObject(kIOUserClientCrossEndianKey, data); 2469 data->release(); 2470 } 2471 } 2472 2473 res = service->newUserClient( owningTask, (void *) owningTask, 2474 connect_type, propertiesDict, &client ); 2475 2476 if (propertiesDict) 2477 propertiesDict->release(); 2478 2479 if (res == kIOReturnSuccess) 2480 { 2481 assert( OSDynamicCast(IOUserClient, client) ); 2482 2483 disallowAccess = (crossEndian 2484 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey)) 2485 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey))); 2486 if (disallowAccess) res = kIOReturnUnsupported; 2487 #if CONFIG_MACF 2488 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) 2489 res = kIOReturnNotPermitted; 2490 #endif 2491 if (kIOReturnSuccess != res) 2492 { 2493 IOStatisticsClientCall(); 2494 client->clientClose(); 2495 client->release(); 2496 client = 0; 2497 break; 2498 
} 2499 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey)); 2500 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid()); 2501 if (creatorName) 2502 { 2503 client->setProperty(kIOUserClientCreatorKey, creatorName); 2504 creatorName->release(); 2505 } 2506 } 2507 } 2508 while (false); 2509 2510 *connection = client; 2511 *result = res; 2512 2513 return (err); 2514 } 2515 2516 /* Routine io_service_close */ 2517 kern_return_t is_io_service_close( 2518 io_object_t connection ) 2519 { 2520 OSSet * mappings; 2521 if ((mappings = OSDynamicCast(OSSet, connection))) 2522 return( kIOReturnSuccess ); 2523 2524 CHECK( IOUserClient, connection, client ); 2525 2526 IOStatisticsClientCall(); 2527 client->clientClose(); 2528 2529 return( kIOReturnSuccess ); 2530 } 2531 2532 /* Routine io_connect_get_service */ 2533 kern_return_t is_io_connect_get_service( 2534 io_object_t connection, 2535 io_object_t *service ) 2536 { 2537 IOService * theService; 2538 2539 CHECK( IOUserClient, connection, client ); 2540 2541 theService = client->getService(); 2542 if( theService) 2543 theService->retain(); 2544 2545 *service = theService; 2546 2547 return( theService ? kIOReturnSuccess : kIOReturnUnsupported ); 2548 } 2549 2550 /* Routine io_connect_set_notification_port */ 2551 kern_return_t is_io_connect_set_notification_port( 2552 io_object_t connection, 2553 uint32_t notification_type, 2554 mach_port_t port, 2555 uint32_t reference) 2556 { 2557 CHECK( IOUserClient, connection, client ); 2558 2559 IOStatisticsClientCall(); 2560 return( client->registerNotificationPort( port, notification_type, 2561 (io_user_reference_t) reference )); 2562 } 2563 2564 /* Routine io_connect_set_notification_port */ 2565 kern_return_t is_io_connect_set_notification_port_64( 2566 io_object_t connection, 2567 uint32_t notification_type, 2568 mach_port_t port, 2569 io_user_reference_t reference) 2570 { 2571 CHECK( IOUserClient, connection, client ); 2572 2573 IOStatisticsClientCall(); 2574 return( client->registerNotificationPort( port, notification_type, 2575 reference )); 2576 } 2577 2578 /* Routine io_connect_map_memory_into_task */ 2579 kern_return_t is_io_connect_map_memory_into_task 2580 ( 2581 io_connect_t connection, 2582 uint32_t memory_type, 2583 task_t into_task, 2584 mach_vm_address_t *address, 2585 mach_vm_size_t *size, 2586 uint32_t flags 2587 ) 2588 { 2589 IOReturn err; 2590 IOMemoryMap * map; 2591 2592 CHECK( IOUserClient, connection, client ); 2593 2594 IOStatisticsClientCall(); 2595 map = client->mapClientMemory64( memory_type, into_task, flags, *address ); 2596 2597 if( map) { 2598 *address = map->getAddress(); 2599 if( size) 2600 *size = map->getSize(); 2601 2602 if( client->sharedInstance 2603 || (into_task != current_task())) { 2604 // push a name out to the task owning the map, 2605 // so we can clean up maps 2606 mach_port_name_t name __unused = 2607 IOMachPort::makeSendRightForTask( 2608 into_task, map, IKOT_IOKIT_OBJECT ); 2609 assert( name ); 2610 2611 } else { 2612 // keep it with the user client 2613 IOLockLock( gIOObjectPortLock); 2614 if( 0 == client->mappings) 2615 client->mappings = OSSet::withCapacity(2); 2616 if( client->mappings) 2617 client->mappings->setObject( map); 2618 IOLockUnlock( gIOObjectPortLock); 2619 map->release(); 2620 } 2621 err = kIOReturnSuccess; 2622 2623 } else 2624 err = kIOReturnBadArgument; 2625 2626 return( err ); 2627 } 2628 2629 /* Routine is_io_connect_map_memory */ 2630 kern_return_t is_io_connect_map_memory( 2631 io_object_t connect, 2632 
uint32_t type, 2633 task_t task, 2634 vm_address_t * mapAddr, 2635 vm_size_t * mapSize, 2636 uint32_t flags ) 2637 { 2638 IOReturn err; 2639 mach_vm_address_t address; 2640 mach_vm_size_t size; 2641 2642 address = SCALAR64(*mapAddr); 2643 size = SCALAR64(*mapSize); 2644 2645 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags); 2646 2647 *mapAddr = SCALAR32(address); 2648 *mapSize = SCALAR32(size); 2649 2650 return (err); 2651 } 2652 2653 } /* extern "C" */ 2654 2655 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem) 2656 { 2657 OSIterator * iter; 2658 IOMemoryMap * map = 0; 2659 2660 IOLockLock(gIOObjectPortLock); 2661 2662 iter = OSCollectionIterator::withCollection(mappings); 2663 if(iter) 2664 { 2665 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) 2666 { 2667 if(mem == map->getMemoryDescriptor()) 2668 { 2669 map->retain(); 2670 mappings->removeObject(map); 2671 break; 2672 } 2673 } 2674 iter->release(); 2675 } 2676 2677 IOLockUnlock(gIOObjectPortLock); 2678 2679 return (map); 2680 } 2681 2682 extern "C" { 2683 2684 /* Routine io_connect_unmap_memory_from_task */ 2685 kern_return_t is_io_connect_unmap_memory_from_task 2686 ( 2687 io_connect_t connection, 2688 uint32_t memory_type, 2689 task_t from_task, 2690 mach_vm_address_t address) 2691 { 2692 IOReturn err; 2693 IOOptionBits options = 0; 2694 IOMemoryDescriptor * memory; 2695 IOMemoryMap * map; 2696 2697 CHECK( IOUserClient, connection, client ); 2698 2699 IOStatisticsClientCall(); 2700 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory ); 2701 2702 if( memory && (kIOReturnSuccess == err)) { 2703 2704 options = (options & ~kIOMapUserOptionsMask) 2705 | kIOMapAnywhere | kIOMapReference; 2706 2707 map = memory->createMappingInTask( from_task, address, options ); 2708 memory->release(); 2709 if( map) 2710 { 2711 IOLockLock( gIOObjectPortLock); 2712 if( client->mappings) 2713 client->mappings->removeObject( map); 2714 IOLockUnlock( gIOObjectPortLock); 2715 2716 mach_port_name_t name = 0; 2717 if (from_task != current_task()) 2718 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT ); 2719 if (name) 2720 { 2721 map->userClientUnmap(); 2722 err = iokit_mod_send_right( from_task, name, -2 ); 2723 err = kIOReturnSuccess; 2724 } 2725 else 2726 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT ); 2727 if (from_task == current_task()) 2728 map->release(); 2729 } 2730 else 2731 err = kIOReturnBadArgument; 2732 } 2733 2734 return( err ); 2735 } 2736 2737 kern_return_t is_io_connect_unmap_memory( 2738 io_object_t connect, 2739 uint32_t type, 2740 task_t task, 2741 vm_address_t mapAddr ) 2742 { 2743 IOReturn err; 2744 mach_vm_address_t address; 2745 2746 address = SCALAR64(mapAddr); 2747 2748 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr); 2749 2750 return (err); 2751 } 2752 2753 2754 /* Routine io_connect_add_client */ 2755 kern_return_t is_io_connect_add_client( 2756 io_object_t connection, 2757 io_object_t connect_to) 2758 { 2759 CHECK( IOUserClient, connection, client ); 2760 CHECK( IOUserClient, connect_to, to ); 2761 2762 IOStatisticsClientCall(); 2763 return( client->connectClient( to ) ); 2764 } 2765 2766 2767 /* Routine io_connect_set_properties */ 2768 kern_return_t is_io_connect_set_properties( 2769 io_object_t connection, 2770 io_buf_ptr_t properties, 2771 mach_msg_type_number_t propertiesCnt, 2772 kern_return_t * result) 2773 { 2774 return( is_io_registry_entry_set_properties( 
connection, properties, propertiesCnt, result )); 2775 } 2776 2777 2778 /* Routine io_user_client_method */ 2779 kern_return_t is_io_connect_method 2780 ( 2781 io_connect_t connection, 2782 uint32_t selector, 2783 io_scalar_inband64_t scalar_input, 2784 mach_msg_type_number_t scalar_inputCnt, 2785 io_struct_inband_t inband_input, 2786 mach_msg_type_number_t inband_inputCnt, 2787 mach_vm_address_t ool_input, 2788 mach_vm_size_t ool_input_size, 2789 io_struct_inband_t inband_output, 2790 mach_msg_type_number_t *inband_outputCnt, 2791 io_scalar_inband64_t scalar_output, 2792 mach_msg_type_number_t *scalar_outputCnt, 2793 mach_vm_address_t ool_output, 2794 mach_vm_size_t * ool_output_size 2795 ) 2796 { 2797 CHECK( IOUserClient, connection, client ); 2798 2799 IOExternalMethodArguments args; 2800 IOReturn ret; 2801 IOMemoryDescriptor * inputMD = 0; 2802 IOMemoryDescriptor * outputMD = 0; 2803 2804 bzero(&args.__reserved[0], sizeof(args.__reserved)); 2805 args.version = kIOExternalMethodArgumentsCurrentVersion; 2806 2807 args.selector = selector; 2808 2809 args.asyncWakePort = MACH_PORT_NULL; 2810 args.asyncReference = 0; 2811 args.asyncReferenceCount = 0; 2812 2813 args.scalarInput = scalar_input; 2814 args.scalarInputCount = scalar_inputCnt; 2815 args.structureInput = inband_input; 2816 args.structureInputSize = inband_inputCnt; 2817 2818 if (ool_input) 2819 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, 2820 kIODirectionOut, current_task()); 2821 2822 args.structureInputDescriptor = inputMD; 2823 2824 args.scalarOutput = scalar_output; 2825 args.scalarOutputCount = *scalar_outputCnt; 2826 args.structureOutput = inband_output; 2827 args.structureOutputSize = *inband_outputCnt; 2828 2829 if (ool_output) 2830 { 2831 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, 2832 kIODirectionIn, current_task()); 2833 } 2834 2835 args.structureOutputDescriptor = outputMD; 2836 args.structureOutputDescriptorSize = *ool_output_size; 2837 2838 IOStatisticsClientCall(); 2839 ret = client->externalMethod( selector, &args ); 2840 2841 *scalar_outputCnt = args.scalarOutputCount; 2842 *inband_outputCnt = args.structureOutputSize; 2843 *ool_output_size = args.structureOutputDescriptorSize; 2844 2845 if (inputMD) 2846 inputMD->release(); 2847 if (outputMD) 2848 outputMD->release(); 2849 2850 return (ret); 2851 } 2852 2853 /* Routine io_async_user_client_method */ 2854 kern_return_t is_io_connect_async_method 2855 ( 2856 io_connect_t connection, 2857 mach_port_t wake_port, 2858 io_async_ref64_t reference, 2859 mach_msg_type_number_t referenceCnt, 2860 uint32_t selector, 2861 io_scalar_inband64_t scalar_input, 2862 mach_msg_type_number_t scalar_inputCnt, 2863 io_struct_inband_t inband_input, 2864 mach_msg_type_number_t inband_inputCnt, 2865 mach_vm_address_t ool_input, 2866 mach_vm_size_t ool_input_size, 2867 io_struct_inband_t inband_output, 2868 mach_msg_type_number_t *inband_outputCnt, 2869 io_scalar_inband64_t scalar_output, 2870 mach_msg_type_number_t *scalar_outputCnt, 2871 mach_vm_address_t ool_output, 2872 mach_vm_size_t * ool_output_size 2873 ) 2874 { 2875 CHECK( IOUserClient, connection, client ); 2876 2877 IOExternalMethodArguments args; 2878 IOReturn ret; 2879 IOMemoryDescriptor * inputMD = 0; 2880 IOMemoryDescriptor * outputMD = 0; 2881 2882 bzero(&args.__reserved[0], sizeof(args.__reserved)); 2883 args.version = kIOExternalMethodArgumentsCurrentVersion; 2884 2885 reference[0] = (io_user_reference_t) wake_port; 2886 if 
(vm_map_is_64bit(get_task_map(current_task()))) 2887 reference[0] |= kIOUCAsync64Flag; 2888 2889 args.selector = selector; 2890 2891 args.asyncWakePort = wake_port; 2892 args.asyncReference = reference; 2893 args.asyncReferenceCount = referenceCnt; 2894 2895 args.scalarInput = scalar_input; 2896 args.scalarInputCount = scalar_inputCnt; 2897 args.structureInput = inband_input; 2898 args.structureInputSize = inband_inputCnt; 2899 2900 if (ool_input) 2901 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, 2902 kIODirectionOut, current_task()); 2903 2904 args.structureInputDescriptor = inputMD; 2905 2906 args.scalarOutput = scalar_output; 2907 args.scalarOutputCount = *scalar_outputCnt; 2908 args.structureOutput = inband_output; 2909 args.structureOutputSize = *inband_outputCnt; 2910 2911 if (ool_output) 2912 { 2913 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, 2914 kIODirectionIn, current_task()); 2915 } 2916 2917 args.structureOutputDescriptor = outputMD; 2918 args.structureOutputDescriptorSize = *ool_output_size; 2919 2920 IOStatisticsClientCall(); 2921 ret = client->externalMethod( selector, &args ); 2922 2923 *inband_outputCnt = args.structureOutputSize; 2924 *ool_output_size = args.structureOutputDescriptorSize; 2925 2926 if (inputMD) 2927 inputMD->release(); 2928 if (outputMD) 2929 outputMD->release(); 2930 2931 return (ret); 2932 } 2933 2934 /* Routine io_connect_method_scalarI_scalarO */ 2935 kern_return_t is_io_connect_method_scalarI_scalarO( 2936 io_object_t connect, 2937 uint32_t index, 2938 io_scalar_inband_t input, 2939 mach_msg_type_number_t inputCount, 2940 io_scalar_inband_t output, 2941 mach_msg_type_number_t * outputCount ) 2942 { 2943 IOReturn err; 2944 uint32_t i; 2945 io_scalar_inband64_t _input; 2946 io_scalar_inband64_t _output; 2947 2948 mach_msg_type_number_t struct_outputCnt = 0; 2949 mach_vm_size_t ool_output_size = 0; 2950 2951 for (i = 0; i < inputCount; i++) 2952 _input[i] = SCALAR64(input[i]); 2953 2954 err = is_io_connect_method(connect, index, 2955 _input, inputCount, 2956 NULL, 0, 2957 0, 0, 2958 NULL, &struct_outputCnt, 2959 _output, outputCount, 2960 0, &ool_output_size); 2961 2962 for (i = 0; i < *outputCount; i++) 2963 output[i] = SCALAR32(_output[i]); 2964 2965 return (err); 2966 } 2967 2968 kern_return_t shim_io_connect_method_scalarI_scalarO( 2969 IOExternalMethod * method, 2970 IOService * object, 2971 const io_user_scalar_t * input, 2972 mach_msg_type_number_t inputCount, 2973 io_user_scalar_t * output, 2974 mach_msg_type_number_t * outputCount ) 2975 { 2976 IOMethod func; 2977 io_scalar_inband_t _output; 2978 IOReturn err; 2979 err = kIOReturnBadArgument; 2980 2981 do { 2982 2983 if( inputCount != method->count0) 2984 { 2985 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 2986 continue; 2987 } 2988 if( *outputCount != method->count1) 2989 { 2990 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 2991 continue; 2992 } 2993 2994 func = method->func; 2995 2996 switch( inputCount) { 2997 2998 case 6: 2999 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3000 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) ); 3001 break; 3002 case 5: 3003 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3004 ARG32(input[3]), ARG32(input[4]), 3005 &_output[0] ); 3006 break; 3007 case 4: 3008 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3009 ARG32(input[3]), 3010 &_output[0], &_output[1] ); 
3011 break; 3012 case 3: 3013 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3014 &_output[0], &_output[1], &_output[2] ); 3015 break; 3016 case 2: 3017 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), 3018 &_output[0], &_output[1], &_output[2], 3019 &_output[3] ); 3020 break; 3021 case 1: 3022 err = (object->*func)( ARG32(input[0]), 3023 &_output[0], &_output[1], &_output[2], 3024 &_output[3], &_output[4] ); 3025 break; 3026 case 0: 3027 err = (object->*func)( &_output[0], &_output[1], &_output[2], 3028 &_output[3], &_output[4], &_output[5] ); 3029 break; 3030 3031 default: 3032 IOLog("%s: Bad method table\n", object->getName()); 3033 } 3034 } 3035 while( false); 3036 3037 uint32_t i; 3038 for (i = 0; i < *outputCount; i++) 3039 output[i] = SCALAR32(_output[i]); 3040 3041 return( err); 3042 } 3043 3044 /* Routine io_async_method_scalarI_scalarO */ 3045 kern_return_t is_io_async_method_scalarI_scalarO( 3046 io_object_t connect, 3047 mach_port_t wake_port, 3048 io_async_ref_t reference, 3049 mach_msg_type_number_t referenceCnt, 3050 uint32_t index, 3051 io_scalar_inband_t input, 3052 mach_msg_type_number_t inputCount, 3053 io_scalar_inband_t output, 3054 mach_msg_type_number_t * outputCount ) 3055 { 3056 IOReturn err; 3057 uint32_t i; 3058 io_scalar_inband64_t _input; 3059 io_scalar_inband64_t _output; 3060 io_async_ref64_t _reference; 3061 3062 for (i = 0; i < referenceCnt; i++) 3063 _reference[i] = REF64(reference[i]); 3064 3065 mach_msg_type_number_t struct_outputCnt = 0; 3066 mach_vm_size_t ool_output_size = 0; 3067 3068 for (i = 0; i < inputCount; i++) 3069 _input[i] = SCALAR64(input[i]); 3070 3071 err = is_io_connect_async_method(connect, 3072 wake_port, _reference, referenceCnt, 3073 index, 3074 _input, inputCount, 3075 NULL, 0, 3076 0, 0, 3077 NULL, &struct_outputCnt, 3078 _output, outputCount, 3079 0, &ool_output_size); 3080 3081 for (i = 0; i < *outputCount; i++) 3082 output[i] = SCALAR32(_output[i]); 3083 3084 return (err); 3085 } 3086 /* Routine io_async_method_scalarI_structureO */ 3087 kern_return_t is_io_async_method_scalarI_structureO( 3088 io_object_t connect, 3089 mach_port_t wake_port, 3090 io_async_ref_t reference, 3091 mach_msg_type_number_t referenceCnt, 3092 uint32_t index, 3093 io_scalar_inband_t input, 3094 mach_msg_type_number_t inputCount, 3095 io_struct_inband_t output, 3096 mach_msg_type_number_t * outputCount ) 3097 { 3098 uint32_t i; 3099 io_scalar_inband64_t _input; 3100 io_async_ref64_t _reference; 3101 3102 for (i = 0; i < referenceCnt; i++) 3103 _reference[i] = REF64(reference[i]); 3104 3105 mach_msg_type_number_t scalar_outputCnt = 0; 3106 mach_vm_size_t ool_output_size = 0; 3107 3108 for (i = 0; i < inputCount; i++) 3109 _input[i] = SCALAR64(input[i]); 3110 3111 return (is_io_connect_async_method(connect, 3112 wake_port, _reference, referenceCnt, 3113 index, 3114 _input, inputCount, 3115 NULL, 0, 3116 0, 0, 3117 output, outputCount, 3118 NULL, &scalar_outputCnt, 3119 0, &ool_output_size)); 3120 } 3121 3122 /* Routine io_async_method_scalarI_structureI */ 3123 kern_return_t is_io_async_method_scalarI_structureI( 3124 io_connect_t connect, 3125 mach_port_t wake_port, 3126 io_async_ref_t reference, 3127 mach_msg_type_number_t referenceCnt, 3128 uint32_t index, 3129 io_scalar_inband_t input, 3130 mach_msg_type_number_t inputCount, 3131 io_struct_inband_t inputStruct, 3132 mach_msg_type_number_t inputStructCount ) 3133 { 3134 uint32_t i; 3135 io_scalar_inband64_t _input; 3136 io_async_ref64_t _reference; 3137 3138 for (i = 
0; i < referenceCnt; i++) 3139 _reference[i] = REF64(reference[i]); 3140 3141 mach_msg_type_number_t scalar_outputCnt = 0; 3142 mach_msg_type_number_t inband_outputCnt = 0; 3143 mach_vm_size_t ool_output_size = 0; 3144 3145 for (i = 0; i < inputCount; i++) 3146 _input[i] = SCALAR64(input[i]); 3147 3148 return (is_io_connect_async_method(connect, 3149 wake_port, _reference, referenceCnt, 3150 index, 3151 _input, inputCount, 3152 inputStruct, inputStructCount, 3153 0, 0, 3154 NULL, &inband_outputCnt, 3155 NULL, &scalar_outputCnt, 3156 0, &ool_output_size)); 3157 } 3158 3159 /* Routine io_async_method_structureI_structureO */ 3160 kern_return_t is_io_async_method_structureI_structureO( 3161 io_object_t connect, 3162 mach_port_t wake_port, 3163 io_async_ref_t reference, 3164 mach_msg_type_number_t referenceCnt, 3165 uint32_t index, 3166 io_struct_inband_t input, 3167 mach_msg_type_number_t inputCount, 3168 io_struct_inband_t output, 3169 mach_msg_type_number_t * outputCount ) 3170 { 3171 uint32_t i; 3172 mach_msg_type_number_t scalar_outputCnt = 0; 3173 mach_vm_size_t ool_output_size = 0; 3174 io_async_ref64_t _reference; 3175 3176 for (i = 0; i < referenceCnt; i++) 3177 _reference[i] = REF64(reference[i]); 3178 3179 return (is_io_connect_async_method(connect, 3180 wake_port, _reference, referenceCnt, 3181 index, 3182 NULL, 0, 3183 input, inputCount, 3184 0, 0, 3185 output, outputCount, 3186 NULL, &scalar_outputCnt, 3187 0, &ool_output_size)); 3188 } 3189 3190 3191 kern_return_t shim_io_async_method_scalarI_scalarO( 3192 IOExternalAsyncMethod * method, 3193 IOService * object, 3194 mach_port_t asyncWakePort, 3195 io_user_reference_t * asyncReference, 3196 uint32_t asyncReferenceCount, 3197 const io_user_scalar_t * input, 3198 mach_msg_type_number_t inputCount, 3199 io_user_scalar_t * output, 3200 mach_msg_type_number_t * outputCount ) 3201 { 3202 IOAsyncMethod func; 3203 uint32_t i; 3204 io_scalar_inband_t _output; 3205 IOReturn err; 3206 io_async_ref_t reference; 3207 3208 for (i = 0; i < asyncReferenceCount; i++) 3209 reference[i] = REF32(asyncReference[i]); 3210 3211 err = kIOReturnBadArgument; 3212 3213 do { 3214 3215 if( inputCount != method->count0) 3216 { 3217 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 3218 continue; 3219 } 3220 if( *outputCount != method->count1) 3221 { 3222 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 3223 continue; 3224 } 3225 3226 func = method->func; 3227 3228 switch( inputCount) { 3229 3230 case 6: 3231 err = (object->*func)( reference, 3232 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3233 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) ); 3234 break; 3235 case 5: 3236 err = (object->*func)( reference, 3237 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3238 ARG32(input[3]), ARG32(input[4]), 3239 &_output[0] ); 3240 break; 3241 case 4: 3242 err = (object->*func)( reference, 3243 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3244 ARG32(input[3]), 3245 &_output[0], &_output[1] ); 3246 break; 3247 case 3: 3248 err = (object->*func)( reference, 3249 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3250 &_output[0], &_output[1], &_output[2] ); 3251 break; 3252 case 2: 3253 err = (object->*func)( reference, 3254 ARG32(input[0]), ARG32(input[1]), 3255 &_output[0], &_output[1], &_output[2], 3256 &_output[3] ); 3257 break; 3258 case 1: 3259 err = (object->*func)( reference, 3260 ARG32(input[0]), 3261 &_output[0], &_output[1], &_output[2], 3262 &_output[3], &_output[4] ); 3263 break; 3264 case 
0: 3265 err = (object->*func)( reference, 3266 &_output[0], &_output[1], &_output[2], 3267 &_output[3], &_output[4], &_output[5] ); 3268 break; 3269 3270 default: 3271 IOLog("%s: Bad method table\n", object->getName()); 3272 } 3273 } 3274 while( false); 3275 3276 for (i = 0; i < *outputCount; i++) 3277 output[i] = SCALAR32(_output[i]); 3278 3279 return( err); 3280 } 3281 3282 3283 /* Routine io_connect_method_scalarI_structureO */ 3284 kern_return_t is_io_connect_method_scalarI_structureO( 3285 io_object_t connect, 3286 uint32_t index, 3287 io_scalar_inband_t input, 3288 mach_msg_type_number_t inputCount, 3289 io_struct_inband_t output, 3290 mach_msg_type_number_t * outputCount ) 3291 { 3292 uint32_t i; 3293 io_scalar_inband64_t _input; 3294 3295 mach_msg_type_number_t scalar_outputCnt = 0; 3296 mach_vm_size_t ool_output_size = 0; 3297 3298 for (i = 0; i < inputCount; i++) 3299 _input[i] = SCALAR64(input[i]); 3300 3301 return (is_io_connect_method(connect, index, 3302 _input, inputCount, 3303 NULL, 0, 3304 0, 0, 3305 output, outputCount, 3306 NULL, &scalar_outputCnt, 3307 0, &ool_output_size)); 3308 } 3309 3310 kern_return_t shim_io_connect_method_scalarI_structureO( 3311 3312 IOExternalMethod * method, 3313 IOService * object, 3314 const io_user_scalar_t * input, 3315 mach_msg_type_number_t inputCount, 3316 io_struct_inband_t output, 3317 IOByteCount * outputCount ) 3318 { 3319 IOMethod func; 3320 IOReturn err; 3321 3322 err = kIOReturnBadArgument; 3323 3324 do { 3325 if( inputCount != method->count0) 3326 { 3327 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 3328 continue; 3329 } 3330 if( (kIOUCVariableStructureSize != method->count1) 3331 && (*outputCount != method->count1)) 3332 { 3333 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 3334 continue; 3335 } 3336 3337 func = method->func; 3338 3339 switch( inputCount) { 3340 3341 case 5: 3342 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3343 ARG32(input[3]), ARG32(input[4]), 3344 output ); 3345 break; 3346 case 4: 3347 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3348 ARG32(input[3]), 3349 output, (void *)outputCount ); 3350 break; 3351 case 3: 3352 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3353 output, (void *)outputCount, 0 ); 3354 break; 3355 case 2: 3356 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), 3357 output, (void *)outputCount, 0, 0 ); 3358 break; 3359 case 1: 3360 err = (object->*func)( ARG32(input[0]), 3361 output, (void *)outputCount, 0, 0, 0 ); 3362 break; 3363 case 0: 3364 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 ); 3365 break; 3366 3367 default: 3368 IOLog("%s: Bad method table\n", object->getName()); 3369 } 3370 } 3371 while( false); 3372 3373 return( err); 3374 } 3375 3376 3377 kern_return_t shim_io_async_method_scalarI_structureO( 3378 IOExternalAsyncMethod * method, 3379 IOService * object, 3380 mach_port_t asyncWakePort, 3381 io_user_reference_t * asyncReference, 3382 uint32_t asyncReferenceCount, 3383 const io_user_scalar_t * input, 3384 mach_msg_type_number_t inputCount, 3385 io_struct_inband_t output, 3386 mach_msg_type_number_t * outputCount ) 3387 { 3388 IOAsyncMethod func; 3389 uint32_t i; 3390 IOReturn err; 3391 io_async_ref_t reference; 3392 3393 for (i = 0; i < asyncReferenceCount; i++) 3394 reference[i] = REF32(asyncReference[i]); 3395 3396 err = kIOReturnBadArgument; 3397 do { 3398 if( inputCount != method->count0) 3399 { 3400 
IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 3401 continue; 3402 } 3403 if( (kIOUCVariableStructureSize != method->count1) 3404 && (*outputCount != method->count1)) 3405 { 3406 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 3407 continue; 3408 } 3409 3410 func = method->func; 3411 3412 switch( inputCount) { 3413 3414 case 5: 3415 err = (object->*func)( reference, 3416 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3417 ARG32(input[3]), ARG32(input[4]), 3418 output ); 3419 break; 3420 case 4: 3421 err = (object->*func)( reference, 3422 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3423 ARG32(input[3]), 3424 output, (void *)outputCount ); 3425 break; 3426 case 3: 3427 err = (object->*func)( reference, 3428 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3429 output, (void *)outputCount, 0 ); 3430 break; 3431 case 2: 3432 err = (object->*func)( reference, 3433 ARG32(input[0]), ARG32(input[1]), 3434 output, (void *)outputCount, 0, 0 ); 3435 break; 3436 case 1: 3437 err = (object->*func)( reference, 3438 ARG32(input[0]), 3439 output, (void *)outputCount, 0, 0, 0 ); 3440 break; 3441 case 0: 3442 err = (object->*func)( reference, 3443 output, (void *)outputCount, 0, 0, 0, 0 ); 3444 break; 3445 3446 default: 3447 IOLog("%s: Bad method table\n", object->getName()); 3448 } 3449 } 3450 while( false); 3451 3452 return( err); 3453 } 3454 3455 /* Routine io_connect_method_scalarI_structureI */ 3456 kern_return_t is_io_connect_method_scalarI_structureI( 3457 io_connect_t connect, 3458 uint32_t index, 3459 io_scalar_inband_t input, 3460 mach_msg_type_number_t inputCount, 3461 io_struct_inband_t inputStruct, 3462 mach_msg_type_number_t inputStructCount ) 3463 { 3464 uint32_t i; 3465 io_scalar_inband64_t _input; 3466 3467 mach_msg_type_number_t scalar_outputCnt = 0; 3468 mach_msg_type_number_t inband_outputCnt = 0; 3469 mach_vm_size_t ool_output_size = 0; 3470 3471 for (i = 0; i < inputCount; i++) 3472 _input[i] = SCALAR64(input[i]); 3473 3474 return (is_io_connect_method(connect, index, 3475 _input, inputCount, 3476 inputStruct, inputStructCount, 3477 0, 0, 3478 NULL, &inband_outputCnt, 3479 NULL, &scalar_outputCnt, 3480 0, &ool_output_size)); 3481 } 3482 3483 kern_return_t shim_io_connect_method_scalarI_structureI( 3484 IOExternalMethod * method, 3485 IOService * object, 3486 const io_user_scalar_t * input, 3487 mach_msg_type_number_t inputCount, 3488 io_struct_inband_t inputStruct, 3489 mach_msg_type_number_t inputStructCount ) 3490 { 3491 IOMethod func; 3492 IOReturn err = kIOReturnBadArgument; 3493 3494 do 3495 { 3496 if( (kIOUCVariableStructureSize != method->count0) 3497 && (inputCount != method->count0)) 3498 { 3499 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 3500 continue; 3501 } 3502 if( (kIOUCVariableStructureSize != method->count1) 3503 && (inputStructCount != method->count1)) 3504 { 3505 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 3506 continue; 3507 } 3508 3509 func = method->func; 3510 3511 switch( inputCount) { 3512 3513 case 5: 3514 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3515 ARG32(input[3]), ARG32(input[4]), 3516 inputStruct ); 3517 break; 3518 case 4: 3519 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2], 3520 ARG32(input[3]), 3521 inputStruct, (void *)inputStructCount ); 3522 break; 3523 case 3: 3524 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3525 inputStruct, (void 
*)inputStructCount, 3526 0 ); 3527 break; 3528 case 2: 3529 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), 3530 inputStruct, (void *)inputStructCount, 3531 0, 0 ); 3532 break; 3533 case 1: 3534 err = (object->*func)( ARG32(input[0]), 3535 inputStruct, (void *)inputStructCount, 3536 0, 0, 0 ); 3537 break; 3538 case 0: 3539 err = (object->*func)( inputStruct, (void *)inputStructCount, 3540 0, 0, 0, 0 ); 3541 break; 3542 3543 default: 3544 IOLog("%s: Bad method table\n", object->getName()); 3545 } 3546 } 3547 while (false); 3548 3549 return( err); 3550 } 3551 3552 kern_return_t shim_io_async_method_scalarI_structureI( 3553 IOExternalAsyncMethod * method, 3554 IOService * object, 3555 mach_port_t asyncWakePort, 3556 io_user_reference_t * asyncReference, 3557 uint32_t asyncReferenceCount, 3558 const io_user_scalar_t * input, 3559 mach_msg_type_number_t inputCount, 3560 io_struct_inband_t inputStruct, 3561 mach_msg_type_number_t inputStructCount ) 3562 { 3563 IOAsyncMethod func; 3564 uint32_t i; 3565 IOReturn err = kIOReturnBadArgument; 3566 io_async_ref_t reference; 3567 3568 for (i = 0; i < asyncReferenceCount; i++) 3569 reference[i] = REF32(asyncReference[i]); 3570 3571 do 3572 { 3573 if( (kIOUCVariableStructureSize != method->count0) 3574 && (inputCount != method->count0)) 3575 { 3576 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 3577 continue; 3578 } 3579 if( (kIOUCVariableStructureSize != method->count1) 3580 && (inputStructCount != method->count1)) 3581 { 3582 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 3583 continue; 3584 } 3585 3586 func = method->func; 3587 3588 switch( inputCount) { 3589 3590 case 5: 3591 err = (object->*func)( reference, 3592 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3593 ARG32(input[3]), ARG32(input[4]), 3594 inputStruct ); 3595 break; 3596 case 4: 3597 err = (object->*func)( reference, 3598 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3599 ARG32(input[3]), 3600 inputStruct, (void *)inputStructCount ); 3601 break; 3602 case 3: 3603 err = (object->*func)( reference, 3604 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), 3605 inputStruct, (void *)inputStructCount, 3606 0 ); 3607 break; 3608 case 2: 3609 err = (object->*func)( reference, 3610 ARG32(input[0]), ARG32(input[1]), 3611 inputStruct, (void *)inputStructCount, 3612 0, 0 ); 3613 break; 3614 case 1: 3615 err = (object->*func)( reference, 3616 ARG32(input[0]), 3617 inputStruct, (void *)inputStructCount, 3618 0, 0, 0 ); 3619 break; 3620 case 0: 3621 err = (object->*func)( reference, 3622 inputStruct, (void *)inputStructCount, 3623 0, 0, 0, 0 ); 3624 break; 3625 3626 default: 3627 IOLog("%s: Bad method table\n", object->getName()); 3628 } 3629 } 3630 while (false); 3631 3632 return( err); 3633 } 3634 3635 /* Routine io_connect_method_structureI_structureO */ 3636 kern_return_t is_io_connect_method_structureI_structureO( 3637 io_object_t connect, 3638 uint32_t index, 3639 io_struct_inband_t input, 3640 mach_msg_type_number_t inputCount, 3641 io_struct_inband_t output, 3642 mach_msg_type_number_t * outputCount ) 3643 { 3644 mach_msg_type_number_t scalar_outputCnt = 0; 3645 mach_vm_size_t ool_output_size = 0; 3646 3647 return (is_io_connect_method(connect, index, 3648 NULL, 0, 3649 input, inputCount, 3650 0, 0, 3651 output, outputCount, 3652 NULL, &scalar_outputCnt, 3653 0, &ool_output_size)); 3654 } 3655 3656 kern_return_t shim_io_connect_method_structureI_structureO( 3657 IOExternalMethod * method, 3658 IOService * object, 3659 
io_struct_inband_t input, 3660 mach_msg_type_number_t inputCount, 3661 io_struct_inband_t output, 3662 IOByteCount * outputCount ) 3663 { 3664 IOMethod func; 3665 IOReturn err = kIOReturnBadArgument; 3666 3667 do 3668 { 3669 if( (kIOUCVariableStructureSize != method->count0) 3670 && (inputCount != method->count0)) 3671 { 3672 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 3673 continue; 3674 } 3675 if( (kIOUCVariableStructureSize != method->count1) 3676 && (*outputCount != method->count1)) 3677 { 3678 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 3679 continue; 3680 } 3681 3682 func = method->func; 3683 3684 if( method->count1) { 3685 if( method->count0) { 3686 err = (object->*func)( input, output, 3687 (void *)inputCount, outputCount, 0, 0 ); 3688 } else { 3689 err = (object->*func)( output, outputCount, 0, 0, 0, 0 ); 3690 } 3691 } else { 3692 err = (object->*func)( input, (void *)inputCount, 0, 0, 0, 0 ); 3693 } 3694 } 3695 while( false); 3696 3697 3698 return( err); 3699 } 3700 3701 kern_return_t shim_io_async_method_structureI_structureO( 3702 IOExternalAsyncMethod * method, 3703 IOService * object, 3704 mach_port_t asyncWakePort, 3705 io_user_reference_t * asyncReference, 3706 uint32_t asyncReferenceCount, 3707 io_struct_inband_t input, 3708 mach_msg_type_number_t inputCount, 3709 io_struct_inband_t output, 3710 mach_msg_type_number_t * outputCount ) 3711 { 3712 IOAsyncMethod func; 3713 uint32_t i; 3714 IOReturn err; 3715 io_async_ref_t reference; 3716 3717 for (i = 0; i < asyncReferenceCount; i++) 3718 reference[i] = REF32(asyncReference[i]); 3719 3720 err = kIOReturnBadArgument; 3721 do 3722 { 3723 if( (kIOUCVariableStructureSize != method->count0) 3724 && (inputCount != method->count0)) 3725 { 3726 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); 3727 continue; 3728 } 3729 if( (kIOUCVariableStructureSize != method->count1) 3730 && (*outputCount != method->count1)) 3731 { 3732 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); 3733 continue; 3734 } 3735 3736 func = method->func; 3737 3738 if( method->count1) { 3739 if( method->count0) { 3740 err = (object->*func)( reference, 3741 input, output, 3742 (void *)inputCount, outputCount, 0, 0 ); 3743 } else { 3744 err = (object->*func)( reference, 3745 output, outputCount, 0, 0, 0, 0 ); 3746 } 3747 } else { 3748 err = (object->*func)( reference, 3749 input, (void *)inputCount, 0, 0, 0, 0 ); 3750 } 3751 } 3752 while( false); 3753 3754 return( err); 3755 } 3756 3757 /* Routine io_make_matching */ 3758 kern_return_t is_io_make_matching( 3759 mach_port_t master_port, 3760 uint32_t type, 3761 uint32_t options, 3762 io_struct_inband_t input, 3763 mach_msg_type_number_t inputCount, 3764 io_string_t matching ) 3765 { 3766 OSSerialize * s; 3767 IOReturn err = kIOReturnSuccess; 3768 OSDictionary * dict; 3769 3770 if( master_port != master_device_port) 3771 return( kIOReturnNotPrivileged); 3772 3773 switch( type) { 3774 3775 case kIOServiceMatching: 3776 dict = IOService::serviceMatching( gIOServiceKey ); 3777 break; 3778 3779 case kIOBSDNameMatching: 3780 dict = IOBSDNameMatching( (const char *) input ); 3781 break; 3782 3783 case kIOOFPathMatching: 3784 dict = IOOFPathMatching( (const char *) input, 3785 matching, sizeof( io_string_t)); 3786 break; 3787 3788 default: 3789 dict = 0; 3790 } 3791 3792 if( !dict) 3793 return( kIOReturnUnsupported); 3794 3795 do { 3796 s = OSSerialize::withCapacity(4096); 3797 if( !s) { 3798 err = kIOReturnNoMemory; 
3799 continue; 3800 } 3801 s->clearText(); 3802 if( !dict->serialize( s )) { 3803 err = kIOReturnUnsupported; 3804 continue; 3805 } 3806 3807 if( s->getLength() > sizeof( io_string_t)) { 3808 err = kIOReturnNoMemory; 3809 continue; 3810 } else 3811 strlcpy(matching, s->text(), sizeof(io_string_t)); 3812 } 3813 while( false); 3814 3815 if( s) 3816 s->release(); 3817 if( dict) 3818 dict->release(); 3819 3820 return( err); 3821 } 3822 3823 /* Routine io_catalog_send_data */ 3824 kern_return_t is_io_catalog_send_data( 3825 mach_port_t master_port, 3826 uint32_t flag, 3827 io_buf_ptr_t inData, 3828 mach_msg_type_number_t inDataCount, 3829 kern_return_t * result) 3830 { 3831 OSObject * obj = 0; 3832 vm_offset_t data; 3833 kern_return_t kr = kIOReturnError; 3834 3835 //printf("io_catalog_send_data called. flag: %d\n", flag); 3836 3837 if( master_port != master_device_port) 3838 return kIOReturnNotPrivileged; 3839 3840 if( (flag != kIOCatalogRemoveKernelLinker && 3841 flag != kIOCatalogKextdActive && 3842 flag != kIOCatalogKextdFinishedLaunching) && 3843 ( !inData || !inDataCount) ) 3844 { 3845 return kIOReturnBadArgument; 3846 } 3847 3848 if (inData) { 3849 vm_map_offset_t map_data; 3850 3851 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData); 3852 data = CAST_DOWN(vm_offset_t, map_data); 3853 3854 if( kr != KERN_SUCCESS) 3855 return kr; 3856 3857 // must return success after vm_map_copyout() succeeds 3858 3859 if( inDataCount ) { 3860 obj = (OSObject *)OSUnserializeXML((const char *)data); 3861 vm_deallocate( kernel_map, data, inDataCount ); 3862 if( !obj) { 3863 *result = kIOReturnNoMemory; 3864 return( KERN_SUCCESS); 3865 } 3866 } 3867 } 3868 3869 switch ( flag ) { 3870 case kIOCatalogResetDrivers: 3871 case kIOCatalogResetDriversNoMatch: { 3872 OSArray * array; 3873 3874 array = OSDynamicCast(OSArray, obj); 3875 if (array) { 3876 if ( !gIOCatalogue->resetAndAddDrivers(array, 3877 flag == kIOCatalogResetDrivers) ) { 3878 3879 kr = kIOReturnError; 3880 } 3881 } else { 3882 kr = kIOReturnBadArgument; 3883 } 3884 } 3885 break; 3886 3887 case kIOCatalogAddDrivers: 3888 case kIOCatalogAddDriversNoMatch: { 3889 OSArray * array; 3890 3891 array = OSDynamicCast(OSArray, obj); 3892 if ( array ) { 3893 if ( !gIOCatalogue->addDrivers( array , 3894 flag == kIOCatalogAddDrivers) ) { 3895 kr = kIOReturnError; 3896 } 3897 } 3898 else { 3899 kr = kIOReturnBadArgument; 3900 } 3901 } 3902 break; 3903 3904 case kIOCatalogRemoveDrivers: 3905 case kIOCatalogRemoveDriversNoMatch: { 3906 OSDictionary * dict; 3907 3908 dict = OSDynamicCast(OSDictionary, obj); 3909 if ( dict ) { 3910 if ( !gIOCatalogue->removeDrivers( dict, 3911 flag == kIOCatalogRemoveDrivers ) ) { 3912 kr = kIOReturnError; 3913 } 3914 } 3915 else { 3916 kr = kIOReturnBadArgument; 3917 } 3918 } 3919 break; 3920 3921 case kIOCatalogStartMatching: { 3922 OSDictionary * dict; 3923 3924 dict = OSDynamicCast(OSDictionary, obj); 3925 if ( dict ) { 3926 if ( !gIOCatalogue->startMatching( dict ) ) { 3927 kr = kIOReturnError; 3928 } 3929 } 3930 else { 3931 kr = kIOReturnBadArgument; 3932 } 3933 } 3934 break; 3935 3936 case kIOCatalogRemoveKernelLinker: 3937 kr = KERN_NOT_SUPPORTED; 3938 break; 3939 3940 case kIOCatalogKextdActive: 3941 #if !NO_KEXTD 3942 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0); 3943 OSKext::setKextdActive(); 3944 3945 /* Dump all nonloaded startup extensions; kextd will now send them 3946 * down on request. 
3947 */ 3948 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false); 3949 #endif 3950 kr = kIOReturnSuccess; 3951 break; 3952 3953 case kIOCatalogKextdFinishedLaunching: { 3954 #if !NO_KEXTD 3955 static bool clearedBusy = false; 3956 3957 if (!clearedBusy) { 3958 IOService * serviceRoot = IOService::getServiceRoot(); 3959 if (serviceRoot) { 3960 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0); 3961 serviceRoot->adjustBusy(-1); 3962 clearedBusy = true; 3963 } 3964 } 3965 #endif 3966 kr = kIOReturnSuccess; 3967 } 3968 break; 3969 3970 default: 3971 kr = kIOReturnBadArgument; 3972 break; 3973 } 3974 3975 if (obj) obj->release(); 3976 3977 *result = kr; 3978 return( KERN_SUCCESS); 3979 } 3980 3981 /* Routine io_catalog_terminate */ 3982 kern_return_t is_io_catalog_terminate( 3983 mach_port_t master_port, 3984 uint32_t flag, 3985 io_name_t name ) 3986 { 3987 kern_return_t kr; 3988 3989 if( master_port != master_device_port ) 3990 return kIOReturnNotPrivileged; 3991 3992 kr = IOUserClient::clientHasPrivilege( (void *) current_task(), 3993 kIOClientPrivilegeAdministrator ); 3994 if( kIOReturnSuccess != kr) 3995 return( kr ); 3996 3997 switch ( flag ) { 3998 #if !defined(SECURE_KERNEL) 3999 case kIOCatalogServiceTerminate: 4000 OSIterator * iter; 4001 IOService * service; 4002 4003 iter = IORegistryIterator::iterateOver(gIOServicePlane, 4004 kIORegistryIterateRecursively); 4005 if ( !iter ) 4006 return kIOReturnNoMemory; 4007 4008 do { 4009 iter->reset(); 4010 while( (service = (IOService *)iter->getNextObject()) ) { 4011 if( service->metaCast(name)) { 4012 if ( !service->terminate( kIOServiceRequired 4013 | kIOServiceSynchronous) ) { 4014 kr = kIOReturnUnsupported; 4015 break; 4016 } 4017 } 4018 } 4019 } while( !service && !iter->isValid()); 4020 iter->release(); 4021 break; 4022 4023 case kIOCatalogModuleUnload: 4024 case kIOCatalogModuleTerminate: 4025 kr = gIOCatalogue->terminateDriversForModule(name, 4026 flag == kIOCatalogModuleUnload); 4027 break; 4028 #endif 4029 4030 default: 4031 kr = kIOReturnBadArgument; 4032 break; 4033 } 4034 4035 return( kr ); 4036 } 4037 4038 /* Routine io_catalog_get_data */ 4039 kern_return_t is_io_catalog_get_data( 4040 mach_port_t master_port, 4041 uint32_t flag, 4042 io_buf_ptr_t *outData, 4043 mach_msg_type_number_t *outDataCount) 4044 { 4045 kern_return_t kr = kIOReturnSuccess; 4046 OSSerialize * s; 4047 4048 if( master_port != master_device_port) 4049 return kIOReturnNotPrivileged; 4050 4051 //printf("io_catalog_get_data called. 
flag: %d\n", flag);

    s = OSSerialize::withCapacity(4096);
    if ( !s )
        return kIOReturnNoMemory;

    s->clearText();

    kr = gIOCatalogue->serializeData(flag, s);

    if ( kr == kIOReturnSuccess ) {
        vm_offset_t data;
        vm_map_copy_t copy;
        vm_size_t size;

        size = s->getLength();
        kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
        if ( kr == kIOReturnSuccess ) {
            bcopy(s->text(), (void *)data, size);
            kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
                               (vm_map_size_t)size, true, &copy);
            *outData = (char *)copy;
            *outDataCount = size;
        }
    }

    s->release();

    return kr;
}

/* Routine io_catalog_get_gen_count */
kern_return_t is_io_catalog_get_gen_count(
        mach_port_t master_port,
        uint32_t *genCount)
{
    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    //printf("io_catalog_get_gen_count called.\n");

    if ( !genCount )
        return kIOReturnBadArgument;

    *genCount = gIOCatalogue->getGenerationCount();

    return kIOReturnSuccess;
}

/* Routine io_catalog_module_loaded.
 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
 */
kern_return_t is_io_catalog_module_loaded(
        mach_port_t master_port,
        io_name_t name)
{
    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    //printf("io_catalog_module_loaded called. name %s\n", name);

    if ( !name )
        return kIOReturnBadArgument;

    gIOCatalogue->moduleHasLoaded(name);

    return kIOReturnSuccess;
}

kern_return_t is_io_catalog_reset(
        mach_port_t master_port,
        uint32_t flag)
{
    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    switch ( flag ) {
        case kIOCatalogResetDefault:
            gIOCatalogue->reset();
            break;

        default:
            return kIOReturnBadArgument;
    }

    return kIOReturnSuccess;
}

kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
    kern_return_t result = kIOReturnBadArgument;
    IOUserClient *userClient;

    if ((userClient = OSDynamicCast(IOUserClient,
            iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
        IOExternalTrap *trap;
        IOService *target = NULL;

        trap = userClient->getTargetAndTrapForIndex(&target, args->index);

        if (trap && target) {
            IOTrap func;

            func = trap->func;

            if (func) {
                result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
            }
        }

        userClient->release();
    }

    return result;
}

} /* extern "C" */

IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
                                        IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
    IOReturn    err;
    IOService * object;
    IOByteCount structureOutputSize;

    if (dispatch)
    {
        uint32_t count;
        count = dispatch->checkScalarInputCount;
        if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkStructureInputSize;
        if ((kIOUCVariableStructureSize != count)
            && (count != ((args->structureInputDescriptor)
                            ?
args->structureInputDescriptor->getLength() : args->structureInputSize))) 4189 { 4190 return (kIOReturnBadArgument); 4191 } 4192 4193 count = dispatch->checkScalarOutputCount; 4194 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) 4195 { 4196 return (kIOReturnBadArgument); 4197 } 4198 4199 count = dispatch->checkStructureOutputSize; 4200 if ((kIOUCVariableStructureSize != count) 4201 && (count != ((args->structureOutputDescriptor) 4202 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) 4203 { 4204 return (kIOReturnBadArgument); 4205 } 4206 4207 if (dispatch->function) 4208 err = (*dispatch->function)(target, reference, args); 4209 else 4210 err = kIOReturnNoCompletion; /* implementator can dispatch */ 4211 4212 return (err); 4213 } 4214 4215 4216 // pre-Leopard API's don't do ool structs 4217 if (args->structureInputDescriptor || args->structureOutputDescriptor) 4218 { 4219 err = kIOReturnIPCError; 4220 return (err); 4221 } 4222 4223 structureOutputSize = args->structureOutputSize; 4224 4225 if (args->asyncWakePort) 4226 { 4227 IOExternalAsyncMethod * method; 4228 4229 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) ) 4230 return (kIOReturnUnsupported); 4231 4232 if (kIOUCForegroundOnly & method->flags) 4233 { 4234 /* is graphics access denied for current task? */ 4235 if (proc_get_task_selfgpuacc_deny() != 0) 4236 return (kIOReturnNotPermitted); 4237 } 4238 4239 switch (method->flags & kIOUCTypeMask) 4240 { 4241 case kIOUCScalarIStructI: 4242 err = shim_io_async_method_scalarI_structureI( method, object, 4243 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, 4244 args->scalarInput, args->scalarInputCount, 4245 (char *)args->structureInput, args->structureInputSize ); 4246 break; 4247 4248 case kIOUCScalarIScalarO: 4249 err = shim_io_async_method_scalarI_scalarO( method, object, 4250 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, 4251 args->scalarInput, args->scalarInputCount, 4252 args->scalarOutput, &args->scalarOutputCount ); 4253 break; 4254 4255 case kIOUCScalarIStructO: 4256 err = shim_io_async_method_scalarI_structureO( method, object, 4257 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, 4258 args->scalarInput, args->scalarInputCount, 4259 (char *) args->structureOutput, &args->structureOutputSize ); 4260 break; 4261 4262 4263 case kIOUCStructIStructO: 4264 err = shim_io_async_method_structureI_structureO( method, object, 4265 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, 4266 (char *)args->structureInput, args->structureInputSize, 4267 (char *) args->structureOutput, &args->structureOutputSize ); 4268 break; 4269 4270 default: 4271 err = kIOReturnBadArgument; 4272 break; 4273 } 4274 } 4275 else 4276 { 4277 IOExternalMethod * method; 4278 4279 if( !(method = getTargetAndMethodForIndex(&object, selector)) ) 4280 return (kIOReturnUnsupported); 4281 4282 if (kIOUCForegroundOnly & method->flags) 4283 { 4284 /* is graphics access denied for current task? 
*/ 4285 if (proc_get_task_selfgpuacc_deny() != 0) 4286 return (kIOReturnNotPermitted); 4287 4288 } 4289 4290 switch (method->flags & kIOUCTypeMask) 4291 { 4292 case kIOUCScalarIStructI: 4293 err = shim_io_connect_method_scalarI_structureI( method, object, 4294 args->scalarInput, args->scalarInputCount, 4295 (char *) args->structureInput, args->structureInputSize ); 4296 break; 4297 4298 case kIOUCScalarIScalarO: 4299 err = shim_io_connect_method_scalarI_scalarO( method, object, 4300 args->scalarInput, args->scalarInputCount, 4301 args->scalarOutput, &args->scalarOutputCount ); 4302 break; 4303 4304 case kIOUCScalarIStructO: 4305 err = shim_io_connect_method_scalarI_structureO( method, object, 4306 args->scalarInput, args->scalarInputCount, 4307 (char *) args->structureOutput, &structureOutputSize ); 4308 break; 4309 4310 4311 case kIOUCStructIStructO: 4312 err = shim_io_connect_method_structureI_structureO( method, object, 4313 (char *) args->structureInput, args->structureInputSize, 4314 (char *) args->structureOutput, &structureOutputSize ); 4315 break; 4316 4317 default: 4318 err = kIOReturnBadArgument; 4319 break; 4320 } 4321 } 4322 4323 args->structureOutputSize = structureOutputSize; 4324 4325 return (err); 4326 } 4327 4328 4329 #if __LP64__ 4330 OSMetaClassDefineReservedUnused(IOUserClient, 0); 4331 OSMetaClassDefineReservedUnused(IOUserClient, 1); 4332 #else 4333 OSMetaClassDefineReservedUsed(IOUserClient, 0); 4334 OSMetaClassDefineReservedUsed(IOUserClient, 1); 4335 #endif 4336 OSMetaClassDefineReservedUnused(IOUserClient, 2); 4337 OSMetaClassDefineReservedUnused(IOUserClient, 3); 4338 OSMetaClassDefineReservedUnused(IOUserClient, 4); 4339 OSMetaClassDefineReservedUnused(IOUserClient, 5); 4340 OSMetaClassDefineReservedUnused(IOUserClient, 6); 4341 OSMetaClassDefineReservedUnused(IOUserClient, 7); 4342 OSMetaClassDefineReservedUnused(IOUserClient, 8); 4343 OSMetaClassDefineReservedUnused(IOUserClient, 9); 4344 OSMetaClassDefineReservedUnused(IOUserClient, 10); 4345 OSMetaClassDefineReservedUnused(IOUserClient, 11); 4346 OSMetaClassDefineReservedUnused(IOUserClient, 12); 4347 OSMetaClassDefineReservedUnused(IOUserClient, 13); 4348 OSMetaClassDefineReservedUnused(IOUserClient, 14); 4349 OSMetaClassDefineReservedUnused(IOUserClient, 15); 4350 4351
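/*
 * Illustrative sketch (not compiled here): how a driver's IOUserClient subclass is
 * expected to feed the dispatch-table path that IOUserClient::externalMethod()
 * validates above. The names MyUserClient, sOpenMethod, userOpen, kMyMethodOpen and
 * kMyMethodCount are hypothetical examples, not symbols defined by this file.
 *
 *  static IOReturn sOpenMethod(OSObject * target, void * reference,
 *                              IOExternalMethodArguments * args)
 *  {
 *      // scalarInputCount has already been checked against checkScalarInputCount
 *      return ((MyUserClient *) target)->userOpen((uint32_t) args->scalarInput[0]);
 *  }
 *
 *  IOReturn MyUserClient::externalMethod(uint32_t selector,
 *          IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *          OSObject * target, void * reference)
 *  {
 *      static const IOExternalMethodDispatch sMethods[kMyMethodCount] =
 *      {
 *          // function, checkScalarInputCount, checkStructureInputSize,
 *          // checkScalarOutputCount, checkStructureOutputSize
 *          { sOpenMethod, 1, 0, 0, 0 },        // kMyMethodOpen
 *      };
 *
 *      if (selector < kMyMethodCount)
 *      {
 *          dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *          if (!target)
 *              target = this;
 *      }
 *      // the superclass performs the count/size checks and invokes dispatch->function
 *      return (super::externalMethod(selector, args, dispatch, target, reference));
 *  }
 *
 * Keeping the checks in the superclass means per-method argument validation lives
 * in one table rather than being repeated in every handler.
 */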