/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <sys/sysctl.h>
extern "C" {
#include <vm/vm_kern.h>
#include <kern/task.h>
#include <kern/debug.h>
}

#include <libkern/c++/OSContainers.h>
#include <libkern/OSDebug.h>
#include <libkern/c++/OSCPPDebug.h>
#include <kern/backtrace.h>

#include <IOKit/IOKitDebug.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/IOService.h>

#include "IOKitKernelInternal.h"

#ifdef IOKITDEBUG
#define DEBUG_INIT_VALUE IOKITDEBUG
#else
#define DEBUG_INIT_VALUE 0
#endif

SInt64 gIOKitDebug = DEBUG_INIT_VALUE;
SInt64 gIOKitTrace = 0;

#if DEVELOPMENT || DEBUG
#define IODEBUG_CTLFLAGS    CTLFLAG_RW
#else
#define IODEBUG_CTLFLAGS    CTLFLAG_RD
#endif

SYSCTL_QUAD(_debug, OID_AUTO, iotrace, CTLFLAG_RW | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");

static int
sysctl_debug_iokit
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    SInt64 newValue;
    int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed);
    if (changed) {
        gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions));
    }
    return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, iokit,
    CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io");
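
/*
 * Illustrative userspace sketch (not part of the kernel build): once
 * registered, the sysctls above can be queried with sysctlbyname(3).
 * debug.iotrace is read-write; debug.iokit is writable only on
 * DEVELOPMENT/DEBUG kernels, and the handler accepts only bits within
 * kIOKitDebugUserOptions.
 *
 *     #include <sys/sysctl.h>
 *
 *     uint64_t dbg = 0;
 *     size_t   len = sizeof(dbg);
 *     sysctlbyname("debug.iokit", &dbg, &len, NULL, 0);               // read gIOKitDebug
 *
 *     uint64_t trace = 0x1;                                           // example mask only
 *     sysctlbyname("debug.iotrace", NULL, NULL, &trace, sizeof(trace)); // set gIOKitTrace
 */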

int debug_malloc_size;
int debug_iomalloc_size;

vm_size_t debug_iomallocpageable_size;
int debug_container_malloc_size;
// int debug_ivars_size; // in OSObject.cpp

extern "C" {

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      { IOLog(fmt, ## args); }
#endif

void IOPrintPlane( const IORegistryPlane * plane )
{
    IORegistryEntry *    next;
    IORegistryIterator * iter;
    OSOrderedSet *       all;
    char                 format[] = "%xxxs";
    IOService *          service;

    iter = IORegistryIterator::iterateOver( plane );
    assert( iter );
    all = iter->iterateAll();
    if( all) {
        DEBG("Count %d\n", all->getCount() );
        all->release();
    } else
        DEBG("Empty\n");

    iter->reset();
    while( (next = iter->getNextObjectRecursive())) {
        snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * next->getDepth( plane ));
        DEBG( format, "");
        DEBG( "\033[33m%s", next->getName( plane ));
        if( (next->getLocation( plane )))
            DEBG("@%s", next->getLocation( plane ));
        DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
        if( (service = OSDynamicCast(IOService, next)))
            DEBG(", busy %ld", (long) service->getBusyState());
        DEBG( ">\n");
        // IOSleep(250);
    }
    iter->release();
}

void db_piokjunk(void)
{
}

void db_dumpiojunk( const IORegistryPlane * plane __unused )
{
}

void IOPrintMemory( void )
{

//    OSMetaClass::printInstanceCounts();

    IOLog("\n"
          "ivar kalloc()       0x%08x\n"
          "malloc()            0x%08x\n"
          "containers kalloc() 0x%08x\n"
          "IOMalloc()          0x%08x\n"
          "----------------------------------------\n",
          debug_ivars_size,
          debug_malloc_size,
          debug_container_malloc_size,
          debug_iomalloc_size
          );
}

} /* extern "C" */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super OSObject
OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSObject * IOKitDiagnostics::diagnostics( void )
{
    IOKitDiagnostics * diags;

    diags = new IOKitDiagnostics;
    if( diags && !diags->init()) {
        diags->release();
        diags = 0;
    }

    return( diags );
}

void IOKitDiagnostics::updateOffset( OSDictionary * dict,
                                     UInt64 value, const char * name )
{
    OSNumber * off;

    off = OSNumber::withNumber( value, 64 );
    if( !off)
        return;

    dict->setObject( name, off );
    off->release();
}

bool IOKitDiagnostics::serialize(OSSerialize *s) const
{
    OSDictionary * dict;
    bool           ok;

    dict = OSDictionary::withCapacity( 5 );
    if( !dict)
        return( false );

    updateOffset( dict, debug_ivars_size, "Instance allocation" );
    updateOffset( dict, debug_container_malloc_size, "Container allocation" );
    updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
    updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );

    OSMetaClass::serializeClassDictionary(dict);

    ok = dict->serialize( s );

    dict->release();

    return( ok );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING

#include <libkern/c++/OSCPPDebug.h>
#include <libkern/c++/OSKext.h>
#include <kern/zalloc.h>

__private_extern__ "C" void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));

extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern "C" ppnum_t pmap_valid_page(ppnum_t pn);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOTRecursiveLock
{
    lck_mtx_t * mutex;
    thread_t    thread;
    UInt32      count;
};

struct IOTrackingQueue
{
    queue_chain_t    link;
    IOTRecursiveLock lock;
    const char *     name;
    uintptr_t        btEntry;
    size_t           allocSize;
    size_t           minCaptureSize;
    uint32_t         siteCount;
    uint32_t         type;
    uint32_t         numSiteQs;
    uint8_t          captureOn;
    queue_head_t     sites[];
};

struct IOTrackingCallSite
{
    queue_chain_t     link;
    IOTrackingQueue * queue;
    uint32_t          crc;

    vm_tag_t          tag;
    uint32_t          count;
    size_t            size[2];
    uintptr_t         bt[kIOTrackingCallSiteBTs];

    queue_head_t      instances;
    IOTracking *      addresses;
};

struct IOTrackingLeaksRef
{
    uintptr_t * instances;
    uint32_t    zoneSize;
    uint32_t    count;
    uint32_t    found;
    uint32_t    foundzlen;
    size_t      bytes;
};

lck_mtx_t *  gIOTrackingLock;
queue_head_t gIOTrackingQ;

enum
{
    kTrackingAddressFlagAllocated = 0x00000001
};

#if defined(__LP64__)
#define IOTrackingAddressFlags(ptr)    (ptr->flags)
#else
#define IOTrackingAddressFlags(ptr)    (ptr->tracking.flags)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
IOTRecursiveLockLock(IOTRecursiveLock * lock)
{
    if (lock->thread == current_thread()) lock->count++;
    else
    {
        lck_mtx_lock(lock->mutex);
        assert(lock->thread == 0);
        assert(lock->count == 0);
        lock->thread = current_thread();
        lock->count = 1;
    }
}

static void
IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
{
    assert(lock->thread == current_thread());
    if (0 == (--lock->count))
    {
        lock->thread = 0;
        lck_mtx_unlock(lock->mutex);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingInit(void)
{
    queue_init(&gIOTrackingQ);
    gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOTrackingQueue *
IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
                     size_t allocSize, size_t minCaptureSize,
                     uint32_t type, uint32_t numSiteQs)
{
    IOTrackingQueue * queue;
    uint32_t          idx;

    if (!numSiteQs) numSiteQs = 1;
    queue = (typeof(queue)) kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0]));
    bzero(queue, sizeof(IOTrackingQueue));

    queue->name           = name;
    queue->btEntry        = btEntry;
    queue->allocSize      = allocSize;
    queue->minCaptureSize = minCaptureSize;
    queue->lock.mutex     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue->numSiteQs      = numSiteQs;
    queue->type           = type;
    enum { kFlags = (kIOTracking | kIOTrackingBoot) };
    queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
                    || (kIOTrackingQueueTypeDefaultOn & type);

    for (idx = 0; idx < numSiteQs; idx++) queue_init(&queue->sites[idx]);

    lck_mtx_lock(gIOTrackingLock);
    queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
    lck_mtx_unlock(gIOTrackingLock);

    return (queue);
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
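
/*
 * Illustrative sketch (not part of the build): a subsystem that wants its
 * allocations tracked creates a queue once, then brackets each allocation
 * with IOTrackingAdd()/IOTrackingRemove() (or, for raw addresses,
 * IOTrackingAlloc()/IOTrackingFree() defined later in this file). The queue
 * name, MyEntry type and its embedded IOTracking member are made up for the
 * example.
 *
 *     static IOTrackingQueue * gMyTrackingQ;
 *
 *     gMyTrackingQ = IOTrackingQueueAlloc("mydriver", 0,
 *                                         sizeof(MyEntry), 0,
 *                                         kIOTrackingQueueTypeAlloc, 0);
 *     // per allocation:
 *     IOTrackingAdd(gMyTrackingQ, &entry->tracking, sizeof(MyEntry), false, VM_KERN_MEMORY_NONE);
 *     // per free:
 *     IOTrackingRemove(gMyTrackingQ, &entry->tracking, sizeof(MyEntry));
 *     // teardown:
 *     IOTrackingQueueFree(gMyTrackingQ);
 */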

void
IOTrackingQueueFree(IOTrackingQueue * queue)
{
    lck_mtx_lock(gIOTrackingLock);
    IOTrackingReset(queue);
    remque(&queue->link);
    lck_mtx_unlock(gIOTrackingLock);

    lck_mtx_free(queue->lock.mutex, IOLockGroup);

    kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0]));
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* fasthash
    The MIT License

    Copyright (C) 2012 Zilong Tan ([email protected])

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use, copy,
    modify, merge, publish, distribute, sublicense, and/or sell copies
    of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
    BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
    ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
    CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
*/


// Compression function for Merkle-Damgard construction.
// This function is generated using the framework provided.
#define mix(h) ({                           \
            (h) ^= (h) >> 23;               \
            (h) *= 0x2127599bf4325c37ULL;   \
            (h) ^= (h) >> 47; })

static uint64_t
fasthash64(const void *buf, size_t len, uint64_t seed)
{
    const uint64_t       m = 0x880355f21e6d1965ULL;
    const uint64_t     *pos = (const uint64_t *)buf;
    const uint64_t     *end = pos + (len / 8);
    const unsigned char *pos2;
    uint64_t h = seed ^ (len * m);
    uint64_t v;

    while (pos != end) {
        v  = *pos++;
        h ^= mix(v);
        h *= m;
    }

    pos2 = (const unsigned char*)pos;
    v = 0;

    switch (len & 7) {
    case 7: v ^= (uint64_t)pos2[6] << 48;
        [[clang::fallthrough]];
    case 6: v ^= (uint64_t)pos2[5] << 40;
        [[clang::fallthrough]];
    case 5: v ^= (uint64_t)pos2[4] << 32;
        [[clang::fallthrough]];
    case 4: v ^= (uint64_t)pos2[3] << 24;
        [[clang::fallthrough]];
    case 3: v ^= (uint64_t)pos2[2] << 16;
        [[clang::fallthrough]];
    case 2: v ^= (uint64_t)pos2[1] << 8;
        [[clang::fallthrough]];
    case 1: v ^= (uint64_t)pos2[0];
        h ^= mix(v);
        h *= m;
    }

    return mix(h);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
fasthash32(const void *buf, size_t len, uint32_t seed)
{
    // the following trick converts the 64-bit hashcode to Fermat
    // residue, which shall retain information from both the higher
    // and lower parts of hashcode.
    uint64_t h = fasthash64(buf, len, seed);
    return h - (h >> 32);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
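
/*
 * Usage note: IOTrackingAdd() below hashes the captured kernel backtrace with
 * fasthash32() and uses the result both as the call-site key (site->crc) and,
 * modulo numSiteQs, as the bucket index into queue->sites[]:
 *
 *     crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);
 *     que = &queue->sites[crc % queue->numSiteQs];
 */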

void
IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
{
    uint32_t num;
    proc_t   self;

    if (!queue->captureOn)            return;
    if (size < queue->minCaptureSize) return;

    assert(!mem->link.next);

    num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs);
    num = 0;
    if ((kernel_task != current_task()) && (self = proc_self()))
    {
        bool user_64;
        mem->btPID = proc_pid(self);
        (void)backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, &num,
                             &user_64);
        mem->user32 = !user_64;
        proc_rele(self);
    }
    assert(num <= kIOTrackingCallSiteBTs);
    mem->userCount = num;

    IOTRecursiveLockLock(&queue->lock);
    queue_enter/*last*/(&queue->sites[0], mem, IOTrackingUser *, link);
    queue->siteCount++;
    IOTRecursiveLockUnlock(&queue->lock);
}

void
IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
{
    if (!mem->link.next) return;

    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next)
    {
        remque(&mem->link);
        assert(queue->siteCount);
        queue->siteCount--;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

uint64_t gIOTrackingAddTime;

void
IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
{
    IOTrackingCallSite * site;
    uint32_t             crc, num;
    uintptr_t            bt[kIOTrackingCallSiteBTs + 1];
    queue_head_t *       que;

    if (mem->site)                    return;
    if (!queue->captureOn)            return;
    if (size < queue->minCaptureSize) return;

    assert(!mem->link.next);

    num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1);
    if (!num) return;
    num--;
    crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);

    IOTRecursiveLockLock(&queue->lock);
    que = &queue->sites[crc % queue->numSiteQs];
    queue_iterate(que, site, IOTrackingCallSite *, link)
    {
        if (tag != site->tag) continue;
        if (crc == site->crc) break;
    }

    if (queue_end(que, (queue_entry_t) site))
    {
        site = (typeof(site)) kalloc(sizeof(IOTrackingCallSite));

        queue_init(&site->instances);
        site->addresses = (IOTracking *) &site->instances;
        site->queue     = queue;
        site->crc       = crc;
        site->count     = 0;
        site->tag       = tag;
        memset(&site->size[0], 0, sizeof(site->size));
        bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
        assert(num <= kIOTrackingCallSiteBTs);
        bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));

        queue_enter_first(que, site, IOTrackingCallSite *, link);
        queue->siteCount++;
    }

    if (address)
    {
        queue_enter/*last*/(&site->instances, mem, IOTracking *, link);
        if (queue_end(&site->instances, (queue_entry_t)site->addresses)) site->addresses = mem;
    }
    else queue_enter_first(&site->instances, mem, IOTracking *, link);

    mem->site = site;
    site->size[0] += size;
    site->count++;

    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
    if (!mem->link.next) return;

    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next)
    {
        assert(mem->site);
        if (mem == mem->site->addresses) mem->site->addresses = (IOTracking *) queue_next(&mem->link);
        remque(&mem->link);

        assert(mem->site->count);
        mem->site->count--;
        assert(mem->site->size[0] >= size);
        mem->site->size[0] -= size;
        if (!mem->site->count)
        {
            assert(queue_empty(&mem->site->instances));
            assert(!mem->site->size[0]);
            assert(!mem->site->size[1]);

            remque(&mem->site->link);
            assert(queue->siteCount);
            queue->siteCount--;
            kfree(mem->site, sizeof(IOTrackingCallSite));
        }
        mem->site = NULL;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
    IOTrackingAddress * tracking;

    if (!queue->captureOn)            return;
    if (size < queue->minCaptureSize) return;

    address = ~address;
    tracking = (typeof(tracking)) kalloc(sizeof(IOTrackingAddress));
    bzero(tracking, sizeof(IOTrackingAddress));
    IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
    tracking->address = address;
    tracking->size    = size;

    IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
    IOTrackingCallSite * site;
    IOTrackingAddress *  tracking;
    uint32_t             idx;
    bool                 done;

    address = ~address;
    IOTRecursiveLockLock(&queue->lock);
    done = false;
    for (idx = 0; idx < queue->numSiteQs; idx++)
    {
        queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
        {
            for (tracking = (IOTrackingAddress *) site->addresses;
                 !done && !queue_end(&site->instances, &tracking->tracking.link);
                 tracking = (IOTrackingAddress *) queue_next(&tracking->tracking.link))
            {
                if ((done = (address == tracking->address)))
                {
                    IOTrackingRemove(queue, &tracking->tracking, size);
                    kfree(tracking, sizeof(IOTrackingAddress));
                }
            }
            if (done) break;
        }
        if (done) break;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next)
    {
        assert(mem->site);
        assert((size > 0) || (mem->site->size[1] >= -size));
        mem->site->size[1] += size;
    };
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
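
/*
 * Note: IOTrackingAlloc()/IOTrackingFree() above store the tracked address
 * one's-complemented (address = ~address). A plausible reason is that the
 * leak scanner below sweeps kernel memory looking for raw pointer values;
 * keeping only the inverted form in the tracking records prevents the records
 * themselves from being counted as live references to the allocation.
 */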

void
IOTrackingReset(IOTrackingQueue * queue)
{
    IOTrackingCallSite * site;
    IOTrackingUser     * user;
    IOTracking         * tracking;
    IOTrackingAddress  * trackingAddress;
    uint32_t             idx;
    bool                 addresses;

    IOTRecursiveLockLock(&queue->lock);
    for (idx = 0; idx < queue->numSiteQs; idx++)
    {
        while (!queue_empty(&queue->sites[idx]))
        {
            if (kIOTrackingQueueTypeMap & queue->type)
            {
                queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
                user->link.next = user->link.prev = NULL;
            }
            else
            {
                queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
                addresses = false;
                while (!queue_empty(&site->instances))
                {
                    queue_remove_first(&site->instances, tracking, IOTracking *, link);
                    if (tracking == site->addresses) addresses = true;
                    if (addresses)
                    {
                        trackingAddress = (typeof(trackingAddress)) tracking;
                        if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress))
                        {
                            kfree(tracking, sizeof(IOTrackingAddress));
                        }
                    }
                }
                kfree(site, sizeof(IOTrackingCallSite));
            }
        }
    }
    queue->siteCount = 0;
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static int
IOTrackingCallSiteInfoCompare(const void * left, const void * right)
{
    IOTrackingCallSiteInfo * l = (typeof(l)) left;
    IOTrackingCallSiteInfo * r = (typeof(r)) right;
    size_t                   lsize, rsize;

    rsize = r->size[0] + r->size[1];
    lsize = l->size[0] + l->size[1];

    return ((rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static int
IOTrackingAddressCompare(const void * left, const void * right)
{
    IOTracking * instance;
    uintptr_t    inst, laddr, raddr;

    inst = ((typeof(inst) *) left)[0];
    instance = (typeof(instance)) INSTANCE_GET(inst);
    if (kInstanceFlagAddress & inst) laddr = ~((IOTrackingAddress *)instance)->address;
    else                             laddr = (uintptr_t) (instance + 1);

    inst = ((typeof(inst) *) right)[0];
    instance = (typeof(instance)) (inst & ~kInstanceFlags);
    if (kInstanceFlagAddress & inst) raddr = ~((IOTrackingAddress *)instance)->address;
    else                             raddr = (uintptr_t) (instance + 1);

    return ((laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1));
}


static int
IOTrackingZoneElementCompare(const void * left, const void * right)
{
    uintptr_t inst, laddr, raddr;

    inst = ((typeof(inst) *) left)[0];
    laddr = INSTANCE_PUT(inst);
    inst = ((typeof(inst) *) right)[0];
    raddr = INSTANCE_PUT(inst);

    return ((laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
{
    uint32_t          j;
    mach_vm_address_t bt, btEntry;

    btEntry = site->queue->btEntry;
    for (j = 0; j < kIOTrackingCallSiteBTs; j++)
    {
        bt = site->bt[j];
        if (btEntry
            && (!bt || (j == (kIOTrackingCallSiteBTs - 1))))
        {
            bt = btEntry;
            btEntry = 0;
        }
        siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
IOTrackingLeakScan(void * refcon)
{
    IOTrackingLeaksRef * ref = (typeof(ref)) refcon;
    uintptr_t          * instances;
    IOTracking         * instance;
    uint64_t             vaddr, vincr;
    ppnum_t              ppn;
    uintptr_t            ptr, addr, vphysaddr, inst;
    size_t               size, origsize;
    uint32_t             baseIdx, lim, ptrIdx, count;
    boolean_t            is;
    AbsoluteTime         deadline;

    instances = ref->instances;
    count     = ref->count;
    size = origsize = ref->zoneSize;

    for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
         ;
         vaddr += vincr)
    {
        if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS))
        {
            if (deadline)
            {
                ml_set_interrupts_enabled(is);
                IODelay(10);
            }
            if (vaddr >= VM_MAX_KERNEL_ADDRESS) break;
            is = ml_set_interrupts_enabled(false);
            clock_interval_to_deadline(10, kMillisecondScale, &deadline);
        }

        ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
        // check noencrypt to avoid VM structs (map entries) with pointers
        if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) ppn = 0;
        if (!ppn) continue;

        for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++)
        {
            ptr = ((uintptr_t *)vphysaddr)[ptrIdx];

            for (lim = count, baseIdx = 0; lim; lim >>= 1)
            {
                inst = instances[baseIdx + (lim >> 1)];
                instance = (typeof(instance)) INSTANCE_GET(inst);

                if (ref->zoneSize)
                {
                    addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
                }
                else if (kInstanceFlagAddress & inst)
                {
                    addr            = ~((IOTrackingAddress *)instance)->address;
                    origsize = size = ((IOTrackingAddress *)instance)->size;
                    if (!size) size = 1;
                }
                else
                {
                    addr            = (uintptr_t) (instance + 1);
                    origsize = size = instance->site->queue->allocSize;
                }
                if ((ptr >= addr) && (ptr < (addr + size))

                 && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
                  || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size))))
                {
                    if (!(kInstanceFlagReferenced & inst))
                    {
                        inst |= kInstanceFlagReferenced;
                        instances[baseIdx + (lim >> 1)] = inst;
                        ref->found++;
                        if (!origsize) ref->foundzlen++;
                    }
                    break;
                }
                if (ptr > addr)
                {
                    // move right
                    baseIdx += (lim >> 1) + 1;
                    lim--;
                }
                // else move left
            }
        }
        ref->bytes += page_size;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
{
    IOTrackingLeaksRef     ref;
    IOTrackingCallSiteInfo siteInfo;
    uint32_t               idx;

    qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);

    bzero(&siteInfo, sizeof(siteInfo));
    bzero(&ref, sizeof(ref));
    ref.instances = instances;
    ref.count    = count;
    ref.zoneSize = zoneSize;

    for (idx = 0; idx < 2; idx++)
    {
        ref.bytes = 0;
        IOTrackingLeakScan(&ref);
        IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
        if (count <= ref.found) break;
    }

    *found = ref.found;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
ZoneSiteProc(void * refCon, uint32_t siteCount, uint32_t zoneSize,
             uintptr_t * backtrace, uint32_t btCount)
{
    IOTrackingCallSiteInfo siteInfo;
    OSData               * leakData;
    uint32_t               idx;

    leakData = (typeof(leakData)) refCon;

    bzero(&siteInfo, sizeof(siteInfo));
    siteInfo.count   = siteCount;
    siteInfo.size[0] = zoneSize * siteCount;

    for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++)
    {
        siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]);
    }

    leakData->appendBytes(&siteInfo, sizeof(siteInfo));
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static OSData *
IOTrackingLeaks(OSData * data)
{
    IOTrackingLeaksRef     ref;
    IOTrackingCallSiteInfo siteInfo;
    IOTrackingCallSite   * site;
    OSData               * leakData;
    uintptr_t            * instances;
    IOTracking           * instance;
    uintptr_t              inst;
    uint32_t               count, idx, numSites, dups, siteCount;

    instances = (typeof(instances)) data->getBytesNoCopy();
    count = (data->getLength() / sizeof(*instances));
    qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);

    bzero(&siteInfo, sizeof(siteInfo));
    bzero(&ref, sizeof(ref));
    ref.instances = instances;
    ref.count = count;
    for (idx = 0; idx < 2; idx++)
    {
        ref.bytes = 0;
        IOTrackingLeakScan(&ref);
        IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen);
        if (count <= ref.found) break;
    }

    leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));

    for (numSites = 0, idx = 0; idx < count; idx++)
    {
        inst = instances[idx];
        if (kInstanceFlagReferenced & inst) continue;
        instance = (typeof(instance)) INSTANCE_GET(inst);
        site = instance->site;
        instances[numSites] = (uintptr_t) site;
        numSites++;
    }

    for (idx = 0; idx < numSites; idx++)
    {
        inst = instances[idx];
        if (!inst) continue;
        site = (typeof(site)) inst;
        for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++)
        {
            if (instances[dups] == (uintptr_t) site)
            {
                siteCount++;
                instances[dups] = 0;
            }
        }
        siteInfo.count   = siteCount;
        siteInfo.size[0] = (site->size[0] * site->count) / siteCount;
        siteInfo.size[1] = (site->size[1] * site->count) / siteCount;
        CopyOutKernelBacktrace(site, &siteInfo);
        leakData->appendBytes(&siteInfo, sizeof(siteInfo));
    }
    data->release();

    return (leakData);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static bool
SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
{
    const char * scan;
    const char * next;
    bool         exclude, found;
    size_t       qLen, sLen;

    if (!namesLen || !names) return (false);
    // <len><name>...<len><name><0>
    exclude = (0 != (kIOTrackingExcludeNames & options));
    qLen    = strlen(name);
    scan    = names;
    found   = false;
    do
    {
        sLen = scan[0];
        scan++;
        next = scan + sLen;
        if (next >= (names + namesLen)) break;
        found = ((sLen == qLen) && !strncmp(scan, name, sLen));
        scan = next;
    }
    while (!found && (scan < (names + namesLen)));

    return (!(exclude ^ found));
}
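
/*
 * Illustrative sketch (not part of the build): callers of IOTrackingDebug()
 * pass queue/zone names in the packed form parsed by SkipName() above: a
 * length byte followed by the name characters, repeated, terminated by a zero
 * length byte, i.e. <len><name>...<len><name><0>. A userspace packer might
 * look like this (helper name is made up):
 *
 *     size_t PackNames(const char * const * names, uint32_t count, char * buf, size_t bufLen)
 *     {
 *         size_t pos = 0;
 *         for (uint32_t i = 0; i < count; i++) {
 *             size_t len = strlen(names[i]);
 *             if ((len > 255) || ((pos + 1 + len + 1) > bufLen)) return (0);
 *             buf[pos++] = (char) len;              // length byte
 *             memcpy(&buf[pos], names[i], len);     // name bytes, no NUL
 *             pos += len;
 *         }
 *         buf[pos++] = 0;                           // terminating zero length
 *         return (pos);
 *     }
 */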

#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static kern_return_t
IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
                uint32_t intag, uint32_t inzsize,
                const char * names, size_t namesLen,
                size_t size, OSObject ** result)
{
    kern_return_t ret;
    OSData      * data;

    if (result) *result = 0;
    data = 0;
    ret  = kIOReturnNotReady;

#if IOTRACKING

    kern_return_t          kr;
    IOTrackingQueue      * queue;
    IOTracking           * instance;
    IOTrackingCallSite   * site;
    IOTrackingCallSiteInfo siteInfo;
    IOTrackingUser       * user;
    task_t                 mapTask;
    mach_vm_address_t      mapAddress;
    mach_vm_size_t         mapSize;
    uint32_t               num, idx, qIdx;
    uintptr_t              instFlags;
    proc_t                 proc;
    bool                   addresses;

    ret = kIOReturnNotFound;
    proc = NULL;
    if (kIOTrackingGetMappings == selector)
    {
        if (value != -1ULL)
        {
            proc = proc_find(value);
            if (!proc) return (kIOReturnNotFound);
        }
    }

    bzero(&siteInfo, sizeof(siteInfo));
    lck_mtx_lock(gIOTrackingLock);
    queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
    {
        if (SkipName(options, queue->name, namesLen, names)) continue;

        if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) continue;

        switch (selector)
        {
            case kIOTrackingResetTracking:
            {
                IOTrackingReset(queue);
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingStartCapture:
            case kIOTrackingStopCapture:
            {
                queue->captureOn = (kIOTrackingStartCapture == selector);
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingSetMinCaptureSize:
            {
                queue->minCaptureSize = size;
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingLeaks:
            {
                if (!(kIOTrackingQueueTypeAlloc & queue->type)) break;

                if (!data) data = OSData::withCapacity(1024 * sizeof(uintptr_t));

                IOTRecursiveLockLock(&queue->lock);
                for (idx = 0; idx < queue->numSiteQs; idx++)
                {
                    queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
                    {
                        addresses = false;
                        queue_iterate(&site->instances, instance, IOTracking *, link)
                        {
                            if (instance == site->addresses) addresses = true;
                            instFlags = (typeof(instFlags)) instance;
                            if (addresses) instFlags |= kInstanceFlagAddress;
                            data->appendBytes(&instFlags, sizeof(instFlags));
                        }
                    }
                }
                // queue is locked
                ret = kIOReturnSuccess;
                break;
            }


            case kIOTrackingGetTracking:
            {
                if (kIOTrackingQueueTypeMap & queue->type) break;

                if (!data) data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));

                IOTRecursiveLockLock(&queue->lock);
                num = queue->siteCount;
                idx = 0;
                for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++)
                {
                    queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
                    {
                        assert(idx < num);
                        idx++;

                        size_t   tsize[2];
                        uint32_t count = site->count;
                        tsize[0] = site->size[0];
                        tsize[1] = site->size[1];

                        if (intag || inzsize)
                        {
                            uintptr_t addr;
                            vm_size_t size, zoneSize;
                            vm_tag_t  tag;

                            if (kIOTrackingQueueTypeAlloc & queue->type)
                            {
                                addresses = false;
                                count = 0;
                                tsize[0] = tsize[1] = 0;
                                queue_iterate(&site->instances, instance, IOTracking *, link)
                                {
                                    if (instance == site->addresses) addresses = true;

                                    if (addresses) addr = ~((IOTrackingAddress *)instance)->address;
                                    else           addr = (uintptr_t) (instance + 1);

                                    kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
                                    if (KERN_SUCCESS != kr) continue;

                                    if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) continue;
                                    if (inzsize && (inzsize != zoneSize))                 continue;

                                    count++;
                                    tsize[0] += size;
                                }
                            }
                            else
                            {
                                if (!intag || inzsize || (intag != site->tag)) continue;
                            }
                        }

                        if (!count) continue;
                        if (size && ((tsize[0] + tsize[1]) < size)) continue;

                        siteInfo.count   = count;
                        siteInfo.size[0] = tsize[0];
                        siteInfo.size[1] = tsize[1];

                        CopyOutKernelBacktrace(site, &siteInfo);
                        data->appendBytes(&siteInfo, sizeof(siteInfo));
                    }
                }
                assert(idx == num);
                IOTRecursiveLockUnlock(&queue->lock);
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingGetMappings:
            {
                if (!(kIOTrackingQueueTypeMap & queue->type)) break;
                if (!data) data = OSData::withCapacity(page_size);

                IOTRecursiveLockLock(&queue->lock);
                num = queue->siteCount;
                idx = 0;
                for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++)
                {
                    queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
                    {
                        assert(idx < num);
                        idx++;

                        kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
                        if (kIOReturnSuccess != kr)               continue;
                        if (proc && (mapTask != proc_task(proc))) continue;
                        if (size && (mapSize < size))             continue;

                        siteInfo.count      = 1;
                        siteInfo.size[0]    = mapSize;
                        siteInfo.address    = mapAddress;
                        siteInfo.addressPID = task_pid(mapTask);
                        siteInfo.btPID      = user->btPID;

                        for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++)
                        {
                            siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
                        }
                        uint32_t * bt32 = (typeof(bt32)) &user->btUser[0];
                        uint64_t * bt64 = (typeof(bt64)) ((void *) &user->btUser[0]);
                        for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++)
                        {
                            if (j >= user->userCount) siteInfo.bt[1][j] = 0;
                            else if (user->user32)    siteInfo.bt[1][j] = bt32[j];
                            else                      siteInfo.bt[1][j] = bt64[j];
                        }
                        data->appendBytes(&siteInfo, sizeof(siteInfo));
                    }
                }
                assert(idx == num);
                IOTRecursiveLockUnlock(&queue->lock);
                ret = kIOReturnSuccess;
                break;
            }

            default:
                ret = kIOReturnUnsupported;
                break;
        }
    }

    if ((kIOTrackingLeaks == selector) && data)
    {
        data = IOTrackingLeaks(data);
        queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
        {
            if (SkipName(options, queue->name, namesLen, names)) continue;
            if (!(kIOTrackingQueueTypeAlloc & queue->type))      continue;
            IOTRecursiveLockUnlock(&queue->lock);
        }
    }

    lck_mtx_unlock(gIOTrackingLock);

    if ((kIOTrackingLeaks == selector) && namesLen && names)
    {
        const char * scan;
        const char * next;
        size_t       sLen;

        if (!data) data = OSData::withCapacity(4096 * sizeof(uintptr_t));

        // <len><name>...<len><name><0>
        scan = names;
        do
        {
            sLen = scan[0];
            scan++;
            next = scan + sLen;
            if (next >= (names + namesLen)) break;
            kr = zone_leaks(scan, sLen, &ZoneSiteProc, data);
            if (KERN_SUCCESS == kr)           ret = kIOReturnSuccess;
            else if (KERN_INVALID_NAME != kr) ret = kIOReturnVMError;
            scan = next;
        }
        while (scan < (names + namesLen));
    }

    if (data) switch (selector)
    {
        case kIOTrackingLeaks:
        case kIOTrackingGetTracking:
        case kIOTrackingGetMappings:
        {
            IOTrackingCallSiteInfo * siteInfos;
            siteInfos = (typeof(siteInfos)) data->getBytesNoCopy();
            num = (data->getLength() / sizeof(*siteInfos));
            qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
            break;
        }
        default: assert(false); break;
    }

    *result = data;
    if (proc) proc_rele(proc);

#endif /* IOTRACKING */

    return (ret);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <IOKit/IOKitDiagnosticsUserClient.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOUserClient

OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
{
    IOKitDiagnosticsClient * inst;

    inst = new IOKitDiagnosticsClient;
    if (inst && !inst->init())
    {
        inst->release();
        inst = 0;
    }

    return (inst);
}

IOReturn IOKitDiagnosticsClient::clientClose(void)
{
    terminate();
    return (kIOReturnSuccess);
}

IOReturn IOKitDiagnosticsClient::setProperties(OSObject * properties)
{
    IOReturn kr = kIOReturnUnsupported;
    return (kr);
}

IOReturn IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
                                                IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
{
    IOReturn                           ret = kIOReturnBadArgument;
    const IOKitDiagnosticsParameters * params;
    const char                       * names;
    size_t                             namesLen;
    OSObject                         * result;

    if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) return (kIOReturnBadArgument);
    params = (typeof(params)) args->structureInput;
    if (!params) return (kIOReturnBadArgument);

    names    = 0;
    namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
    if (namesLen) names = (typeof(names))(params + 1);

    ret = IOTrackingDebug(selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result);

    if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) *args->structureVariableOutputData = result;
    else if (result) result->release();

    return (ret);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
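
/*
 * Illustrative userspace sketch (assumptions flagged; not part of the build):
 * a diagnostics tool drives IOKitDiagnosticsClient::externalMethod() above by
 * sending an IOKitDiagnosticsParameters struct, optionally followed by queue
 * names packed as <len><name>...<len><name><0>, as the structure input. How
 * the connection is opened (which provider publishes this user client) is not
 * shown in this file and is assumed here; IOConnectCallStructMethod() is the
 * standard IOKit.framework call, and exact handling of the variable-sized
 * output buffer is glossed over.
 *
 *     IOKitDiagnosticsParameters params = {};
 *     params.size = 0;                      // 0 = no minimum-size filter
 *     size_t outSize = sizeof(outBuf);      // buffer for IOTrackingCallSiteInfo records
 *     kern_return_t kr = IOConnectCallStructMethod(connect, kIOTrackingGetTracking,
 *                                                  &params, sizeof(params),
 *                                                  outBuf, &outSize);
 *
 * On success the output is an array of IOTrackingCallSiteInfo records, already
 * sorted by descending total size by the qsort() in IOTrackingDebug().
 */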