/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <sys/sysctl.h>
extern "C" {
#include <vm/vm_kern.h>
#include <kern/task.h>
#include <kern/debug.h>
}

#include <libkern/c++/OSContainers.h>
#include <libkern/OSDebug.h>
#include <libkern/c++/OSCPPDebug.h>
#include <kern/backtrace.h>

#include <IOKit/IOKitDebug.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/IOService.h>

#include "IOKitKernelInternal.h"

#ifdef IOKITDEBUG
#define DEBUG_INIT_VALUE IOKITDEBUG
#else
#define DEBUG_INIT_VALUE 0
#endif

SInt64 gIOKitDebug = DEBUG_INIT_VALUE;
SInt64 gIOKitTrace = 0;

#if DEVELOPMENT || DEBUG
#define IODEBUG_CTLFLAGS  CTLFLAG_RW
#else
#define IODEBUG_CTLFLAGS  CTLFLAG_RD
#endif

SYSCTL_QUAD(_debug, OID_AUTO, iokit, IODEBUG_CTLFLAGS | CTLFLAG_LOCKED, &gIOKitDebug, "boot_arg io");
SYSCTL_QUAD(_debug, OID_AUTO, iotrace, CTLFLAG_RW | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");


int          debug_malloc_size;
int          debug_iomalloc_size;

vm_size_t    debug_iomallocpageable_size;
int          debug_container_malloc_size;
// int       debug_ivars_size; // in OSObject.cpp

extern "C" {

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      { IOLog(fmt, ## args); }
#endif

void IOPrintPlane( const IORegistryPlane * plane )
{
    IORegistryEntry *    next;
    IORegistryIterator * iter;
    OSOrderedSet *       all;
    char                 format[] = "%xxxs";
    IOService *          service;

    iter = IORegistryIterator::iterateOver( plane );
    assert( iter );
    all = iter->iterateAll();
    if( all) {
        DEBG("Count %d\n", all->getCount() );
        all->release();
    } else
        DEBG("Empty\n");

    iter->reset();
    while( (next = iter->getNextObjectRecursive())) {
        snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * next->getDepth( plane ));
        DEBG( format, "");
        DEBG( "\033[33m%s", next->getName( plane ));
        if( (next->getLocation( plane )))
            DEBG("@%s", next->getLocation( plane ));
        DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
        if( (service = OSDynamicCast(IOService, next)))
            DEBG(", busy %ld", (long) service->getBusyState());
        DEBG( ">\n");
        // IOSleep(250);
    }
    iter->release();
}

void db_piokjunk(void)
{
}

void db_dumpiojunk( const IORegistryPlane * plane __unused )
{
}

void IOPrintMemory( void )
{

//    OSMetaClass::printInstanceCounts();

    IOLog("\n"
          "ivar kalloc()       0x%08x\n"
          "malloc()            0x%08x\n"
          "containers kalloc() 0x%08x\n"
          "IOMalloc()          0x%08x\n"
          "----------------------------------------\n",
          debug_ivars_size,
          debug_malloc_size,
          debug_container_malloc_size,
          debug_iomalloc_size
          );
}

} /* extern "C" */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super OSObject
OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSObject * IOKitDiagnostics::diagnostics( void )
{
    IOKitDiagnostics * diags;

    diags = new IOKitDiagnostics;
    if( diags && !diags->init()) {
        diags->release();
        diags = 0;
    }

    return( diags );
}

void IOKitDiagnostics::updateOffset( OSDictionary * dict,
                                     UInt64 value, const char * name )
{
    OSNumber * off;

    off = OSNumber::withNumber( value, 64 );
    if( !off)
        return;

    dict->setObject( name, off );
    off->release();
}

bool IOKitDiagnostics::serialize(OSSerialize *s) const
{
    OSDictionary * dict;
    bool           ok;

    dict = OSDictionary::withCapacity( 5 );
    if( !dict)
        return( false );

    updateOffset( dict, debug_ivars_size, "Instance allocation" );
    updateOffset( dict, debug_container_malloc_size, "Container allocation" );
    updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
    updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );

    OSMetaClass::serializeClassDictionary(dict);

    ok = dict->serialize( s );

    dict->release();

    return( ok );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING

#include <libkern/c++/OSCPPDebug.h>
#include <libkern/c++/OSKext.h>
#include <kern/zalloc.h>

__private_extern__ "C" void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));

extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern "C" ppnum_t pmap_valid_page(ppnum_t pn);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOTRecursiveLock
{
    lck_mtx_t * mutex;
    thread_t    thread;
    UInt32      count;
};

struct IOTrackingQueue
{
    queue_chain_t    link;
    IOTRecursiveLock lock;
    const char *     name;
    uintptr_t        btEntry;
    size_t           allocSize;
    size_t           minCaptureSize;
    uint32_t         siteCount;
    uint32_t         type;
    uint32_t         numSiteQs;
    uint8_t          captureOn;
    queue_head_t     sites[];
};

struct IOTrackingCallSite
{
    queue_chain_t     link;
    IOTrackingQueue * queue;
    uint32_t          crc;

    uint32_t          count;
    size_t            size[2];
    uintptr_t         bt[kIOTrackingCallSiteBTs];

    queue_head_t      instances;
    IOTracking *      addresses;
};

struct IOTrackingLeaksRef
{
    uintptr_t * instances;
    uint32_t    zoneSize;
    uint32_t    count;
    uint32_t    found;
    size_t      bytes;
};

lck_mtx_t *  gIOTrackingLock;
queue_head_t gIOTrackingQ;

enum
{
    kTrackingAddressFlagAllocated = 0x00000001
};

#if defined(__LP64__)
#define IOTrackingAddressFlags(ptr)    (ptr->flags)
#else
#define IOTrackingAddressFlags(ptr)    (ptr->tracking.flags)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
IOTRecursiveLockLock(IOTRecursiveLock * lock)
{
    if (lock->thread == current_thread()) lock->count++;
    else
    {
        lck_mtx_lock(lock->mutex);
        assert(lock->thread == 0);
        assert(lock->count == 0);
        lock->thread = current_thread();
        lock->count = 1;
    }
}

static void
IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
{
    assert(lock->thread == current_thread());
    if (0 == (--lock->count))
    {
        lock->thread = 0;
        lck_mtx_unlock(lock->mutex);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingInit(void)
{
    queue_init(&gIOTrackingQ);
    gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOTrackingQueue *
IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
                     size_t allocSize, size_t minCaptureSize,
                     uint32_t type, uint32_t numSiteQs)
{
    IOTrackingQueue * queue;
    uint32_t          idx;

    if (!numSiteQs) numSiteQs = 1;
    queue = (typeof(queue)) kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0]));
    bzero(queue, sizeof(IOTrackingQueue));

    queue->name           = name;
    queue->btEntry        = btEntry;
    queue->allocSize      = allocSize;
    queue->minCaptureSize = minCaptureSize;
    queue->lock.mutex     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue->numSiteQs      = numSiteQs;
    queue->type           = type;
    enum { kFlags = (kIOTracking | kIOTrackingBoot) };
    queue->captureOn      = (kFlags == (kFlags & gIOKitDebug))
                         || (kIOTrackingQueueTypeDefaultOn & type);

    for (idx = 0; idx < numSiteQs; idx++) queue_init(&queue->sites[idx]);

    lck_mtx_lock(gIOTrackingLock);
    queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
    lck_mtx_unlock(gIOTrackingLock);

    return (queue);
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingQueueFree(IOTrackingQueue * queue)
{
    lck_mtx_lock(gIOTrackingLock);
    IOTrackingReset(queue);
    remque(&queue->link);
    lck_mtx_unlock(gIOTrackingLock);

    lck_mtx_free(queue->lock.mutex, IOLockGroup);

    kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0]));
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
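
/*
 * Illustrative only: a minimal sketch of the tracking-queue lifecycle defined
 * above, as a hypothetical client might use it. The queue name, flags and the
 * MyClient* functions are made up for illustration; real callers (such as the
 * IOMalloc wrappers) pass their own values.
 */
#if 0
static IOTrackingQueue * gMyTrackingQ;

void
MyClientInit(void)
{
    // One site bucket, tracking raw addresses (allocSize 0, capture everything).
    gMyTrackingQ = IOTrackingQueueAlloc("my-allocs", 0 /* btEntry */,
                                        0 /* allocSize */, 0 /* minCaptureSize */,
                                        kIOTrackingQueueTypeAlloc, 1 /* numSiteQs */);
}

void
MyClientAllocDone(uintptr_t address, size_t size)
{
    // Records the allocation against the hashed backtrace of this call site.
    IOTrackingAlloc(gMyTrackingQ, address, size);
}

void
MyClientFreeDone(uintptr_t address, size_t size)
{
    // Looks the address up again and drops the per-site counters.
    IOTrackingFree(gMyTrackingQ, address, size);
}

void
MyClientExit(void)
{
    IOTrackingQueueFree(gMyTrackingQ);
}
#endif
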

/* fasthash
    The MIT License

    Copyright (C) 2012 Zilong Tan ([email protected])

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use, copy,
    modify, merge, publish, distribute, sublicense, and/or sell copies
    of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
    BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
    ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
    CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
*/


// Compression function for Merkle-Damgard construction.
// This function is generated using the framework provided.
#define mix(h) ({                           \
            (h) ^= (h) >> 23;               \
            (h) *= 0x2127599bf4325c37ULL;   \
            (h) ^= (h) >> 47; })

static uint64_t
fasthash64(const void *buf, size_t len, uint64_t seed)
{
    const uint64_t        m = 0x880355f21e6d1965ULL;
    const uint64_t      * pos = (const uint64_t *)buf;
    const uint64_t      * end = pos + (len / 8);
    const unsigned char * pos2;
    uint64_t h = seed ^ (len * m);
    uint64_t v;

    while (pos != end) {
        v  = *pos++;
        h ^= mix(v);
        h *= m;
    }

    pos2 = (const unsigned char*)pos;
    v = 0;

    switch (len & 7) {
    case 7: v ^= (uint64_t)pos2[6] << 48;
        [[clang::fallthrough]];
    case 6: v ^= (uint64_t)pos2[5] << 40;
        [[clang::fallthrough]];
    case 5: v ^= (uint64_t)pos2[4] << 32;
        [[clang::fallthrough]];
    case 4: v ^= (uint64_t)pos2[3] << 24;
        [[clang::fallthrough]];
    case 3: v ^= (uint64_t)pos2[2] << 16;
        [[clang::fallthrough]];
    case 2: v ^= (uint64_t)pos2[1] << 8;
        [[clang::fallthrough]];
    case 1: v ^= (uint64_t)pos2[0];
        h ^= mix(v);
        h *= m;
    }

    return mix(h);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
fasthash32(const void *buf, size_t len, uint32_t seed)
{
    // the following trick converts the 64-bit hashcode to Fermat
    // residue, which shall retain information from both the higher
    // and lower parts of hashcode.
    uint64_t h = fasthash64(buf, len, seed);
    return h - (h >> 32);
}
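
/*
 * Illustrative only: how this file applies fasthash32(). IOTrackingAdd()
 * below fingerprints the captured kernel backtrace (skipping its own frame)
 * with a fixed seed, then uses the 32-bit result both as the call-site
 * identity (site->crc) and to pick one of the queue's site buckets.
 */
#if 0
    uintptr_t bt[kIOTrackingCallSiteBTs + 1];
    uint32_t  num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1);
    uint32_t  crc = fasthash32(&bt[1], (num - 1) * sizeof(bt[0]), 0x04C11DB7);
    queue_head_t * bucket = &queue->sites[crc % queue->numSiteQs];
#endif
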

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
{
    uint32_t num;
    proc_t   self;

    if (!queue->captureOn)            return;
    if (size < queue->minCaptureSize) return;

    assert(!mem->link.next);

    num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs);
    num = 0;
    if ((kernel_task != current_task()) && (self = proc_self()))
    {
        bool user_64;
        mem->btPID = proc_pid(self);
        (void)backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, &num,
                             &user_64);
        mem->user32 = !user_64;
        proc_rele(self);
    }
    assert(num <= kIOTrackingCallSiteBTs);
    mem->userCount = num;

    IOTRecursiveLockLock(&queue->lock);
    queue_enter/*last*/(&queue->sites[0], mem, IOTrackingUser *, link);
    queue->siteCount++;
    IOTRecursiveLockUnlock(&queue->lock);
}

void
IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
{
    if (!mem->link.next) return;

    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next)
    {
        remque(&mem->link);
        assert(queue->siteCount);
        queue->siteCount--;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

uint64_t gIOTrackingAddTime;

void
IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address)
{
    IOTrackingCallSite * site;
    uint32_t             crc, num;
    uintptr_t            bt[kIOTrackingCallSiteBTs + 1];
    queue_head_t *       que;

    if (mem->site)                    return;
    if (!queue->captureOn)            return;
    if (size < queue->minCaptureSize) return;

    assert(!mem->link.next);

    num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1);
    if (!num) return;
    num--;
    crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);

    IOTRecursiveLockLock(&queue->lock);
    que = &queue->sites[crc % queue->numSiteQs];
    queue_iterate(que, site, IOTrackingCallSite *, link)
    {
        if (crc == site->crc) break;
    }

    if (queue_end(que, (queue_entry_t) site))
    {
        site = (typeof(site)) kalloc(sizeof(IOTrackingCallSite));

        queue_init(&site->instances);
        site->addresses = (IOTracking *) &site->instances;
        site->queue     = queue;
        site->crc       = crc;
        site->count     = 0;
        memset(&site->size[0], 0, sizeof(site->size));
        bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
        assert(num <= kIOTrackingCallSiteBTs);
        bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));

        queue_enter_first(que, site, IOTrackingCallSite *, link);
        queue->siteCount++;
    }

    if (address)
    {
        queue_enter/*last*/(&site->instances, mem, IOTracking *, link);
        if (queue_end(&site->instances, (queue_entry_t)site->addresses)) site->addresses = mem;
    }
    else queue_enter_first(&site->instances, mem, IOTracking *, link);

    mem->site = site;
    site->size[0] += size;
    site->count++;

    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
    if (!mem->link.next) return;

    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next)
    {
        assert(mem->site);

        if (mem == mem->site->addresses) mem->site->addresses = (IOTracking *) queue_next(&mem->link);
        remque(&mem->link);

        assert(mem->site->count);
        mem->site->count--;
        assert(mem->site->size[0] >= size);
        mem->site->size[0] -= size;
        if (!mem->site->count)
        {
            assert(queue_empty(&mem->site->instances));
            assert(!mem->site->size[0]);
            assert(!mem->site->size[1]);

            remque(&mem->site->link);
            assert(queue->siteCount);
            queue->siteCount--;
            kfree(mem->site, sizeof(IOTrackingCallSite));
        }
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
    IOTrackingAddress * tracking;

    if (!queue->captureOn)            return;
    if (size < queue->minCaptureSize) return;

    address = ~address;
    tracking = (typeof(tracking)) kalloc(sizeof(IOTrackingAddress));
    bzero(tracking, sizeof(IOTrackingAddress));
    IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
    tracking->address = address;
    tracking->size    = size;

    IOTrackingAdd(queue, &tracking->tracking, size, true);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
    IOTrackingCallSite * site;
    IOTrackingAddress  * tracking;
    uint32_t             idx;
    bool                 done;

    address = ~address;
    IOTRecursiveLockLock(&queue->lock);
    done = false;
    for (idx = 0; idx < queue->numSiteQs; idx++)
    {
        queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
        {
            for (tracking = (IOTrackingAddress *) site->addresses;
                 !done && !queue_end(&site->instances, &tracking->tracking.link);
                 tracking = (IOTrackingAddress *) queue_next(&tracking->tracking.link))
            {
                if ((done = (address == tracking->address)))
                {
                    IOTrackingRemove(queue, &tracking->tracking, size);
                    kfree(tracking, sizeof(IOTrackingAddress));
                }
            }
            if (done) break;
        }
        if (done) break;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next)
    {
        assert(mem->site);
        assert((size > 0) || (mem->site->size[1] >= -size));
        mem->site->size[1] += size;
    };
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingReset(IOTrackingQueue * queue)
{
    IOTrackingCallSite * site;
    IOTrackingUser     * user;
    IOTracking         * tracking;
    IOTrackingAddress  * trackingAddress;
    uint32_t             idx;
    bool                 addresses;

    IOTRecursiveLockLock(&queue->lock);
    for (idx = 0; idx < queue->numSiteQs; idx++)
    {
        while (!queue_empty(&queue->sites[idx]))
        {
            if (kIOTrackingQueueTypeMap & queue->type)
            {
                queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
                user->link.next = user->link.prev = NULL;
            }
            else
            {
                queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
                addresses = false;
                while (!queue_empty(&site->instances))
                {
                    queue_remove_first(&site->instances, tracking, IOTracking *, link);
                    if (tracking == site->addresses) addresses = true;
                    if (addresses)
                    {
                        trackingAddress = (typeof(trackingAddress)) tracking;
                        if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress))
                        {
                            kfree(tracking, sizeof(IOTrackingAddress));
                        }
                    }
                }
                kfree(site, sizeof(IOTrackingCallSite));
            }
        }
    }
    queue->siteCount = 0;
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static int
IOTrackingCallSiteInfoCompare(const void * left, const void * right)
{
    IOTrackingCallSiteInfo * l = (typeof(l)) left;
    IOTrackingCallSiteInfo * r = (typeof(r)) right;
    size_t                   lsize, rsize;

    rsize = r->size[0] + r->size[1];
    lsize = l->size[0] + l->size[1];

    return ((rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static int
IOTrackingAddressCompare(const void * left, const void * right)
{
    IOTracking * instance;
    uintptr_t    inst, laddr, raddr;

    inst = ((typeof(inst) *) left)[0];
    instance = (typeof(instance)) INSTANCE_GET(inst);
    if (kInstanceFlagAddress & inst) laddr = ~((IOTrackingAddress *)instance)->address;
    else                             laddr = (uintptr_t) (instance + 1);

    inst = ((typeof(inst) *) right)[0];
    instance = (typeof(instance)) (inst & ~kInstanceFlags);
    if (kInstanceFlagAddress & inst) raddr = ~((IOTrackingAddress *)instance)->address;
    else                             raddr = (uintptr_t) (instance + 1);

    return ((laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1));
}


static int
IOTrackingZoneElementCompare(const void * left, const void * right)
{
    uintptr_t inst, laddr, raddr;

    inst = ((typeof(inst) *) left)[0];
    laddr = INSTANCE_PUT(inst);
    inst = ((typeof(inst) *) right)[0];
    raddr = INSTANCE_PUT(inst);

    return ((laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
{
    uint32_t          j;
    mach_vm_address_t bt, btEntry;

    btEntry = site->queue->btEntry;
    for (j = 0; j < kIOTrackingCallSiteBTs; j++)
    {
        bt = site->bt[j];
        if (btEntry
            && (!bt || (j == (kIOTrackingCallSiteBTs - 1))))
        {
            bt = btEntry;
            btEntry = 0;
        }
        siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
IOTrackingLeakScan(void * refcon)
{
    IOTrackingLeaksRef * ref = (typeof(ref)) refcon;
    uintptr_t          * instances;
    IOTracking         * instance;
    uint64_t             vaddr, vincr;
    ppnum_t              ppn;
    uintptr_t            ptr, addr, vphysaddr, inst;
    size_t               size;
    uint32_t             baseIdx, lim, ptrIdx, count;
    boolean_t            is;
    AbsoluteTime         deadline;

    instances = ref->instances;
    count     = ref->count;
    size      = ref->zoneSize;

    for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
         ;
         vaddr += vincr)
    {
        if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS))
        {
            if (deadline)
            {
                ml_set_interrupts_enabled(is);
                IODelay(10);
            }
            if (vaddr >= VM_MAX_KERNEL_ADDRESS) break;
            is = ml_set_interrupts_enabled(false);
            clock_interval_to_deadline(10, kMillisecondScale, &deadline);
        }

        ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
        // check noencrypt to avoid VM structs (map entries) with pointers
        if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) ppn = 0;
        if (!ppn) continue;

        for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++)
        {
            ptr = ((uintptr_t *)vphysaddr)[ptrIdx];

            for (lim = count, baseIdx = 0; lim; lim >>= 1)
            {
                inst = instances[baseIdx + (lim >> 1)];
                instance = (typeof(instance)) INSTANCE_GET(inst);

                if (ref->zoneSize)
                {
                    addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
                }
                else if (kInstanceFlagAddress & inst)
                {
                    addr = ~((IOTrackingAddress *)instance)->address;
                    size = ((IOTrackingAddress *)instance)->size;
                }
                else
                {
                    addr = (uintptr_t) (instance + 1);
                    size = instance->site->queue->allocSize;
                }
                if ((ptr >= addr) && (ptr < (addr + size))

                    && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
                     || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size))))
                {
                    if (!(kInstanceFlagReferenced & inst))
                    {
                        inst |= kInstanceFlagReferenced;
                        instances[baseIdx + (lim >> 1)] = inst;
                        ref->found++;
                    }
                    break;
                }
                if (ptr > addr)
                {
                    // move right
                    baseIdx += (lim >> 1) + 1;
                    lim--;
                }
                // else move left
            }
        }
        ref->bytes += page_size;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
{
    IOTrackingLeaksRef     ref;
    IOTrackingCallSiteInfo siteInfo;
    uint32_t               idx;

    qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);

    bzero(&siteInfo, sizeof(siteInfo));
    bzero(&ref, sizeof(ref));
    ref.instances = instances;
    ref.count     = count;
    ref.zoneSize  = zoneSize;

    for (idx = 0; idx < 2; idx++)
    {
        ref.bytes = 0;
        IOTrackingLeakScan(&ref);
        IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
        if (count <= ref.found) break;
    }

    *found = ref.found;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
ZoneSiteProc(void * refCon, uint32_t siteCount, uint32_t zoneSize,
             uintptr_t * backtrace, uint32_t btCount)
{
    IOTrackingCallSiteInfo siteInfo;
    OSData               * leakData;
    uint32_t               idx;

    leakData = (typeof(leakData)) refCon;

    bzero(&siteInfo, sizeof(siteInfo));
    siteInfo.count   = siteCount;
    siteInfo.size[0] = zoneSize * siteCount;

    for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++)
    {
        siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]);
    }

    leakData->appendBytes(&siteInfo, sizeof(siteInfo));
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static OSData *
IOTrackingLeaks(OSData * data)
{
    IOTrackingLeaksRef     ref;
    IOTrackingCallSiteInfo siteInfo;
    IOTrackingCallSite   * site;
    OSData               * leakData;
    uintptr_t            * instances;
    IOTracking           * instance;
    uintptr_t              inst;
    uint32_t               count, idx, numSites, dups, siteCount;

    instances = (typeof(instances)) data->getBytesNoCopy();
    count = (data->getLength() / sizeof(*instances));
    qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);

    bzero(&siteInfo, sizeof(siteInfo));
    bzero(&ref, sizeof(ref));
    ref.instances = instances;
    ref.count = count;
    for (idx = 0; idx < 2; idx++)
    {
        ref.bytes = 0;
        IOTrackingLeakScan(&ref);
        IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
        if (count <= ref.found) break;
    }

    leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));

    for (numSites = 0, idx = 0; idx < count; idx++)
    {
        inst = instances[idx];
        if (kInstanceFlagReferenced & inst) continue;
        instance = (typeof(instance)) INSTANCE_GET(inst);
        site = instance->site;
        instances[numSites] = (uintptr_t) site;
        numSites++;
    }

    for (idx = 0; idx < numSites; idx++)
    {
        inst = instances[idx];
        if (!inst) continue;
        site = (typeof(site)) inst;
        for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++)
        {
            if (instances[dups] == (uintptr_t) site)
            {
                siteCount++;
                instances[dups] = 0;
            }
        }
        siteInfo.count   = siteCount;
        siteInfo.size[0] = (site->size[0] * site->count) / siteCount;
        siteInfo.size[1] = (site->size[1] * site->count) / siteCount;
        CopyOutKernelBacktrace(site, &siteInfo);
        leakData->appendBytes(&siteInfo, sizeof(siteInfo));
    }
    data->release();

    return (leakData);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static bool
SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
{
    const char * scan;
    const char * next;
    bool         exclude, found;
    size_t       qLen, sLen;

    if (!namesLen || !names) return (false);
    // <len><name>...<len><name><0>
    exclude = (0 != (kIOTrackingExcludeNames & options));
    qLen    = strlen(name);
    scan    = names;
    found   = false;
    do
    {
        sLen = scan[0];
        scan++;
        next = scan + sLen;
        if (next >= (names + namesLen)) break;
        found = ((sLen == qLen) && !strncmp(scan, name, sLen));
        scan = next;
    }
    while (!found && (scan < (names + namesLen)));

    return (!(exclude ^ found));
}
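
/*
 * Illustrative only: the packed name list SkipName() parses. Each entry is a
 * one-byte length followed by that many characters, and the list ends with a
 * zero length byte; namesLen covers the whole buffer. For example, selecting
 * two (hypothetical) queue names "IOMalloc" and "iomem" could be encoded as:
 */
#if 0
    static const char kNames[] = "\x08" "IOMalloc" "\x05" "iomem" "\x00";
    // SkipName(0, "IOMalloc", sizeof(kNames) - 1, kNames) -> false (queue is kept)
    // SkipName(0, "IODevice", sizeof(kNames) - 1, kNames) -> true  (queue is skipped)
    // With kIOTrackingExcludeNames set in options, the results invert.
#endif
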

#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t
IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
                const char * names, size_t namesLen,
                size_t size, OSObject ** result)
{
    kern_return_t ret;
    OSData      * data;

    if (result) *result = 0;
    data = 0;
    ret  = kIOReturnNotReady;

#if IOTRACKING

    kern_return_t          kr;
    IOTrackingQueue      * queue;
    IOTracking           * instance;
    IOTrackingCallSite   * site;
    IOTrackingCallSiteInfo siteInfo;
    IOTrackingUser       * user;
    task_t                 mapTask;
    mach_vm_address_t      mapAddress;
    mach_vm_size_t         mapSize;
    uint32_t               num, idx, qIdx;
    uintptr_t              instFlags;
    proc_t                 proc;
    bool                   addresses;

    ret = kIOReturnNotFound;
    proc = NULL;
    if (kIOTrackingGetMappings == selector)
    {
        if (value != -1ULL)
        {
            proc = proc_find(value);
            if (!proc) return (kIOReturnNotFound);
        }
    }

    bzero(&siteInfo, sizeof(siteInfo));
    lck_mtx_lock(gIOTrackingLock);
    queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
    {
        if (SkipName(options, queue->name, namesLen, names)) continue;

        if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) continue;

        switch (selector)
        {
            case kIOTrackingResetTracking:
            {
                IOTrackingReset(queue);
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingStartCapture:
            case kIOTrackingStopCapture:
            {
                queue->captureOn = (kIOTrackingStartCapture == selector);
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingSetMinCaptureSize:
            {
                queue->minCaptureSize = size;
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingLeaks:
            {
                if (!(kIOTrackingQueueTypeAlloc & queue->type)) break;

                if (!data) data = OSData::withCapacity(1024 * sizeof(uintptr_t));

                IOTRecursiveLockLock(&queue->lock);
                for (idx = 0; idx < queue->numSiteQs; idx++)
                {
                    queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
                    {
                        addresses = false;
                        queue_iterate(&site->instances, instance, IOTracking *, link)
                        {
                            if (instance == site->addresses) addresses = true;
                            instFlags = (typeof(instFlags)) instance;
                            if (addresses) instFlags |= kInstanceFlagAddress;
                            data->appendBytes(&instFlags, sizeof(instFlags));
                        }
                    }
                }
                // queue is locked
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingGetTracking:
            {
                if (kIOTrackingQueueTypeMap & queue->type) break;

                if (!data) data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));

                IOTRecursiveLockLock(&queue->lock);
                num = queue->siteCount;
                idx = 0;
                for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++)
                {
                    queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
                    {
                        assert(idx < num);
                        idx++;

                        if (size && ((site->size[0] + site->size[1]) < size)) continue;

                        siteInfo.count   = site->count;
                        siteInfo.size[0] = site->size[0];
                        siteInfo.size[1] = site->size[1];

                        CopyOutKernelBacktrace(site, &siteInfo);
                        data->appendBytes(&siteInfo, sizeof(siteInfo));
                    }
                }
                assert(idx == num);
                IOTRecursiveLockUnlock(&queue->lock);
                ret = kIOReturnSuccess;
                break;
            }

            case kIOTrackingGetMappings:
            {
                if (!(kIOTrackingQueueTypeMap & queue->type)) break;
                if (!data) data = OSData::withCapacity(page_size);

                IOTRecursiveLockLock(&queue->lock);
                num = queue->siteCount;
                idx = 0;
                for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++)
                {
                    queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
                    {
                        assert(idx < num);
                        idx++;

                        kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
                        if (kIOReturnSuccess != kr)               continue;
                        if (proc && (mapTask != proc_task(proc))) continue;
                        if (size && (mapSize < size))             continue;

                        siteInfo.count      = 1;
                        siteInfo.size[0]    = mapSize;
                        siteInfo.address    = mapAddress;
                        siteInfo.addressPID = task_pid(mapTask);
                        siteInfo.btPID      = user->btPID;

                        for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++)
                        {
                            siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
                        }
                        uint32_t * bt32 = (typeof(bt32)) &user->btUser[0];
                        uint64_t * bt64 = (typeof(bt64)) ((void *) &user->btUser[0]);
                        for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++)
                        {
                            if (j >= user->userCount) siteInfo.bt[1][j] = 0;
                            else if (user->user32)    siteInfo.bt[1][j] = bt32[j];
                            else                      siteInfo.bt[1][j] = bt64[j];
                        }
                        data->appendBytes(&siteInfo, sizeof(siteInfo));
                    }
                }
                assert(idx == num);
                IOTRecursiveLockUnlock(&queue->lock);
                ret = kIOReturnSuccess;
                break;
            }

            default:
                ret = kIOReturnUnsupported;
                break;
        }
    }

    if ((kIOTrackingLeaks == selector) && data)
    {
        data = IOTrackingLeaks(data);
        queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
        {
            if (SkipName(options, queue->name, namesLen, names)) continue;
            if (!(kIOTrackingQueueTypeAlloc & queue->type))      continue;
            IOTRecursiveLockUnlock(&queue->lock);
        }
    }

    lck_mtx_unlock(gIOTrackingLock);

    if ((kIOTrackingLeaks == selector) && namesLen && names)
    {
        const char * scan;
        const char * next;
        size_t       sLen;

        if (!data) data = OSData::withCapacity(4096 * sizeof(uintptr_t));

        // <len><name>...<len><name><0>
        scan = names;
        do
        {
            sLen = scan[0];
            scan++;
            next = scan + sLen;
            if (next >= (names + namesLen)) break;
            kr = zone_leaks(scan, sLen, &ZoneSiteProc, data);
            if (KERN_SUCCESS == kr)           ret = kIOReturnSuccess;
            else if (KERN_INVALID_NAME != kr) ret = kIOReturnVMError;
            scan = next;
        }
        while (scan < (names + namesLen));
    }

    if (data) switch (selector)
    {
        case kIOTrackingLeaks:
        case kIOTrackingGetTracking:
        case kIOTrackingGetMappings:
        {
            IOTrackingCallSiteInfo * siteInfos;
            siteInfos = (typeof(siteInfos)) data->getBytesNoCopy();
            num = (data->getLength() / sizeof(*siteInfos));
            qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
            break;
        }
        default: assert(false); break;
    }

    *result = data;
    if (proc) proc_rele(proc);

#endif /* IOTRACKING */

    return (ret);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <IOKit/IOKitDiagnosticsUserClient.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOUserClient

OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
{
    IOKitDiagnosticsClient * inst;

    inst = new IOKitDiagnosticsClient;
    if (inst && !inst->init())
    {
        inst->release();
        inst = 0;
    }

    return (inst);
}

IOReturn IOKitDiagnosticsClient::clientClose(void)
{
    terminate();
    return (kIOReturnSuccess);
}

IOReturn IOKitDiagnosticsClient::setProperties(OSObject * properties)
{
    IOReturn kr = kIOReturnUnsupported;
    return (kr);
}

IOReturn IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
                                                IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
{
    IOReturn                           ret = kIOReturnBadArgument;
    const IOKitDiagnosticsParameters * params;
    const char *                       names;
    size_t                             namesLen;
    OSObject *                         result;

    if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) return (kIOReturnBadArgument);
    params = (typeof(params)) args->structureInput;
    if (!params) return (kIOReturnBadArgument);

    names    = 0;
    namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
    if (namesLen) names = (typeof(names))(params + 1);

    ret = IOTrackingDebug(selector, params->options, params->value, names, namesLen, params->size, &result);

    if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) *args->structureVariableOutputData = result;
    else if (result) result->release();

    return (ret);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */