/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <sys/sysctl.h>
extern "C" {
#include <vm/vm_kern.h>
#include <kern/task.h>
#include <kern/debug.h>
}

#include <libkern/c++/OSContainers.h>
#include <libkern/OSDebug.h>
#include <libkern/c++/OSCPPDebug.h>
#include <kern/backtrace.h>

#include <IOKit/IOKitDebug.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/IOService.h>

#include "IOKitKernelInternal.h"

#ifdef IOKITDEBUG
#define DEBUG_INIT_VALUE IOKITDEBUG
#else
#define DEBUG_INIT_VALUE 0
#endif

SInt64 gIOKitDebug = DEBUG_INIT_VALUE;
SInt64 gIOKitTrace = 0;

#if DEVELOPMENT || DEBUG
#define IODEBUG_CTLFLAGS CTLFLAG_RW
#else
#define IODEBUG_CTLFLAGS CTLFLAG_RD
#endif

SYSCTL_QUAD(_debug, OID_AUTO, iotrace, CTLFLAG_RW | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");

static int
sysctl_debug_iokit
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    SInt64 newValue;
    int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed);
    if (changed) {
        gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions));
    }
    return error;
}

SYSCTL_PROC(_debug, OID_AUTO, iokit,
    CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io");

size_t debug_malloc_size;
size_t debug_iomalloc_size;

vm_size_t debug_iomallocpageable_size;
size_t debug_container_malloc_size;
// int debug_ivars_size; // in OSObject.cpp

extern "C" {
#if 0
#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) { IOLog(fmt, ## args); }
#endif

void
IOPrintPlane( const IORegistryPlane * plane )
{
    IORegistryEntry * next;
    IORegistryIterator * iter;
    OSOrderedSet * all;
    char format[] = "%xxxs";
    IOService * service;

    iter = IORegistryIterator::iterateOver( plane );
    assert( iter );
    all = iter->iterateAll();
    if (all) {
        DEBG("Count %d\n", all->getCount());
        all->release();
    } else {
        DEBG("Empty\n");
    }

    iter->reset();
    while ((next = iter->getNextObjectRecursive())) {
        snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * next->getDepth( plane ));
        DEBG( format, "");
        DEBG( "\033[33m%s", next->getName( plane ));
        if ((next->getLocation( plane ))) {
            DEBG("@%s", next->getLocation( plane ));
        }
        DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
        if ((service = OSDynamicCast(IOService, next))) {
            DEBG(", busy %ld", (long) service->getBusyState());
        }
        DEBG( ">\n");
        // IOSleep(250);
    }
    iter->release();
}

void
db_piokjunk(void)
{
}

void
db_dumpiojunk( const IORegistryPlane * plane __unused )
{
}

void
IOPrintMemory( void )
{
    // OSMetaClass::printInstanceCounts();

    IOLog("\n"
        "ivar kalloc() 0x%08lx\n"
        "malloc() 0x%08lx\n"
        "containers kalloc() 0x%08lx\n"
        "IOMalloc() 0x%08lx\n"
        "----------------------------------------\n",
        debug_ivars_size,
        debug_malloc_size,
        debug_container_malloc_size,
        debug_iomalloc_size
        );
}
} /* extern "C" */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super OSObject
OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSObject * IOKitDiagnostics::diagnostics( void )
{
    IOKitDiagnostics * diags;

    diags = new IOKitDiagnostics;
    if (diags && !diags->init()) {
        diags->release();
        diags = NULL;
    }

    return diags;
}

void
IOKitDiagnostics::updateOffset( OSDictionary * dict,
    UInt64 value, const char * name )
{
    OSNumber * off;

    off = OSNumber::withNumber( value, 64 );
    if (!off) {
        return;
    }

    dict->setObject( name, off );
    off->release();
}

bool
IOKitDiagnostics::serialize(OSSerialize *s) const
{
    OSDictionary * dict;
    bool ok;

    dict = OSDictionary::withCapacity( 5 );
    if (!dict) {
        return false;
    }

    updateOffset( dict, debug_ivars_size, "Instance allocation" );
    updateOffset( dict, debug_container_malloc_size, "Container allocation" );
    updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
    updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );

    OSMetaClass::serializeClassDictionary(dict);

    ok = dict->serialize( s );

    dict->release();

    return ok;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if IOTRACKING

#include <libkern/c++/OSCPPDebug.h>
#include <libkern/c++/OSKext.h>
#include <kern/zalloc.h>

__private_extern__ "C" void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));

extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern "C" ppnum_t pmap_valid_page(ppnum_t pn);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
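// Descriptive note (editorial summary of the structures below): an
// IOTrackingQueue owns an array of hash buckets (sites[]); each bucket holds
// IOTrackingCallSite records keyed by a hash of the allocating backtrace, and
// each call site keeps a list of live IOTracking instances plus running
// size/count totals. IOTrackingCallSiteUser optionally appends a user-space
// backtrace for queues of type kIOTrackingQueueTypeUser.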

struct IOTRecursiveLock {
    lck_mtx_t * mutex;
    thread_t thread;
    UInt32 count;
};

struct IOTrackingQueue {
    queue_chain_t link;
    IOTRecursiveLock lock;
    const char * name;
    uintptr_t btEntry;
    size_t allocSize;
    size_t minCaptureSize;
    uint32_t siteCount;
    uint32_t type;
    uint32_t numSiteQs;
    uint8_t captureOn;
    queue_head_t sites[];
};


struct IOTrackingCallSiteUser {
    pid_t pid;
    uint8_t user32;
    uint8_t userCount;
    uintptr_t bt[kIOTrackingCallSiteBTs];
};

struct IOTrackingCallSite {
    queue_chain_t link;
    queue_head_t instances;
    IOTrackingQueue * queue;
    IOTracking * addresses;
    size_t size[2];
    uint32_t crc;
    uint32_t count;

    vm_tag_t tag;
    uint8_t user32;
    uint8_t userCount;
    pid_t btPID;

    uintptr_t bt[kIOTrackingCallSiteBTs];
    IOTrackingCallSiteUser user[0];
};


struct IOTrackingLeaksRef {
    uintptr_t * instances;
    uint32_t zoneSize;
    uint32_t count;
    uint32_t found;
    uint32_t foundzlen;
    size_t bytes;
};

lck_mtx_t * gIOTrackingLock;
queue_head_t gIOTrackingQ;

enum{
    kTrackingAddressFlagAllocated = 0x00000001
};

#if defined(__LP64__)
#define IOTrackingAddressFlags(ptr) (ptr->flags)
#else
#define IOTrackingAddressFlags(ptr) (ptr->tracking.flags)
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
IOTRecursiveLockLock(IOTRecursiveLock * lock)
{
    if (lock->thread == current_thread()) {
        lock->count++;
    } else {
        lck_mtx_lock(lock->mutex);
        assert(lock->thread == NULL);
        assert(lock->count == 0);
        lock->thread = current_thread();
        lock->count = 1;
    }
}

static void
IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
{
    assert(lock->thread == current_thread());
    if (0 == (--lock->count)) {
        lock->thread = NULL;
        lck_mtx_unlock(lock->mutex);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingInit(void)
{
    queue_init(&gIOTrackingQ);
    gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOTrackingQueue *
IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
    size_t allocSize, size_t minCaptureSize,
    uint32_t type, uint32_t numSiteQs)
{
    IOTrackingQueue * queue;
    uint32_t idx;

    if (!numSiteQs) {
        numSiteQs = 1;
    }
    queue = (typeof(queue))kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0]));
    bzero(queue, sizeof(IOTrackingQueue));

    queue->name = name;
    queue->btEntry = btEntry;
    queue->allocSize = allocSize;
    queue->minCaptureSize = minCaptureSize;
    queue->lock.mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
    queue->numSiteQs = numSiteQs;
    queue->type = type;
    enum { kFlags = (kIOTracking | kIOTrackingBoot) };
    queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
        || (kIOTrackingQueueTypeDefaultOn & type);

    for (idx = 0; idx < numSiteQs; idx++) {
        queue_init(&queue->sites[idx]);
    }

    lck_mtx_lock(gIOTrackingLock);
    queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
    lck_mtx_unlock(gIOTrackingLock);

    return queue;
};

void
IOTrackingQueueCollectUser(IOTrackingQueue * queue)
{
    assert(0 == queue->siteCount);
    queue->type |= kIOTrackingQueueTypeUser;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingQueueFree(IOTrackingQueue * queue)
{
    lck_mtx_lock(gIOTrackingLock);
    IOTrackingReset(queue);
    remque(&queue->link);
    lck_mtx_unlock(gIOTrackingLock);

    lck_mtx_free(queue->lock.mutex, IOLockGroup);

    kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0]));
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* fasthash
 * The MIT License
 *
 * Copyright (C) 2012 Zilong Tan ([email protected])
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


// Compression function for Merkle-Damgard construction.
// This function is generated using the framework provided.
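// (Editorial note: mix() is the fasthash avalanche step; fasthash64()/fasthash32()
// below use it to hash captured backtraces into call-site hash buckets.)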
#define mix(h) ({ \
        (h) ^= (h) >> 23; \
        (h) *= 0x2127599bf4325c37ULL; \
        (h) ^= (h) >> 47; })

static uint64_t
fasthash64(const void *buf, size_t len, uint64_t seed)
{
    const uint64_t m = 0x880355f21e6d1965ULL;
    const uint64_t *pos = (const uint64_t *)buf;
    const uint64_t *end = pos + (len / 8);
    const unsigned char *pos2;
    uint64_t h = seed ^ (len * m);
    uint64_t v;

    while (pos != end) {
        v = *pos++;
        h ^= mix(v);
        h *= m;
    }

    pos2 = (const unsigned char*)pos;
    v = 0;

    switch (len & 7) {
    case 7: v ^= (uint64_t)pos2[6] << 48;
        [[clang::fallthrough]];
    case 6: v ^= (uint64_t)pos2[5] << 40;
        [[clang::fallthrough]];
    case 5: v ^= (uint64_t)pos2[4] << 32;
        [[clang::fallthrough]];
    case 4: v ^= (uint64_t)pos2[3] << 24;
        [[clang::fallthrough]];
    case 3: v ^= (uint64_t)pos2[2] << 16;
        [[clang::fallthrough]];
    case 2: v ^= (uint64_t)pos2[1] << 8;
        [[clang::fallthrough]];
    case 1: v ^= (uint64_t)pos2[0];
        h ^= mix(v);
        h *= m;
    }

    return mix(h);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uint32_t
fasthash32(const void *buf, size_t len, uint32_t seed)
{
    // the following trick converts the 64-bit hashcode to Fermat
    // residue, which shall retain information from both the higher
    // and lower parts of hashcode.
    uint64_t h = fasthash64(buf, len, seed);
    return (uint32_t) (h - (h >> 32));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
{
    uint32_t num;
    int pid;

    if (!queue->captureOn) {
        return;
    }
    if (size < queue->minCaptureSize) {
        return;
    }

    assert(!mem->link.next);

    num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs, NULL);
    num = 0;
    if ((kernel_task != current_task()) && (pid = proc_selfpid())) {
        bool user_64 = false;
        mem->btPID = pid;
        num = backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, NULL,
            &user_64, NULL);
        mem->user32 = !user_64;
    }
    assert(num <= kIOTrackingCallSiteBTs);
    static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX);
    mem->userCount = ((uint8_t) num);

    IOTRecursiveLockLock(&queue->lock);
    queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link);
    queue->siteCount++;
    IOTRecursiveLockUnlock(&queue->lock);
}

void
IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
{
    if (!mem->link.next) {
        return;
    }

    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next) {
        remque(&mem->link);
        assert(queue->siteCount);
        queue->siteCount--;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

uint64_t gIOTrackingAddTime;

void
IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
{
    IOTrackingCallSite * site;
    uint32_t crc, num;
    uintptr_t bt[kIOTrackingCallSiteBTs + 1];
    uintptr_t btUser[kIOTrackingCallSiteBTs];
    queue_head_t * que;
    bool user;
    int pid;
    int userCount;
    bool user64;

    if (mem->site) {
        return;
    }
    if (!queue->captureOn) {
        return;
    }
    if (size < queue->minCaptureSize) {
        return;
    }

    user = (0 != (kIOTrackingQueueTypeUser & queue->type));

    assert(!mem->link.next);
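
    // Capture the kernel backtrace; the first captured frame is dropped below
    // (num--), and the remaining frames are hashed with fasthash32() to locate
    // or create the matching IOTrackingCallSite in the queue's hash buckets.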
    num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1, NULL);
    if (!num) {
        return;
    }
    num--;
    crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);

    userCount = 0;
    user64 = false;
    pid = 0;
    if (user) {
        if ((kernel_task != current_task()) && (pid = proc_selfpid())) {
            userCount = backtrace_user(&btUser[0], kIOTrackingCallSiteBTs, NULL, &user64, NULL);
            assert(userCount <= kIOTrackingCallSiteBTs);
            crc = fasthash32(&btUser[0], userCount * sizeof(bt[0]), crc);
        }
    }

    IOTRecursiveLockLock(&queue->lock);
    que = &queue->sites[crc % queue->numSiteQs];
    queue_iterate(que, site, IOTrackingCallSite *, link)
    {
        if (tag != site->tag) {
            continue;
        }
        if (user && (pid != site->user[0].pid)) {
            continue;
        }
        if (crc == site->crc) {
            break;
        }
    }

    if (queue_end(que, (queue_entry_t) site)) {
        size_t siteSize = sizeof(IOTrackingCallSite);
        if (user) {
            siteSize += sizeof(IOTrackingCallSiteUser);
        }
        site = (typeof(site))kalloc(siteSize);

        queue_init(&site->instances);
        site->addresses = (IOTracking *) &site->instances;
        site->queue = queue;
        site->crc = crc;
        site->count = 0;
        site->tag = tag;
        memset(&site->size[0], 0, sizeof(site->size));
        bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
        assert(num <= kIOTrackingCallSiteBTs);
        bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));
        if (user) {
            bcopy(&btUser[0], &site->user[0].bt[0], userCount * sizeof(site->user[0].bt[0]));
            assert(userCount <= kIOTrackingCallSiteBTs);
            bzero(&site->user[0].bt[userCount], (kIOTrackingCallSiteBTs - userCount) * sizeof(site->user[0].bt[0]));
            site->user[0].pid = pid;
            site->user[0].user32 = !user64;
            static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX);
            site->user[0].userCount = ((uint8_t) userCount);
        }
        queue_enter_first(que, site, IOTrackingCallSite *, link);
        queue->siteCount++;
    }

    if (address) {
        queue_enter/*last*/ (&site->instances, mem, IOTracking *, link);
        if (queue_end(&site->instances, (queue_entry_t)site->addresses)) {
            site->addresses = mem;
        }
    } else {
        queue_enter_first(&site->instances, mem, IOTracking *, link);
    }

    mem->site = site;
    site->size[0] += size;
    site->count++;

    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
    if (!mem->link.next) {
        return;
    }

    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next) {
        assert(mem->site);

        if (mem == mem->site->addresses) {
            mem->site->addresses = (IOTracking *) queue_next(&mem->link);
        }
        remque(&mem->link);

        assert(mem->site->count);
        mem->site->count--;
        assert(mem->site->size[0] >= size);
        mem->site->size[0] -= size;
        if (!mem->site->count) {
            assert(queue_empty(&mem->site->instances));
            assert(!mem->site->size[0]);
            assert(!mem->site->size[1]);

            remque(&mem->site->link);
            assert(queue->siteCount);
            queue->siteCount--;
            size_t siteSize = sizeof(IOTrackingCallSite);
            if (kIOTrackingQueueTypeUser & queue->type) {
                siteSize += sizeof(IOTrackingCallSiteUser);
            }
            kfree(mem->site, siteSize);
        }
        mem->site = NULL;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
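// Descriptive note: IOTrackingAlloc()/IOTrackingFree() track raw allocations
// by address rather than by an embedded IOTracking member. The address is
// stored one's-complemented (~address), presumably so the tracking record's
// own copy of the pointer is not mistaken for a live reference when
// IOTrackingLeakScan() sweeps kernel memory.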

void
IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
    IOTrackingAddress * tracking;

    if (!queue->captureOn) {
        return;
    }
    if (size < queue->minCaptureSize) {
        return;
    }

    address = ~address;
    tracking = (typeof(tracking))kalloc(sizeof(IOTrackingAddress));
    bzero(tracking, sizeof(IOTrackingAddress));
    IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
    tracking->address = address;
    tracking->size = size;

    IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
    IOTrackingCallSite * site;
    IOTrackingAddress * tracking;
    uint32_t idx;
    bool done;

    address = ~address;
    IOTRecursiveLockLock(&queue->lock);
    done = false;
    for (idx = 0; idx < queue->numSiteQs; idx++) {
        queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
        {
            tracking = (IOTrackingAddress *) site->addresses;
            while (!queue_end(&site->instances, &tracking->tracking.link)) {
                if ((done = (address == tracking->address))) {
                    IOTrackingRemove(queue, &tracking->tracking, size);
                    kfree(tracking, sizeof(IOTrackingAddress));
                    break;
                } else {
                    tracking = (IOTrackingAddress *) queue_next(&tracking->tracking.link);
                }
            }
            if (done) {
                break;
            }
        }
        if (done) {
            break;
        }
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
    IOTRecursiveLockLock(&queue->lock);
    if (mem->link.next) {
        assert(mem->site);
        assert((size > 0) || (mem->site->size[1] >= -size));
        mem->site->size[1] += size;
    }
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOTrackingReset(IOTrackingQueue * queue)
{
    IOTrackingCallSite * site;
    IOTrackingUser * user;
    IOTracking * tracking;
    IOTrackingAddress * trackingAddress;
    uint32_t idx;
    bool addresses;

    IOTRecursiveLockLock(&queue->lock);
    for (idx = 0; idx < queue->numSiteQs; idx++) {
        while (!queue_empty(&queue->sites[idx])) {
            if (kIOTrackingQueueTypeMap & queue->type) {
                queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
                user->link.next = user->link.prev = NULL;
            } else {
                queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
                addresses = false;
                while (!queue_empty(&site->instances)) {
                    queue_remove_first(&site->instances, tracking, IOTracking *, link);
                    if (tracking == site->addresses) {
                        addresses = true;
                    }
                    if (addresses) {
                        trackingAddress = (typeof(trackingAddress))tracking;
                        if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) {
                            kfree(tracking, sizeof(IOTrackingAddress));
                        }
                    }
                }
                size_t siteSize = sizeof(IOTrackingCallSite);
                if (kIOTrackingQueueTypeUser & queue->type) {
                    siteSize += sizeof(IOTrackingCallSiteUser);
                }
                kfree(site, siteSize);
            }
        }
    }
    queue->siteCount = 0;
    IOTRecursiveLockUnlock(&queue->lock);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static int
IOTrackingCallSiteInfoCompare(const void * left, const void * right)
{
    IOTrackingCallSiteInfo * l = (typeof(l))left;
    IOTrackingCallSiteInfo * r = (typeof(r))right;
    size_t lsize, rsize;

    rsize = r->size[0] + r->size[1];
    lsize = l->size[0] + l->size[1];

    return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static int
IOTrackingAddressCompare(const void * left, const void * right)
{
    IOTracking * instance;
    uintptr_t inst, laddr, raddr;

    inst = ((typeof(inst) *)left)[0];
    instance = (typeof(instance))INSTANCE_GET(inst);
    if (kInstanceFlagAddress & inst) {
        laddr = ~((IOTrackingAddress *)instance)->address;
    } else {
        laddr = (uintptr_t) (instance + 1);
    }

    inst = ((typeof(inst) *)right)[0];
    instance = (typeof(instance))(inst & ~kInstanceFlags);
    if (kInstanceFlagAddress & inst) {
        raddr = ~((IOTrackingAddress *)instance)->address;
    } else {
        raddr = (uintptr_t) (instance + 1);
    }

    return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
}


static int
IOTrackingZoneElementCompare(const void * left, const void * right)
{
    uintptr_t inst, laddr, raddr;

    inst = ((typeof(inst) *)left)[0];
    laddr = INSTANCE_PUT(inst);
    inst = ((typeof(inst) *)right)[0];
    raddr = INSTANCE_PUT(inst);

    return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
CopyOutBacktraces(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
{
    uint32_t j;
    mach_vm_address_t bt, btEntry;

    btEntry = site->queue->btEntry;
    for (j = 0; j < kIOTrackingCallSiteBTs; j++) {
        bt = site->bt[j];
        if (btEntry
            && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) {
            bt = btEntry;
            btEntry = 0;
        }
        siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
    }

    siteInfo->btPID = 0;
    if (kIOTrackingQueueTypeUser & site->queue->type) {
        siteInfo->btPID = site->user[0].pid;
        uint32_t * bt32 = (typeof(bt32))((void *) &site->user[0].bt[0]);
        uint64_t * bt64 = (typeof(bt64))((void *) &site->user[0].bt[0]);
        for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
            if (j >= site->user[0].userCount) {
                siteInfo->bt[1][j] = 0;
            } else if (site->user[0].user32) {
                siteInfo->bt[1][j] = bt32[j];
            } else {
                siteInfo->bt[1][j] = bt64[j];
            }
        }
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
IOTrackingLeakScan(void * refcon)
{
    IOTrackingLeaksRef * ref = (typeof(ref))refcon;
    uintptr_t * instances;
    IOTracking * instance;
    uint64_t vaddr, vincr;
    ppnum_t ppn;
    uintptr_t ptr, addr, vphysaddr, inst;
    size_t size, origsize;
    uint32_t baseIdx, lim, ptrIdx, count;
    boolean_t is;
    AbsoluteTime deadline;

    instances = ref->instances;
    count = ref->count;
    size = origsize = ref->zoneSize;

    for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
        ;
        vaddr += vincr) {
        if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) {
            if (deadline) {
                ml_set_interrupts_enabled(is);
                IODelay(10);
            }
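            // Exit once the scan has walked past the top of the kernel address
            // space; otherwise disable interrupts again and scan for another
            // 10 ms burst before the next brief pause.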
            if (vaddr >= VM_MAX_KERNEL_ADDRESS) {
                break;
            }
            is = ml_set_interrupts_enabled(false);
            clock_interval_to_deadline(10, kMillisecondScale, &deadline);
        }

        ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
        // check noencrypt to avoid VM structs (map entries) with pointers
        if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) {
            ppn = 0;
        }
        if (!ppn) {
            continue;
        }

        for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) {
            ptr = ((uintptr_t *)vphysaddr)[ptrIdx];
#if defined(HAS_APPLE_PAC)
            // strip possible ptrauth signature from candidate data pointer
            ptr = (uintptr_t)ptrauth_strip((void*)ptr, ptrauth_key_process_independent_data);
#endif /* defined(HAS_APPLE_PAC) */

            for (lim = count, baseIdx = 0; lim; lim >>= 1) {
                inst = instances[baseIdx + (lim >> 1)];
                instance = (typeof(instance))INSTANCE_GET(inst);

                if (ref->zoneSize) {
                    addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
                } else if (kInstanceFlagAddress & inst) {
                    addr = ~((IOTrackingAddress *)instance)->address;
                    origsize = size = ((IOTrackingAddress *)instance)->size;
                    if (!size) {
                        size = 1;
                    }
                } else {
                    addr = (uintptr_t) (instance + 1);
                    origsize = size = instance->site->queue->allocSize;
                }
                if ((ptr >= addr) && (ptr < (addr + size))
                    && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
                    || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) {
                    if (!(kInstanceFlagReferenced & inst)) {
                        inst |= kInstanceFlagReferenced;
                        instances[baseIdx + (lim >> 1)] = inst;
                        ref->found++;
                        if (!origsize) {
                            ref->foundzlen++;
                        }
                    }
                    break;
                }
                if (ptr > addr) {
                    // move right
                    baseIdx += (lim >> 1) + 1;
                    lim--;
                }
                // else move left
            }
        }
        ref->bytes += page_size;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" void
zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
{
    IOTrackingLeaksRef ref;
    IOTrackingCallSiteInfo siteInfo;
    uint32_t idx;

    qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);

    bzero(&siteInfo, sizeof(siteInfo));
    bzero(&ref, sizeof(ref));
    ref.instances = instances;
    ref.count = count;
    ref.zoneSize = zoneSize;

    for (idx = 0; idx < 2; idx++) {
        ref.bytes = 0;
        IOTrackingLeakScan(&ref);
        IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
        if (count <= ref.found) {
            break;
        }
    }

    *found = ref.found;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
ZoneSiteProc(void * refCon, uint32_t siteCount, uint32_t zoneSize,
    uintptr_t * backtrace, uint32_t btCount)
{
    IOTrackingCallSiteInfo siteInfo;
    OSData * leakData;
    uint32_t idx;

    leakData = (typeof(leakData))refCon;

    bzero(&siteInfo, sizeof(siteInfo));
    siteInfo.count = siteCount;
    siteInfo.size[0] = zoneSize * siteCount;

    for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++) {
        siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]);
    }

    leakData->appendBytes(&siteInfo, sizeof(siteInfo));
}


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
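// Descriptive note: IOTrackingLeaks() consumes the flat array of instance
// pointers collected under the kIOTrackingLeaks selector. It sorts them by
// address, runs up to two IOTrackingLeakScan() passes over mapped kernel
// memory to mark instances that are still referenced, then coalesces the
// remaining (unreferenced) instances per call site and emits one
// IOTrackingCallSiteInfo record for each.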

static OSData *
IOTrackingLeaks(LIBKERN_CONSUMED OSData * data)
{
    IOTrackingLeaksRef ref;
    IOTrackingCallSiteInfo siteInfo;
    IOTrackingCallSite * site;
    OSData * leakData;
    uintptr_t * instances;
    IOTracking * instance;
    uintptr_t inst;
    uint32_t count, idx, numSites, dups, siteCount;

    instances = (typeof(instances))data->getBytesNoCopy();
    count = (data->getLength() / sizeof(*instances));
    qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);

    bzero(&siteInfo, sizeof(siteInfo));
    bzero(&ref, sizeof(ref));
    ref.instances = instances;
    ref.count = count;
    for (idx = 0; idx < 2; idx++) {
        ref.bytes = 0;
        IOTrackingLeakScan(&ref);
        IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen);
        if (count <= ref.found) {
            break;
        }
    }

    leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));

    for (numSites = 0, idx = 0; idx < count; idx++) {
        inst = instances[idx];
        if (kInstanceFlagReferenced & inst) {
            continue;
        }
        instance = (typeof(instance))INSTANCE_GET(inst);
        site = instance->site;
        instances[numSites] = (uintptr_t) site;
        numSites++;
    }

    for (idx = 0; idx < numSites; idx++) {
        inst = instances[idx];
        if (!inst) {
            continue;
        }
        site = (typeof(site))inst;
        for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
            if (instances[dups] == (uintptr_t) site) {
                siteCount++;
                instances[dups] = 0;
            }
        }
        siteInfo.count = siteCount;
        siteInfo.size[0] = (site->size[0] * site->count) / siteCount;
        siteInfo.size[1] = (site->size[1] * site->count) / siteCount;
        CopyOutBacktraces(site, &siteInfo);
        leakData->appendBytes(&siteInfo, sizeof(siteInfo));
    }
    data->release();

    return leakData;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static bool
SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
{
    const char * scan;
    const char * next;
    bool exclude, found;
    size_t qLen, sLen;

    if (!namesLen || !names) {
        return false;
    }
    // <len><name>...<len><name><0>
    exclude = (0 != (kIOTrackingExcludeNames & options));
    qLen = strlen(name);
    scan = names;
    found = false;
    do{
        sLen = scan[0];
        scan++;
        next = scan + sLen;
        if (next >= (names + namesLen)) {
            break;
        }
        found = ((sLen == qLen) && !strncmp(scan, name, sLen));
        scan = next;
    }while (!found && (scan < (names + namesLen)));

    return !(exclude ^ found);
}

#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static kern_return_t
IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
    uint32_t intag, uint32_t inzsize,
    const char * names, size_t namesLen,
    size_t size, OSObject ** result)
{
    kern_return_t ret;
    OSData * data;

    if (result) {
        *result = NULL;
    }
    data = NULL;
    ret = kIOReturnNotReady;

#if IOTRACKING

    kern_return_t kr;
    IOTrackingQueue * queue;
    IOTracking * instance;
    IOTrackingCallSite * site;
    IOTrackingCallSiteInfo siteInfo;
    IOTrackingUser * user;
    task_t mapTask;
    mach_vm_address_t mapAddress;
    mach_vm_size_t mapSize;
    uint32_t num, idx, qIdx;
    uintptr_t instFlags;
    proc_t proc;
    bool addresses;

    ret = kIOReturnNotFound;
    proc = NULL;
    if (kIOTrackingGetMappings == selector) {
        if (value != -1ULL) {
            proc = proc_find((pid_t) value);
            if (!proc) {
                return kIOReturnNotFound;
            }
        }
    }

    bzero(&siteInfo, sizeof(siteInfo));
    lck_mtx_lock(gIOTrackingLock);
    queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
    {
        if (SkipName(options, queue->name, namesLen, names)) {
            continue;
        }

        if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) {
            continue;
        }

        switch (selector) {
        case kIOTrackingResetTracking:
        {
            IOTrackingReset(queue);
            ret = kIOReturnSuccess;
            break;
        }

        case kIOTrackingStartCapture:
        case kIOTrackingStopCapture:
        {
            queue->captureOn = (kIOTrackingStartCapture == selector);
            ret = kIOReturnSuccess;
            break;
        }

        case kIOTrackingSetMinCaptureSize:
        {
            queue->minCaptureSize = size;
            ret = kIOReturnSuccess;
            break;
        }

        case kIOTrackingLeaks:
        {
            if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
                break;
            }

            if (!data) {
                data = OSData::withCapacity(1024 * sizeof(uintptr_t));
            }

            IOTRecursiveLockLock(&queue->lock);
            for (idx = 0; idx < queue->numSiteQs; idx++) {
                queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
                {
                    addresses = false;
                    queue_iterate(&site->instances, instance, IOTracking *, link)
                    {
                        if (instance == site->addresses) {
                            addresses = true;
                        }
                        instFlags = (typeof(instFlags))instance;
                        if (addresses) {
                            instFlags |= kInstanceFlagAddress;
                        }
                        data->appendBytes(&instFlags, sizeof(instFlags));
                    }
                }
            }
            // queue is locked
            ret = kIOReturnSuccess;
            break;
        }


        case kIOTrackingGetTracking:
        {
            if (kIOTrackingQueueTypeMap & queue->type) {
                break;
            }

            if (!data) {
                data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
            }

            IOTRecursiveLockLock(&queue->lock);
            num = queue->siteCount;
            idx = 0;
            for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
                queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
                {
                    assert(idx < num);
                    idx++;

                    size_t tsize[2];
                    uint32_t count = site->count;
                    tsize[0] = site->size[0];
                    tsize[1] = site->size[1];

                    if (intag || inzsize) {
                        uintptr_t addr;
                        vm_size_t size, zoneSize;
                        vm_tag_t tag;

                        if (kIOTrackingQueueTypeAlloc & queue->type) {
                            addresses = false;
                            count = 0;
                            tsize[0] = tsize[1] = 0;
                            queue_iterate(&site->instances, instance, IOTracking *, link)
                            {
                                if (instance == site->addresses) {
                                    addresses = true;
                                }

                                if (addresses) {
                                    addr = ~((IOTrackingAddress *)instance)->address;
                                } else {
                                    addr = (uintptr_t) (instance + 1);
                                }

                                kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
                                if (KERN_SUCCESS != kr) {
                                    continue;
                                }

                                if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) {
                                    continue;
                                }
                                if (inzsize && (inzsize != zoneSize)) {
                                    continue;
                                }

                                count++;
                                tsize[0] += size;
                            }
                        } else {
                            if (!intag || inzsize || (intag != site->tag)) {
                                continue;
                            }
                        }
                    }

                    if (!count) {
                        continue;
                    }
                    if (size && ((tsize[0] + tsize[1]) < size)) {
                        continue;
                    }
                    siteInfo.count = count;
                    siteInfo.size[0] = tsize[0];
                    siteInfo.size[1] = tsize[1];
                    CopyOutBacktraces(site, &siteInfo);
                    data->appendBytes(&siteInfo, sizeof(siteInfo));
                }
            }
            assert(idx == num);
            IOTRecursiveLockUnlock(&queue->lock);
            ret = kIOReturnSuccess;
            break;
        }

        case kIOTrackingGetMappings:
        {
            if (!(kIOTrackingQueueTypeMap & queue->type)) {
                break;
            }
            if (!data) {
                data = OSData::withCapacity((unsigned int) page_size);
            }

            IOTRecursiveLockLock(&queue->lock);
            num = queue->siteCount;
            idx = 0;
            for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
                queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
                {
                    assert(idx < num);
                    idx++;

                    kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
                    if (kIOReturnSuccess != kr) {
                        continue;
                    }
                    if (proc && (mapTask != proc_task(proc))) {
                        continue;
                    }
                    if (size && (mapSize < size)) {
                        continue;
                    }

                    siteInfo.count = 1;
                    siteInfo.size[0] = mapSize;
                    siteInfo.address = mapAddress;
                    siteInfo.addressPID = task_pid(mapTask);
                    siteInfo.btPID = user->btPID;

                    for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
                        siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
                    }
                    uint32_t * bt32 = (typeof(bt32)) & user->btUser[0];
                    uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]);
                    for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
                        if (j >= user->userCount) {
                            siteInfo.bt[1][j] = 0;
                        } else if (user->user32) {
                            siteInfo.bt[1][j] = bt32[j];
                        } else {
                            siteInfo.bt[1][j] = bt64[j];
                        }
                    }
                    data->appendBytes(&siteInfo, sizeof(siteInfo));
                }
            }
            assert(idx == num);
            IOTRecursiveLockUnlock(&queue->lock);
            ret = kIOReturnSuccess;
            break;
        }

        default:
            ret = kIOReturnUnsupported;
            break;
        }
    }

    if ((kIOTrackingLeaks == selector) && data) {
        data = IOTrackingLeaks(data);
        queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
        {
            if (SkipName(options, queue->name, namesLen, names)) {
                continue;
            }
            if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
                continue;
            }
            IOTRecursiveLockUnlock(&queue->lock);
        }
    }

    lck_mtx_unlock(gIOTrackingLock);

    if ((kIOTrackingLeaks == selector) && namesLen && names) {
        const char * scan;
        const char * next;
        uint8_t sLen;

        if (!data) {
            data = OSData::withCapacity(4096 * sizeof(uintptr_t));
        }

        // <len><name>...<len><name><0>
        scan = names;
        do{
            sLen = ((uint8_t) scan[0]);
            scan++;
            next = scan + sLen;
            if (next >= (names + namesLen)) {
                break;
            }
            kr = zone_leaks(scan, sLen, &ZoneSiteProc, data);
            if (KERN_SUCCESS == kr) {
                ret = kIOReturnSuccess;
            } else if (KERN_INVALID_NAME != kr) {
                ret = kIOReturnVMError;
            }
            scan = next;
        }while (scan < (names + namesLen));
    }

    if (data) {
        switch (selector) {
        case kIOTrackingLeaks:
        case kIOTrackingGetTracking:
        case kIOTrackingGetMappings:
        {
            IOTrackingCallSiteInfo * siteInfos;
            siteInfos = (typeof(siteInfos))data->getBytesNoCopy();
            num = (data->getLength() / sizeof(*siteInfos));
            qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
            break;
        }
        default: assert(false); break;
        }
    }

    *result = data;
    if (proc) {
        proc_rele(proc);
    }

#endif /* IOTRACKING */

    return ret;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <IOKit/IOKitDiagnosticsUserClient.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOUserClient

OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
{
    IOKitDiagnosticsClient * inst;

    inst = new IOKitDiagnosticsClient;
    if (inst && !inst->init()) {
        inst->release();
        inst = NULL;
    }

    return inst;
}

IOReturn
IOKitDiagnosticsClient::clientClose(void)
{
    terminate();
    return kIOReturnSuccess;
}

IOReturn
IOKitDiagnosticsClient::setProperties(OSObject * properties)
{
    IOReturn kr = kIOReturnUnsupported;
    return kr;
}

IOReturn
IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
{
    IOReturn ret = kIOReturnBadArgument;
    const IOKitDiagnosticsParameters * params;
    const char * names;
    size_t namesLen;
    OSObject * result;

    if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) {
        return kIOReturnBadArgument;
    }
    params = (typeof(params))args->structureInput;
    if (!params) {
        return kIOReturnBadArgument;
    }

    names = NULL;
    namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
    if (namesLen) {
        names = (typeof(names))(params + 1);
    }

    ret = IOTrackingDebug(selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result);

    if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) {
        *args->structureVariableOutputData = result;
    } else if (result) {
        result->release();
    }

    return ret;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */