1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/sysctl.h>
30 extern "C" {
31 #include <vm/vm_kern_xnu.h>
32 #include <kern/task.h>
33 #include <kern/debug.h>
34 }
35
36 #include <libkern/c++/OSContainers.h>
37 #include <libkern/OSDebug.h>
38 #include <libkern/c++/OSCPPDebug.h>
39 #include <kern/backtrace.h>
40 #include <kern/btlog.h>
41
42 #include <IOKit/IOKitDebug.h>
43 #include <IOKit/IOLib.h>
44 #include <IOKit/assert.h>
45 #include <IOKit/IODeviceTreeSupport.h>
46 #include <IOKit/IOService.h>
47
48 #include "IOKitKernelInternal.h"
49
// Global IOKit debug flags; initial value comes from the "io" boot-arg.
TUNABLE_WRITEABLE(SInt64, gIOKitDebug, "io", DEBUG_INIT_VALUE);
// IOKit tracing flags; initial value comes from the "iotrace" boot-arg.
TUNABLE_DEV_WRITEABLE(SInt64, gIOKitTrace, "iotrace", 0);

// The debug sysctls below are writeable only on DEVELOPMENT/DEBUG kernels;
// on RELEASE they are read-only.
#if DEVELOPMENT || DEBUG
#define IODEBUG_CTLFLAGS CTLFLAG_RW
#else
#define IODEBUG_CTLFLAGS CTLFLAG_RD
#endif

// debug.iotrace: direct read/write access to gIOKitTrace.
SYSCTL_QUAD(_debug, OID_AUTO, iotrace, IODEBUG_CTLFLAGS | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");
60
61 static int
sysctl_debug_iokit(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)62 sysctl_debug_iokit
63 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
64 {
65 SInt64 newValue;
66 int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed);
67 if (changed) {
68 gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions));
69 }
70 return error;
71 }
72
// debug.iokit: filtered access to gIOKitDebug via sysctl_debug_iokit above.
SYSCTL_PROC(_debug, OID_AUTO, iokit,
    CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io");

// Optional hook invoked at the start/end of a leak scan (see
// IOTrackingLeakScan below); NULL when no client has registered.
void (*gIOTrackingLeakScanCallback)(uint32_t notification) = NULL;

// Running byte counters for the various kernel allocators, reported by
// IOPrintMemory() and IOKitDiagnostics::serialize().
size_t debug_malloc_size;
size_t debug_iomalloc_size;

vm_size_t debug_iomallocpageable_size;
size_t debug_container_malloc_size;
// int debug_ivars_size; // in OSObject.cpp
85
86 extern "C" {
87 #if 0
88 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
89 #else
90 #define DEBG(fmt, args...) { IOLog(fmt, ## args); }
91 #endif
92
/*
 * Dump a registry plane to the log: first the total entry count, then one
 * line per entry (indented two spaces per depth level) showing name,
 * optional location, class name and -- for IOService objects -- the busy
 * state. ANSI escape sequences colorize the entry name.
 */
void
IOPrintPlane( const IORegistryPlane * plane )
{
	IORegistryEntry * next;
	IORegistryIterator * iter;
	OSOrderedSet * all;
	IOService * service;

	iter = IORegistryIterator::iterateOver( plane );
	assert( iter );
	all = iter->iterateAll();
	if (all) {
		DEBG("Count %d\n", all->getCount());
		all->release();
	} else {
		DEBG("Empty\n");
	}

	// Second pass: recurse through the plane and print every entry.
	iter->reset();
	while ((next = iter->getNextObjectRecursive())) {
		DEBG( "%*s\033[33m%s", 2 * next->getDepth( plane ), "", next->getName( plane ));
		if ((next->getLocation( plane ))) {
			DEBG("@%s", next->getLocation( plane ));
		}
		DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
		if ((service = OSDynamicCast(IOService, next))) {
			DEBG(", busy %ld", (long) service->getBusyState());
		}
		DEBG( ">\n");
		// IOSleep(250);
	}
	iter->release();

#undef IOPrintPlaneFormat
}
128
// No-op stub; presumably retained so the symbol stays available to
// debugger scripts -- confirm before removing.
void
db_piokjunk(void)
{
}
133
// No-op stub; presumably retained so the symbol stays available to
// debugger scripts -- confirm before removing.
void
db_dumpiojunk( const IORegistryPlane * plane __unused )
{
}
138
/*
 * Log the current values of the global allocator byte counters
 * (debug_ivars_size is defined in OSObject.cpp, see note above).
 */
void
IOPrintMemory( void )
{
	// OSMetaClass::printInstanceCounts();

	IOLog("\n"
	    "ivar kalloc() 0x%08lx\n"
	    "malloc() 0x%08lx\n"
	    "containers kalloc() 0x%08lx\n"
	    "IOMalloc() 0x%08lx\n"
	    "----------------------------------------\n",
	    debug_ivars_size,
	    debug_malloc_size,
	    debug_container_malloc_size,
	    debug_iomalloc_size
	    );
}
156 } /* extern "C" */
157
158 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
159
160 #define super OSObject
OSDefineMetaClassAndStructors(IOKitDiagnostics,OSObject)161 OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)
162
163 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
164
165 OSObject * IOKitDiagnostics::diagnostics( void )
166 {
167 IOKitDiagnostics * diags;
168
169 diags = new IOKitDiagnostics;
170 if (diags && !diags->init()) {
171 diags->release();
172 diags = NULL;
173 }
174
175 return diags;
176 }
177
178 void
updateOffset(OSDictionary * dict,UInt64 value,const char * name)179 IOKitDiagnostics::updateOffset( OSDictionary * dict,
180 UInt64 value, const char * name )
181 {
182 OSNumber * off;
183
184 off = OSNumber::withNumber( value, 64 );
185 if (!off) {
186 return;
187 }
188
189 dict->setObject( name, off );
190 off->release();
191 }
192
193 bool
serialize(OSSerialize * s) const194 IOKitDiagnostics::serialize(OSSerialize *s) const
195 {
196 OSDictionary * dict;
197 bool ok;
198
199 dict = OSDictionary::withCapacity( 5 );
200 if (!dict) {
201 return false;
202 }
203
204 updateOffset( dict, debug_ivars_size, "Instance allocation" );
205 updateOffset( dict, debug_container_malloc_size, "Container allocation" );
206 updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
207 updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );
208
209 OSMetaClass::serializeClassDictionary(dict);
210
211 ok = dict->serialize( s );
212
213 dict->release();
214
215 return ok;
216 }
217
218 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
219
220 #if IOTRACKING
221
222 #include <libkern/c++/OSCPPDebug.h>
223 #include <libkern/c++/OSKext.h>
224 #include <kern/zalloc.h>
225
// Forward declaration of the in-kernel qsort() used below to sort captured
// call-site and address arrays. Note: a C++ linkage-specification must be
// spelled `extern "C"`; `__private_extern__ "C"` is not valid syntax.
extern "C" void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));
231
// pmap helpers (osfmk) used by the leak scanner below.
extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern "C" ppnum_t pmap_valid_page(ppnum_t pn);
234
235 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
236
// A mutex that its owner may re-acquire; `count` is the recursion depth and
// `thread` identifies the current owner (NULL when unowned).
struct IOTRecursiveLock {
	lck_mtx_t * mutex;
	thread_t thread;
	UInt32 count;
};

// One tracking domain. `sites` is a flexible array of `numSiteQs` hash
// buckets holding IOTrackingCallSite (or IOTrackingUser) entries.
struct IOTrackingQueue {
	queue_chain_t link;           // on gIOTrackingQ
	IOTRecursiveLock lock;        // protects sites[] and counters
	const char * name;
	uintptr_t btEntry;
	size_t allocSize;             // fixed per-allocation size, if any
	size_t minCaptureSize;        // allocations below this are not tracked
	uint32_t siteCount;           // number of live call sites / user entries
	uint32_t type;                // kIOTrackingQueueType* flags
	uint32_t numSiteQs;           // bucket count (>= 1)
	uint8_t captureOn;            // nonzero while capture is enabled
	queue_head_t sites[];
};


// Per-process user backtrace attached to a call site.
struct IOTrackingCallSiteUser {
	pid_t pid;
	uint8_t user32;               // nonzero: 32-bit user frames in bt[]
	uint8_t userCount;            // valid entries in bt[]
	uintptr_t bt[kIOTrackingCallSiteBTs];
};

// One unique allocation call site (keyed by backtrace CRC + tag, and by pid
// for user-type queues). `instances` chains the live IOTracking records;
// `addresses`, when non-NULL, is a per-bucket index into that chain.
struct IOTrackingCallSite {
	queue_chain_t link;           // on the owning queue's sites[] bucket
	queue_head_t instances;
	IOTrackingQueue * queue;
	IOTracking ** addresses;      // lazily allocated, numSiteQs entries
	size_t size[2];               // [0] live bytes; [1] accumulated extra
	uint32_t crc;                 // backtrace hash
	uint32_t count;               // live instances

	vm_tag_t tag;
	uint8_t user32;
	uint8_t userCount;
	pid_t btPID;

	uintptr_t bt[kIOTrackingCallSiteBTs];
	IOTrackingCallSiteUser user[0]; // present only for user-type queues
};

// Allocation shape for call sites carrying one user backtrace record.
struct IOTrackingCallSiteWithUser {
	struct IOTrackingCallSite site;
	struct IOTrackingCallSiteUser user;
};

static void IOTrackingFreeCallSite(uint32_t type, IOTrackingCallSite ** site);

// Scratch state shared with the leak-scan pass.
struct IOTrackingLeaksRef {
	uintptr_t * instances;
	uint32_t zoneSize;
	uint32_t count;
	uint32_t found;
	uint32_t foundzlen;
	size_t bytes;
};

// Global list of all tracking queues, and the lock that protects it.
lck_mtx_t * gIOTrackingLock;
queue_head_t gIOTrackingQ;

enum{
	kTrackingAddressFlagAllocated = 0x00000001
};

// On LP64 the flags live directly in IOTrackingAddress; otherwise they are
// packed into the embedded IOTracking record.
#if defined(__LP64__)
#define IOTrackingAddressFlags(ptr) (ptr->flags)
#else
#define IOTrackingAddressFlags(ptr) (ptr->tracking.flags)
#endif
311
312 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
313
/*
 * Acquire a recursive lock. If the calling thread already owns it (safe to
 * test without the mutex: only the owner ever sets `thread` to itself) just
 * bump the recursion count; otherwise take the mutex and become owner.
 */
static void
IOTRecursiveLockLock(IOTRecursiveLock * lock)
{
	if (lock->thread == current_thread()) {
		lock->count++;
	} else {
		lck_mtx_lock(lock->mutex);
		assert(lock->thread == NULL);
		assert(lock->count == 0);
		lock->thread = current_thread();
		lock->count = 1;
	}
}
327
/*
 * Release one level of a recursive lock; the mutex is dropped only when the
 * outermost hold is released. Must be called by the owning thread.
 */
static void
IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
{
	assert(lock->thread == current_thread());
	if (0 == (--lock->count)) {
		lock->thread = NULL;
		lck_mtx_unlock(lock->mutex);
	}
}
337
338 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
339
// One-time setup of the global tracking-queue list and its lock.
void
IOTrackingInit(void)
{
	queue_init(&gIOTrackingQ);
	gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}
346
347 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
348
/*
 * Create a tracking queue with `numSiteQs` hash buckets (at least one) and
 * link it onto the global queue list. Capture starts enabled when both
 * kIOTracking and kIOTrackingBoot are set in gIOKitDebug, or when the queue
 * type requests default-on.
 */
IOTrackingQueue *
IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
    size_t allocSize, size_t minCaptureSize,
    uint32_t type, uint32_t numSiteQs)
{
	IOTrackingQueue * queue;
	uint32_t idx;

	if (!numSiteQs) {
		numSiteQs = 1;
	}
	// Single allocation: header plus the trailing sites[] bucket array.
	queue = kalloc_type(IOTrackingQueue, queue_head_t, numSiteQs, Z_WAITOK_ZERO);
	queue->name = name;
	queue->btEntry = btEntry;
	queue->allocSize = allocSize;
	queue->minCaptureSize = minCaptureSize;
	queue->lock.mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue->numSiteQs = numSiteQs;
	queue->type = type;
	enum { kFlags = (kIOTracking | kIOTrackingBoot) };
	queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
	    || (kIOTrackingQueueTypeDefaultOn & type);

	for (idx = 0; idx < numSiteQs; idx++) {
		queue_init(&queue->sites[idx]);
	}

	lck_mtx_lock(gIOTrackingLock);
	queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
	lck_mtx_unlock(gIOTrackingLock);

	return queue;
};
382
// Mark a queue as collecting user backtraces. Only legal before any call
// sites have been recorded (the user flag changes the site allocation size).
void
IOTrackingQueueCollectUser(IOTrackingQueue * queue)
{
	assert(0 == queue->siteCount);
	queue->type |= kIOTrackingQueueTypeUser;
}
389
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391
/*
 * Tear down a tracking queue: drop all of its call sites, unlink it from
 * the global list, then free the lock and the queue storage itself.
 */
void
IOTrackingQueueFree(IOTrackingQueue * queue)
{
	lck_mtx_lock(gIOTrackingLock);
	IOTrackingReset(queue);
	remque(&queue->link);
	lck_mtx_unlock(gIOTrackingLock);

	lck_mtx_free(queue->lock.mutex, IOLockGroup);

	kfree_type(IOTrackingQueue, queue_head_t, queue->numSiteQs, queue);
};
404
405 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
406
407 /* fasthash
408 * The MIT License
409 *
410 * Copyright (C) 2012 Zilong Tan ([email protected])
411 *
412 * Permission is hereby granted, free of charge, to any person
413 * obtaining a copy of this software and associated documentation
414 * files (the "Software"), to deal in the Software without
415 * restriction, including without limitation the rights to use, copy,
416 * modify, merge, publish, distribute, sublicense, and/or sell copies
417 * of the Software, and to permit persons to whom the Software is
418 * furnished to do so, subject to the following conditions:
419 *
420 * The above copyright notice and this permission notice shall be
421 * included in all copies or substantial portions of the Software.
422 *
423 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
424 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
425 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
426 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
427 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
428 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
429 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
430 * SOFTWARE.
431 */
432
433
434 // Compression function for Merkle-Damgard construction.
435 // This function is generated using the framework provided.
436 #define mix(h) ({ \
437 (h) ^= (h) >> 23; \
438 (h) *= 0x2127599bf4325c37ULL; \
439 (h) ^= (h) >> 47; })
440
441 static uint64_t
fasthash64(const void * buf,size_t len,uint64_t seed)442 fasthash64(const void *buf, size_t len, uint64_t seed)
443 {
444 const uint64_t m = 0x880355f21e6d1965ULL;
445 const uint64_t *pos = (const uint64_t *)buf;
446 const uint64_t *end = pos + (len / 8);
447 const unsigned char *pos2;
448 uint64_t h = seed ^ (len * m);
449 uint64_t v;
450
451 while (pos != end) {
452 v = *pos++;
453 h ^= mix(v);
454 h *= m;
455 }
456
457 pos2 = (const unsigned char*)pos;
458 v = 0;
459
460 switch (len & 7) {
461 case 7: v ^= (uint64_t)pos2[6] << 48;
462 [[clang::fallthrough]];
463 case 6: v ^= (uint64_t)pos2[5] << 40;
464 [[clang::fallthrough]];
465 case 5: v ^= (uint64_t)pos2[4] << 32;
466 [[clang::fallthrough]];
467 case 4: v ^= (uint64_t)pos2[3] << 24;
468 [[clang::fallthrough]];
469 case 3: v ^= (uint64_t)pos2[2] << 16;
470 [[clang::fallthrough]];
471 case 2: v ^= (uint64_t)pos2[1] << 8;
472 [[clang::fallthrough]];
473 case 1: v ^= (uint64_t)pos2[0];
474 h ^= mix(v);
475 h *= m;
476 }
477
478 return mix(h);
479 }
480
481 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
482
483 static uint32_t
fasthash32(const void * buf,size_t len,uint32_t seed)484 fasthash32(const void *buf, size_t len, uint32_t seed)
485 {
486 // the following trick converts the 64-bit hashcode to Fermat
487 // residue, which shall retain information from both the higher
488 // and lower parts of hashcode.
489 uint64_t h = fasthash64(buf, len, seed);
490 return (uint32_t) (h - (h >> 32));
491 }
492
493 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
494
/*
 * Record a user-map tracking entry: capture a kernel backtrace into
 * mem->bt and, when called from a user process, a user backtrace into
 * mem->btUser, then chain `mem` onto the queue's first bucket.
 * Entries below the queue's capture threshold are ignored.
 */
void
IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
{
	uint32_t num;
	int pid;

	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	assert(!mem->link.next);

	// Kernel backtrace is stored in mem->bt; num is then reset so that
	// userCount below reflects only user frames.
	num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs, NULL, NULL);
	num = 0;
	if ((kernel_task != current_task()) && (pid = proc_selfpid())) {
		struct backtrace_user_info btinfo = BTUINFO_INIT;
		mem->btPID = pid;
		num = backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1,
		    NULL, &btinfo);
		mem->user32 = !(btinfo.btui_info & BTI_64_BIT);
	}
	assert(num <= kIOTrackingCallSiteBTs);
	static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX);
	mem->userCount = ((uint8_t) num);

	IOTRecursiveLockLock(&queue->lock);
	queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link);
	queue->siteCount++;
	IOTRecursiveLockUnlock(&queue->lock);
}
528
/*
 * Unlink a user-map tracking entry added by IOTrackingAddUser. The
 * link.next test is repeated under the lock because the cheap unlocked
 * check may race with another remover.
 */
void
IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
{
	if (!mem->link.next) {
		return;
	}

	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		remque(&mem->link);
		assert(queue->siteCount);
		queue->siteCount--;
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
544
545 uint64_t gIOTrackingAddTime;
546
/*
 * Record an allocation `mem` of `size` bytes against the current caller's
 * backtrace. Allocations sharing a backtrace (and tag / pid for user-type
 * queues) are grouped under one IOTrackingCallSite, found via a CRC hash
 * over the captured frames. When `address` is set, `mem` is actually an
 * IOTrackingAddress and is kept sorted per address-hash bucket within the
 * site's instance list.
 */
void
IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
{
	IOTrackingCallSite * site;
	uint32_t crc, num;
	uintptr_t bt[kIOTrackingCallSiteBTs + 1];
	uintptr_t btUser[kIOTrackingCallSiteBTs];
	queue_head_t * que;
	bool user;
	int pid;
	int userCount;

	if (mem->site) {
		return;
	}
	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	user = (0 != (kIOTrackingQueueTypeUser & queue->type));

	assert(!mem->link.next);

	// Capture one extra kernel frame; bt[0] (this function's immediate
	// caller) is skipped when hashing and copying below.
	num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1, NULL, NULL);
	if (!num) {
		return;
	}
	num--;
	crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);

	userCount = 0;
	pid = 0;
	backtrace_info_t btinfo = BTI_NONE;
	if (user) {
		// Fold the user backtrace into the crc so distinct user callers
		// map to distinct sites.
		if ((kernel_task != current_task()) && (pid = proc_selfpid())) {
			struct backtrace_user_info btuinfo = BTUINFO_INIT;
			userCount = backtrace_user(&btUser[0], kIOTrackingCallSiteBTs,
			    NULL, &btuinfo);
			assert(userCount <= kIOTrackingCallSiteBTs);
			btinfo = btuinfo.btui_info;
			crc = fasthash32(&btUser[0], userCount * sizeof(bt[0]), crc);
		}
	}

	IOTRecursiveLockLock(&queue->lock);
	// Look for an existing site with matching tag (and pid) and crc.
	que = &queue->sites[crc % queue->numSiteQs];
	queue_iterate(que, site, IOTrackingCallSite *, link)
	{
		if (tag != site->tag) {
			continue;
		}
		if (user && (pid != site->user[0].pid)) {
			continue;
		}
		if (crc == site->crc) {
			break;
		}
	}

	// Not found: create a new call site (with trailing user record for
	// user-type queues) and record the captured backtraces.
	if (queue_end(que, (queue_entry_t) site)) {
		if (user) {
			site = &kalloc_type(IOTrackingCallSiteWithUser,
			    Z_WAITOK_ZERO_NOFAIL)->site;
		} else {
			site = kalloc_type(IOTrackingCallSite,
			    Z_WAITOK_ZERO_NOFAIL);
		}

		queue_init(&site->instances);
		site->addresses = NULL;
		site->queue = queue;
		site->crc = crc;
		site->count = 0;
		site->tag = tag;
		memset(&site->size[0], 0, sizeof(site->size));
		bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
		assert(num <= kIOTrackingCallSiteBTs);
		bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));
		if (user) {
			bcopy(&btUser[0], &site->user[0].bt[0], userCount * sizeof(site->user[0].bt[0]));
			assert(userCount <= kIOTrackingCallSiteBTs);
			bzero(&site->user[0].bt[userCount], (kIOTrackingCallSiteBTs - userCount) * sizeof(site->user[0].bt[0]));
			site->user[0].pid = pid;
			site->user[0].user32 = !(btinfo & BTI_64_BIT);
			static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX);
			site->user[0].userCount = ((uint8_t) userCount);
		}
		queue_enter_first(que, site, IOTrackingCallSite *, link);
		queue->siteCount++;
	}

	if (address) {
		// Address-tracked instances: maintain a per-bucket index into the
		// (bucket-sorted) instance list so lookups by address are fast.
		IOTrackingAddress * memAddr = (typeof(memAddr))mem;
		uint32_t hashIdx;

		if (NULL == site->addresses) {
			site->addresses = kalloc_type(IOTracking *, queue->numSiteQs, Z_WAITOK_ZERO_NOFAIL);
			for (hashIdx = 0; hashIdx < queue->numSiteQs; hashIdx++) {
				// Empty-bucket sentinel: the list head itself.
				site->addresses[hashIdx] = (IOTracking *) &site->instances;
			}
		}
		hashIdx = atop(memAddr->address) % queue->numSiteQs;
		if (queue_end(&site->instances, (queue_entry_t)site->addresses[hashIdx])) {
			queue_enter/*last*/ (&site->instances, mem, IOTracking *, link);
		} else {
			queue_insert_before(&site->instances, mem, site->addresses[hashIdx], IOTracking *, link);
		}
		site->addresses[hashIdx] = mem;
	} else {
		queue_enter_first(&site->instances, mem, IOTracking *, link);
	}

	mem->site = site;
	site->size[0] += size;
	site->count++;

	IOTRecursiveLockUnlock(&queue->lock);
}
668
669 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
670
/*
 * Unlink `mem` from its call site, fixing up the site's per-bucket address
 * index when `addressIdx` names the bucket that currently points at `mem`
 * (-1U means no index fix-up is needed). Frees the site itself once its
 * last instance is removed. The link.next test is repeated under the lock
 * to close the race with a concurrent remover.
 */
static void
IOTrackingRemoveInternal(IOTrackingQueue * queue, IOTracking * mem, size_t size, uint32_t addressIdx)
{
	IOTrackingCallSite * site;
	IOTrackingAddress * nextAddress;

	if (!mem->link.next) {
		return;
	}

	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		assert(mem->site);
		site = mem->site;

		if ((-1U != addressIdx) && (mem == site->addresses[addressIdx])) {
			// Advance the bucket index to the next instance; if that one
			// belongs to a different bucket, reset to the empty sentinel.
			nextAddress = (IOTrackingAddress *) queue_next(&mem->link);
			if (!queue_end(&site->instances, &nextAddress->tracking.link)
			    && (addressIdx != (atop(nextAddress->address) % queue->numSiteQs))) {
				nextAddress = (IOTrackingAddress *) &site->instances;
			}
			site->addresses[addressIdx] = &nextAddress->tracking;
		}

		remque(&mem->link);
		assert(site->count);
		site->count--;
		assert(site->size[0] >= size);
		site->size[0] -= size;
		if (!site->count) {
			assert(queue_empty(&site->instances));
			assert(!site->size[0]);
			assert(!site->size[1]);

			remque(&site->link);
			assert(queue->siteCount);
			queue->siteCount--;
			IOTrackingFreeCallSite(queue->type, &site);
		}
		mem->site = NULL;
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
714
715 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
716
717 void
IOTrackingRemove(IOTrackingQueue * queue,IOTracking * mem,size_t size)718 IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
719 {
720 return IOTrackingRemoveInternal(queue, mem, size, -1U);
721 }
722
723 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
724
725 void
IOTrackingRemoveAddress(IOTrackingQueue * queue,IOTrackingAddress * mem,size_t size)726 IOTrackingRemoveAddress(IOTrackingQueue * queue, IOTrackingAddress * mem, size_t size)
727 {
728 uint32_t addressIdx;
729 uint64_t address;
730
731 address = mem->address;
732 addressIdx = atop(address) % queue->numSiteQs;
733
734 return IOTrackingRemoveInternal(queue, &mem->tracking, size, addressIdx);
735 }
736
737 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
738
/*
 * Track a raw allocation by address. The address is stored bitwise
 * inverted (note the matching `~` in IOTrackingFree and the compare
 * helpers); presumably so the stored value is not itself matched as a
 * pointer during leak scans -- confirm. The tracking record is heap
 * allocated here and flagged so IOTrackingReset knows to free it.
 */
void
IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
	IOTrackingAddress * tracking;

	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	address = ~address;
	tracking = kalloc_type(IOTrackingAddress, (zalloc_flags_t)(Z_WAITOK | Z_ZERO));
	IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
	tracking->address = address;
	tracking->size = size;

	IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE);
}
759
760 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
761
/*
 * Untrack the allocation at `address` (stored inverted, see
 * IOTrackingAlloc). Searches every call site; within a site the walk
 * starts at the per-bucket index and continues through that bucket's run
 * of instances. On a match the record is unlinked and its heap storage is
 * freed.
 */
void
IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
	IOTrackingCallSite * site;
	IOTrackingAddress * tracking;
	IOTrackingAddress * nextAddress;
	uint32_t idx, hashIdx;
	bool done;

	address = ~address;
	IOTRecursiveLockLock(&queue->lock);

	hashIdx = atop(address) % queue->numSiteQs;

	done = false;
	for (idx = 0; idx < queue->numSiteQs; idx++) {
		queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
		{
			if (!site->addresses) {
				continue;
			}
			// Start at this bucket's first instance and walk until the
			// run of same-bucket instances ends.
			tracking = (IOTrackingAddress *) site->addresses[hashIdx];
			while (!queue_end(&site->instances, &tracking->tracking.link)) {
				nextAddress = (IOTrackingAddress *) queue_next(&tracking->tracking.link);
				if (!queue_end(&site->instances, &nextAddress->tracking.link)
				    && (hashIdx != (atop(nextAddress->address) % queue->numSiteQs))) {
					// Next instance belongs to another bucket: treat as
					// end-of-run sentinel.
					nextAddress = (IOTrackingAddress *) &site->instances;
				}
				if ((done = (address == tracking->address))) {
					// Keep the bucket index valid if we are removing the
					// instance it points at.
					if (tracking == (IOTrackingAddress *) site->addresses[hashIdx]) {
						site->addresses[hashIdx] = &nextAddress->tracking;
					}
					IOTrackingRemoveInternal(queue, &tracking->tracking, size, -1U);
					kfree_type(IOTrackingAddress, tracking);
					break;
				}
				tracking = nextAddress;
			}
			if (done) {
				break;
			}
		}
		if (done) {
			break;
		}
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
810
/*
 * Free a call site plus its optional address-index array. The queue type
 * selects the correct allocation shape: user-type sites were allocated as
 * IOTrackingCallSiteWithUser (site + trailing user record).
 */
static void
IOTrackingFreeCallSite(uint32_t type, IOTrackingCallSite ** pSite)
{
	IOTrackingCallSite * site;
	void ** ptr;

	site = *pSite;
	kfree_type(IOTracking *, site->queue->numSiteQs, site->addresses);

	ptr = reinterpret_cast<void **>(pSite);
	if (kIOTrackingQueueTypeUser & type) {
		kfree_type(IOTrackingCallSiteWithUser, *ptr);
	} else {
		kfree_type(IOTrackingCallSite, *ptr);
	}
}
827
828 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
829
830 void
IOTrackingAccumSize(IOTrackingQueue * queue,IOTracking * mem,size_t size)831 IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
832 {
833 IOTRecursiveLockLock(&queue->lock);
834 if (mem->link.next) {
835 assert(mem->site);
836 assert((size > 0) || (mem->site->size[1] >= -size));
837 mem->site->size[1] += size;
838 }
839 ;
840 IOTRecursiveLockUnlock(&queue->lock);
841 }
842
843 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
844
/*
 * Drop every entry from a tracking queue. Map-type queues hold bare
 * IOTrackingUser records, which are simply unlinked. Other queues hold
 * call sites: each site's instances are detached, heap-allocated address
 * records (flagged by IOTrackingAlloc) are freed, and then the site itself
 * is freed. Resets siteCount to zero.
 */
void
IOTrackingReset(IOTrackingQueue * queue)
{
	IOTrackingCallSite * site;
	IOTrackingUser * user;
	IOTracking * tracking;
	IOTrackingAddress * trackingAddress;
	uint32_t idx, hashIdx;
	bool addresses;

	IOTRecursiveLockLock(&queue->lock);
	for (idx = 0; idx < queue->numSiteQs; idx++) {
		while (!queue_empty(&queue->sites[idx])) {
			if (kIOTrackingQueueTypeMap & queue->type) {
				queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
				user->link.next = user->link.prev = NULL;
			} else {
				queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
				// `addresses` latches once the first address-indexed
				// instance is seen; subsequent instances are treated the
				// same way without rescanning the index array.
				addresses = false;
				while (!queue_empty(&site->instances)) {
					queue_remove_first(&site->instances, tracking, IOTracking *, link);
					if (site->addresses) {
						for (hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
							if (tracking == site->addresses[hashIdx]) {
								addresses = true;
							}
						}
					}
					if (addresses) {
						trackingAddress = (typeof(trackingAddress))tracking;
						if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) {
							kfree_type(IOTrackingAddress, trackingAddress);
						}
					}
				}
				IOTrackingFreeCallSite(queue->type, &site);
			}
		}
	}
	queue->siteCount = 0;
	IOTRecursiveLockUnlock(&queue->lock);
}
887
888 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
889
890 static int
IOTrackingCallSiteInfoCompare(const void * left,const void * right)891 IOTrackingCallSiteInfoCompare(const void * left, const void * right)
892 {
893 IOTrackingCallSiteInfo * l = (typeof(l))left;
894 IOTrackingCallSiteInfo * r = (typeof(r))right;
895 size_t lsize, rsize;
896
897 rsize = r->size[0] + r->size[1];
898 lsize = l->size[0] + l->size[1];
899
900 return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
901 }
902
903 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
904
/*
 * qsort comparator: order instance slots by ascending tracked address.
 * Address-type instances store their address bitwise inverted (see
 * IOTrackingAlloc), hence the `~`; plain instances use the payload that
 * immediately follows the IOTracking header.
 * NOTE(review): the left slot is decoded with INSTANCE_GET while the right
 * uses a raw `& ~kInstanceFlags` mask -- presumably equivalent; confirm
 * against the INSTANCE_GET definition.
 */
static int
IOTrackingAddressCompare(const void * left, const void * right)
{
	IOTracking * instance;
	uintptr_t inst, laddr, raddr;

	inst = ((typeof(inst) *)left)[0];
	instance = (typeof(instance))INSTANCE_GET(inst);
	if (kInstanceFlagAddress & inst) {
		laddr = ~((IOTrackingAddress *)instance)->address;
	} else {
		laddr = (uintptr_t) (instance + 1);
	}

	inst = ((typeof(inst) *)right)[0];
	instance = (typeof(instance))(inst & ~kInstanceFlags);
	if (kInstanceFlagAddress & inst) {
		raddr = ~((IOTrackingAddress *)instance)->address;
	} else {
		raddr = (uintptr_t) (instance + 1);
	}

	return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
}
929
930
931 static int
IOTrackingZoneElementCompare(const void * left,const void * right)932 IOTrackingZoneElementCompare(const void * left, const void * right)
933 {
934 uintptr_t inst, laddr, raddr;
935
936 inst = ((typeof(inst) *)left)[0];
937 laddr = INSTANCE_PUT(inst);
938 inst = ((typeof(inst) *)right)[0];
939 raddr = INSTANCE_PUT(inst);
940
941 return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
942 }
943
944 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
945
/*
 * Copy a call site's kernel backtrace (unslid) and, for user-type queues,
 * its user backtrace into `siteInfo`. The queue's btEntry, when set, is
 * substituted for the first empty slot (or the last slot) of the kernel
 * trace. User frames are widened from 32-bit when the capturing process
 * was 32-bit.
 */
static void
CopyOutBacktraces(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
{
	uint32_t j;
	mach_vm_address_t bt, btEntry;

	btEntry = site->queue->btEntry;
	for (j = 0; j < kIOTrackingCallSiteBTs; j++) {
		bt = site->bt[j];
		if (btEntry
		    && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) {
			bt = btEntry;
			btEntry = 0;
		}
		siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
	}

	siteInfo->btPID = 0;
	if (kIOTrackingQueueTypeUser & site->queue->type) {
		siteInfo->btPID = site->user[0].pid;
		// The stored user frames are either 32- or 64-bit wide depending
		// on the captured process; view the buffer both ways.
		uint32_t * bt32 = (typeof(bt32))((void *) &site->user[0].bt[0]);
		uint64_t * bt64 = (typeof(bt64))((void *) &site->user[0].bt[0]);
		for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
			if (j >= site->user[0].userCount) {
				siteInfo->bt[1][j] = 0;
			} else if (site->user[0].user32) {
				siteInfo->bt[1][j] = bt32[j];
			} else {
				siteInfo->bt[1][j] = bt64[j];
			}
		}
	}
}
979
980 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
981
/*
 * Scan all mapped kernel memory for pointers into the candidate instances
 * in ref->instances (which must be sorted by address -- a binary search is
 * used). Instances that are referenced from outside themselves get
 * kInstanceFlagReferenced set; ref->found / ref->foundzlen count the hits.
 * Runs with interrupts disabled in 10ms slices, re-enabling them briefly
 * between slices to bound latency.
 */
static void
IOTrackingLeakScan(void * refcon)
{
	IOTrackingLeaksRef * ref = (typeof(ref))refcon;
	uintptr_t * instances;
	IOTracking * instance;
	uint64_t vaddr, vincr;
	ppnum_t ppn;
	uintptr_t ptr, addr, vphysaddr, inst;
	size_t size, origsize;
	uint32_t baseIdx, lim, ptrIdx, count;
	boolean_t is;
	AbsoluteTime deadline;

	instances = ref->instances;
	count = ref->count;
	size = origsize = ref->zoneSize;

	if (gIOTrackingLeakScanCallback) {
		gIOTrackingLeakScanCallback(kIOTrackingLeakScanStart);
	}

	// Walk the kernel address range page by page (vincr is set by
	// kernel_pmap_present_mapping below). `is` is first read only after
	// a deadline has been set, i.e. after it has been assigned.
	for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
	    ;
	    vaddr += vincr) {
		if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) {
			if (deadline) {
#if SCHED_HYGIENE_DEBUG
				if (is) {
					// Reset the interrupt timeout to avoid panics
					ml_spin_debug_clear_self();
				}
#endif /* SCHED_HYGIENE_DEBUG */
				ml_set_interrupts_enabled(is);
				IODelay(10);
			}
			if (vaddr >= VM_MAX_KERNEL_ADDRESS) {
				break;
			}
			is = ml_set_interrupts_enabled(false);
			clock_interval_to_deadline(10, kMillisecondScale, &deadline);
		}

		ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
		// check noencrypt to avoid VM structs (map entries) with pointers
		if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) {
			ppn = 0;
		}
		if (!ppn) {
			continue;
		}

		// Treat every pointer-aligned word in the page as a candidate
		// pointer and binary-search the sorted instance array for it.
		for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) {
			ptr = ((uintptr_t *)vphysaddr)[ptrIdx];
#if defined(HAS_APPLE_PAC)
			// strip possible ptrauth signature from candidate data pointer
			ptr = (uintptr_t)ptrauth_strip((void*)ptr, ptrauth_key_process_independent_data);
#endif /* defined(HAS_APPLE_PAC) */

			for (lim = count, baseIdx = 0; lim; lim >>= 1) {
				inst = instances[baseIdx + (lim >> 1)];
				instance = (typeof(instance))INSTANCE_GET(inst);

				// Recover the allocation's [addr, addr + size) range for
				// each of the three instance encodings.
				if (ref->zoneSize) {
					addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
				} else if (kInstanceFlagAddress & inst) {
					addr = ~((IOTrackingAddress *)instance)->address;
					origsize = size = ((IOTrackingAddress *)instance)->size;
					if (!size) {
						size = 1;
					}
				} else {
					addr = (uintptr_t) (instance + 1);
					origsize = size = instance->site->queue->allocSize;
				}
				// A hit only counts if the referencing word lies outside
				// the allocation itself (self-references are not roots).
				if ((ptr >= addr) && (ptr < (addr + size))

				    && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
				    || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) {
					if (!(kInstanceFlagReferenced & inst)) {
						inst |= kInstanceFlagReferenced;
						instances[baseIdx + (lim >> 1)] = inst;
						ref->found++;
						if (!origsize) {
							ref->foundzlen++;
						}
					}
					break;
				}
				if (ptr > addr) {
					// move right
					baseIdx += (lim >> 1) + 1;
					lim--;
				}
				// else move left
			}
		}
		ref->bytes += page_size;
	}

	if (gIOTrackingLeakScanCallback) {
		gIOTrackingLeakScanCallback(kIOTrackingLeakScanEnd);
	}
}
1086
1087 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1088
1089 extern "C" void
zone_leaks_scan(uintptr_t * instances,uint32_t count,uint32_t zoneSize,uint32_t * found)1090 zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
1091 {
1092 IOTrackingLeaksRef ref;
1093 IOTrackingCallSiteInfo siteInfo;
1094 uint32_t idx;
1095
1096 qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);
1097
1098 bzero(&siteInfo, sizeof(siteInfo));
1099 bzero(&ref, sizeof(ref));
1100 ref.instances = instances;
1101 ref.count = count;
1102 ref.zoneSize = zoneSize;
1103
1104 for (idx = 0; idx < 2; idx++) {
1105 ref.bytes = 0;
1106 IOTrackingLeakScan(&ref);
1107 IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
1108 if (count <= ref.found) {
1109 break;
1110 }
1111 }
1112
1113 *found = ref.found;
1114 }
1115
1116 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1117
/*
 * Given an OSData of tagged instance pointers (collected by the
 * kIOTrackingLeaks case in IOTrackingDebug, with the owning queues still
 * locked), scan kernel memory for references and build an OSData of
 * IOTrackingCallSiteInfo records describing call sites that still own
 * unreferenced (leaked) instances.  Consumes and releases 'data'.
 */
static OSData *
IOTrackingLeaks(LIBKERN_CONSUMED OSData * data)
{
	IOTrackingLeaksRef ref;
	IOTrackingCallSiteInfo siteInfo;
	IOTrackingCallSite * site;
	OSData * leakData;
	uintptr_t * instances;
	IOTracking * instance;
	uintptr_t inst;
	uint32_t count, idx, numSites, dups, siteCount;

	/* BEGIN IGNORE CODESTYLE */
	__typed_allocators_ignore_push
	instances = (typeof(instances))data->getBytesNoCopy();
	__typed_allocators_ignore_pop
	/* END IGNORE CODESTYLE */
	count = (data->getLength() / sizeof(*instances));
	// Sort so IOTrackingLeakScan can binary search for the allocation
	// containing each candidate pointer.
	qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);

	bzero(&siteInfo, sizeof(siteInfo));
	bzero(&ref, sizeof(ref));
	ref.instances = instances;
	ref.count = count;
	// Scan at most twice; stop early once every instance has been found
	// referenced somewhere.
	for (idx = 0; idx < 2; idx++) {
		ref.bytes = 0;
		IOTrackingLeakScan(&ref);
		IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen);
		if (count <= ref.found) {
			break;
		}
	}

	/* BEGIN IGNORE CODESTYLE */
	__typed_allocators_ignore_push
	leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
	__typed_allocators_ignore_pop
	/* END IGNORE CODESTYLE */

	// Compact the instances array in place: keep only the call sites of
	// instances that were NOT found referenced (i.e. the leaks).
	for (numSites = 0, idx = 0; idx < count; idx++) {
		inst = instances[idx];
		if (kInstanceFlagReferenced & inst) {
			continue;
		}
		instance = (typeof(instance))INSTANCE_GET(inst);
		site = instance->site;
		instances[numSites] = (uintptr_t) site;
		numSites++;
	}

	// De-duplicate call sites (zeroing out repeats) and emit one record
	// per site with the count of leaked instances attributed to it.
	for (idx = 0; idx < numSites; idx++) {
		inst = instances[idx];
		if (!inst) {
			continue;
		}
		site = (typeof(site))inst;
		for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
			if (instances[dups] == (uintptr_t) site) {
				siteCount++;
				instances[dups] = 0;
			}
		}
		// leak byte size is reported as:
		// (total bytes allocated by the callsite * number of leaked instances)
		// divided by (number of allocations by callsite)
		// NOTE(review): assumes site->count != 0 — presumably true since
		// the site still owns live instances; confirm.
		siteInfo.count = siteCount;
		siteInfo.size[0] = (site->size[0] * siteCount) / site->count;
		siteInfo.size[1] = (site->size[1] * siteCount) / site->count;
		CopyOutBacktraces(site, &siteInfo);
		__typed_allocators_ignore_push
		leakData->appendBytes(&siteInfo, sizeof(siteInfo));
		__typed_allocators_ignore_pop
	}
	data->release();

	return leakData;
}
1195
1196 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1197
1198 static bool
SkipName(uint32_t options,const char * name,size_t namesLen,const char * names)1199 SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
1200 {
1201 const char * scan;
1202 const char * next;
1203 bool exclude, found;
1204 size_t qLen, sLen;
1205
1206 if (!namesLen || !names) {
1207 return false;
1208 }
1209 // <len><name>...<len><name><0>
1210 exclude = (0 != (kIOTrackingExcludeNames & options));
1211 qLen = strlen(name);
1212 scan = names;
1213 found = false;
1214 do{
1215 sLen = scan[0];
1216 scan++;
1217 next = scan + sLen;
1218 if (next >= (names + namesLen)) {
1219 break;
1220 }
1221 found = ((sLen == qLen) && !strncmp(scan, name, sLen));
1222 scan = next;
1223 }while (!found && (scan < (names + namesLen)));
1224
1225 return !(exclude ^ found);
1226 }
1227
1228 #endif /* IOTRACKING */
1229
1230 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1231
/*
 * Central worker for the IOKitDiagnosticsClient external methods.  Walks
 * the global list of tracking queues and, depending on 'selector', resets
 * tracking, toggles capture, collects allocation/mapping records, or runs
 * the leak scanner.  For data-returning selectors an OSData is assembled
 * and handed back via '*result' (caller takes ownership).
 *
 * @param selector  kIOTracking* operation to perform.
 * @param options   kIOTrackingExcludeNames etc., see SkipName().
 * @param value     for kIOTrackingGetMappings: pid filter (-1 = all).
 * @param intag     optional vm_tag_t filter for kIOTrackingGetTracking.
 * @param inzsize   optional zone-size filter for kIOTrackingGetTracking.
 * @param names     packed queue/zone name list (see SkipName()).
 * @param namesLen  byte length of 'names' (0 if none).
 * @param size      minimum capture/map size filter.
 * @param result    out: OSData of IOTrackingCallSiteInfo records, or NULL.
 */
static kern_return_t
IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
    uint32_t intag, uint32_t inzsize,
    const char * names, size_t namesLen,
    size_t size, OSObject ** result)
{
	kern_return_t ret;
	OSData * data;

	if (result) {
		*result = NULL;
	}
	data = NULL;
	ret = kIOReturnNotReady;

#if IOTRACKING

	kern_return_t kr;
	IOTrackingQueue * queue;
	IOTracking * instance;
	IOTrackingCallSite * site;
	IOTrackingCallSiteInfo siteInfo;
	IOTrackingUser * user;
	task_t mapTask;
	mach_vm_address_t mapAddress;
	mach_vm_size_t mapSize;
	uint32_t num, idx, qIdx;
	uintptr_t instFlags;
	proc_t proc;
	bool addresses;

	ret = kIOReturnNotFound;
	proc = NULL;
	// Mapping queries may be filtered to a single process; value == -1
	// means all processes.
	if (kIOTrackingGetMappings == selector) {
		if (value != -1ULL) {
			proc = proc_find((pid_t) value);
			if (!proc) {
				return kIOReturnNotFound;
			}
		}
	}

	bzero(&siteInfo, sizeof(siteInfo));
	lck_mtx_lock(gIOTrackingLock);
	queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
	{
		// Honor the caller's include/exclude name list.
		if (SkipName(options, queue->name, namesLen, names)) {
			continue;
		}

		// Allocation queues only carry data when kIOTracking is enabled.
		if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) {
			continue;
		}

		switch (selector) {
		case kIOTrackingResetTracking:
		{
			IOTrackingReset(queue);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingStartCapture:
		case kIOTrackingStopCapture:
		{
			queue->captureOn = (kIOTrackingStartCapture == selector);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingSetMinCaptureSize:
		{
			queue->minCaptureSize = size;
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingLeaks:
		{
			if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
				break;
			}

			if (!data) {
				/* BEGIN IGNORE CODESTYLE */
				__typed_allocators_ignore_push
				data = OSData::withCapacity(1024 * sizeof(uintptr_t));
				__typed_allocators_ignore_pop
				/* END IGNORE CODESTYLE */
			}

			// Collect every live instance pointer, tagging address-tracked
			// entries (IOTrackingAddress) so the leak scanner can recover
			// their real address and size.
			IOTRecursiveLockLock(&queue->lock);
			for (idx = 0; idx < queue->numSiteQs; idx++) {
				queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
				{
					addresses = false;
					queue_iterate(&site->instances, instance, IOTracking *, link)
					{
						if (site->addresses) {
							for (uint32_t hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
								if (instance == site->addresses[hashIdx]) {
									addresses = true;
								}
							}
						}
						instFlags = (typeof(instFlags))instance;
						if (addresses) {
							instFlags |= kInstanceFlagAddress;
						}
						data->appendValue(instFlags);
					}
				}
			}
			// queue is locked
			// Deliberately left locked: the collected pointers must stay
			// valid until IOTrackingLeaks() finishes; queues are unlocked
			// in the kIOTrackingLeaks post-pass below.
			ret = kIOReturnSuccess;
			break;
		}


		case kIOTrackingGetTracking:
		{
			if (kIOTrackingQueueTypeMap & queue->type) {
				break;
			}

			if (!data) {
				/* BEGIN IGNORE CODESTYLE */
				__typed_allocators_ignore_push
				data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
				__typed_allocators_ignore_pop
				/* END IGNORE CODESTYLE */
			}

			IOTRecursiveLockLock(&queue->lock);
			num = queue->siteCount;
			idx = 0;
			for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
				queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
				{
					assert(idx < num);
					idx++;

					size_t tsize[2];
					uint32_t count = site->count;
					tsize[0] = site->size[0];
					tsize[1] = site->size[1];

					// With a tag or zone-size filter, recompute count/size
					// from the individual instances that pass the filter.
					if (intag || inzsize) {
						uintptr_t addr;
						vm_size_t size, zoneSize;
						vm_tag_t tag;

						if (kIOTrackingQueueTypeAlloc & queue->type) {
							addresses = false;
							count = 0;
							tsize[0] = tsize[1] = 0;
							queue_iterate(&site->instances, instance, IOTracking *, link)
							{
								if (site->addresses) {
									for (uint32_t hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
										if (instance == site->addresses[hashIdx]) {
											addresses = true;
										}
									}
								}

								if (addresses) {
									// address-tracked: stored complemented
									addr = ~((IOTrackingAddress *)instance)->address;
								} else {
									addr = (uintptr_t) (instance + 1);
								}

								kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
								if (KERN_SUCCESS != kr) {
									continue;
								}

								if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) {
									continue;
								}
								if (inzsize && (inzsize != zoneSize)) {
									continue;
								}

								count++;
								tsize[0] += size;
							}
						} else {
							if (!intag || inzsize || (intag != site->tag)) {
								continue;
							}
						}
					}

					if (!count) {
						continue;
					}
					// apply the minimum-size filter
					if (size && ((tsize[0] + tsize[1]) < size)) {
						continue;
					}
					siteInfo.count = count;
					siteInfo.size[0] = tsize[0];
					siteInfo.size[1] = tsize[1];
					CopyOutBacktraces(site, &siteInfo);
					__typed_allocators_ignore_push
					data->appendBytes(&siteInfo, sizeof(siteInfo));
					__typed_allocators_ignore_pop
				}
			}
			assert(idx == num);
			IOTRecursiveLockUnlock(&queue->lock);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingGetMappings:
		{
			if (!(kIOTrackingQueueTypeMap & queue->type)) {
				break;
			}
			if (!data) {
				data = OSData::withCapacity((unsigned int) page_size);
			}

			IOTRecursiveLockLock(&queue->lock);
			num = queue->siteCount;
			idx = 0;
			for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
				queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
				{
					assert(idx < num);
					idx++;

					kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
					if (kIOReturnSuccess != kr) {
						continue;
					}
					// apply pid and minimum-size filters
					if (proc && (mapTask != proc_task(proc))) {
						continue;
					}
					if (size && (mapSize < size)) {
						continue;
					}

					siteInfo.count = 1;
					siteInfo.size[0] = mapSize;
					siteInfo.address = mapAddress;
					siteInfo.addressPID = task_pid(mapTask);
					siteInfo.btPID = user->btPID;

					// kernel backtrace, unslid for userspace consumption
					for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
						siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
					}
					// user backtrace: stored as 32- or 64-bit frames
					uint32_t * bt32 = (typeof(bt32)) & user->btUser[0];
					uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]);
					for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
						if (j >= user->userCount) {
							siteInfo.bt[1][j] = 0;
						} else if (user->user32) {
							siteInfo.bt[1][j] = bt32[j];
						} else {
							siteInfo.bt[1][j] = bt64[j];
						}
					}
					__typed_allocators_ignore_push
					data->appendBytes(&siteInfo, sizeof(siteInfo));
					__typed_allocators_ignore_pop
				}
			}
			assert(idx == num);
			IOTRecursiveLockUnlock(&queue->lock);
			ret = kIOReturnSuccess;
			break;
		}

		default:
			ret = kIOReturnUnsupported;
			break;
		}
	}

	// Leaks post-pass: run the scan on the collected pointers, then drop
	// the queue locks that were intentionally held across it (see above).
	if ((kIOTrackingLeaks == selector) && data) {
		data = IOTrackingLeaks(data);
		queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
		{
			if (SkipName(options, queue->name, namesLen, names)) {
				continue;
			}
			if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
				continue;
			}
			IOTRecursiveLockUnlock(&queue->lock);
		}
	}

	lck_mtx_unlock(gIOTrackingLock);

	// For named leak queries, also run the zalloc zone leak detector on
	// each requested zone name and append its findings.
	if ((kIOTrackingLeaks == selector) && namesLen && names) {
		const char * scan;
		const char * next;
		uint8_t sLen;

		if (!data) {
			/* BEGIN IGNORE CODESTYLE */
			__typed_allocators_ignore_push
			data = OSData::withCapacity(4096 * sizeof(uintptr_t));
			__typed_allocators_ignore_pop
			/* END IGNORE CODESTYLE */
		}

		// <len><name>...<len><name><0>
		scan = names;
		do{
			sLen = ((uint8_t) scan[0]);
			scan++;
			next = scan + sLen;
			if (next >= (names + namesLen)) {
				break;
			}
			kr = zone_leaks(scan, sLen, ^(uint32_t count, uint32_t eSize, btref_t ref) {
				IOTrackingCallSiteInfo siteInfo = {
					.count = count,
					.size[0] = eSize * count,
				};

				btref_decode_unslide(ref, siteInfo.bt[0]);
				__typed_allocators_ignore_push
				data->appendBytes(&siteInfo, sizeof(siteInfo));
				__typed_allocators_ignore_pop
			});
			if (KERN_SUCCESS == kr) {
				ret = kIOReturnSuccess;
			} else if (KERN_INVALID_NAME != kr) {
				ret = kIOReturnVMError;
			}
			scan = next;
		}while (scan < (names + namesLen));
	}

	// Sort the collected records for stable, largest-first presentation.
	if (data) {
		switch (selector) {
		case kIOTrackingLeaks:
		case kIOTrackingGetTracking:
		case kIOTrackingGetMappings:
		{
			IOTrackingCallSiteInfo * siteInfos;
			/* BEGIN IGNORE CODESTYLE */
			__typed_allocators_ignore_push
			siteInfos = (typeof(siteInfos))data->getBytesNoCopy();
			__typed_allocators_ignore_pop
			/* END IGNORE CODESTYLE */
			num = (data->getLength() / sizeof(*siteInfos));
			qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
			break;
		}
		default: assert(false); break;
		}
	}

	// NOTE(review): 'result' was NULL-checked at entry but is dereferenced
	// unconditionally here — confirm all callers pass a non-NULL result.
	*result = data;
	if (proc) {
		proc_rele(proc);
	}

#endif /* IOTRACKING */

	return ret;
}
1600
1601 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1602
1603 #include <IOKit/IOKitDiagnosticsUserClient.h>
1604
1605 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1606
1607 #undef super
1608 #define super IOUserClient2022
1609
OSDefineMetaClassAndStructors(IOKitDiagnosticsClient,IOUserClient2022)1610 OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient2022)
1611
1612 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1613
1614 IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
1615 {
1616 #if IOTRACKING
1617 IOKitDiagnosticsClient * inst;
1618
1619 inst = new IOKitDiagnosticsClient;
1620 if (inst && !inst->init()) {
1621 inst->release();
1622 inst = NULL;
1623 }
1624
1625 inst->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
1626 inst->setProperty(kIOUserClientDefaultLockingSetPropertiesKey, kOSBooleanTrue);
1627 inst->setProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey, kOSBooleanTrue);
1628
1629 inst->setProperty(kIOUserClientEntitlementsKey, kOSBooleanFalse);
1630
1631 return inst;
1632 #else
1633 return NULL;
1634 #endif
1635 }
1636
/*
 * Called when the owning task closes its connection; terminate the
 * user-client service object.  Always succeeds.
 */
IOReturn
IOKitDiagnosticsClient::clientClose(void)
{
	terminate();
	return kIOReturnSuccess;
}
1643
1644 IOReturn
setProperties(OSObject * properties)1645 IOKitDiagnosticsClient::setProperties(OSObject * properties)
1646 {
1647 IOReturn kr = kIOReturnUnsupported;
1648 return kr;
1649 }
1650
1651
1652 IOReturn
IOTrackingMethodDispatched(OSObject * target,void * reference,IOExternalMethodArguments * args)1653 IOTrackingMethodDispatched(OSObject * target, void * reference,
1654 IOExternalMethodArguments * args)
1655 {
1656 IOReturn ret = kIOReturnBadArgument;
1657 const IOKitDiagnosticsParameters * params;
1658 const char * names;
1659 size_t namesLen;
1660 OSObject * result;
1661
1662 if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) {
1663 return kIOReturnBadArgument;
1664 }
1665 params = (typeof(params))args->structureInput;
1666 if (!params) {
1667 return kIOReturnBadArgument;
1668 }
1669
1670 names = NULL;
1671 namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
1672 if (namesLen) {
1673 names = (typeof(names))(params + 1);
1674 }
1675
1676 ret = IOTrackingDebug(args->selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result);
1677 if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) {
1678 *args->structureVariableOutputData = result;
1679 } else if (result) {
1680 result->release();
1681 }
1682 return ret;
1683 }
1684
/*
 * IOUserClient2022 external-method entry point.  Every supported selector
 * dispatches to the same handler (IOTrackingMethodDispatched) with an
 * identical signature: no scalars, variable-size structure input, no
 * structure output, no async, no entitlement check — per-selector
 * validation happens inside IOTrackingDebug().
 */
IOReturn
IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque * args)
{
	static const IOExternalMethodDispatch2022 dispatchArray[] = {
		[kIOTrackingGetTracking] = {
			.function = &IOTrackingMethodDispatched,
			.checkScalarInputCount = 0,
			.checkStructureInputSize = kIOUCVariableStructureSize,
			.checkScalarOutputCount = 0,
			.checkStructureOutputSize = 0,
			.allowAsync = false,
			.checkEntitlement = NULL,
		},
		[kIOTrackingGetMappings] = {
			.function = &IOTrackingMethodDispatched,
			.checkScalarInputCount = 0,
			.checkStructureInputSize = kIOUCVariableStructureSize,
			.checkScalarOutputCount = 0,
			.checkStructureOutputSize = 0,
			.allowAsync = false,
			.checkEntitlement = NULL,
		},
		[kIOTrackingResetTracking] = {
			.function = &IOTrackingMethodDispatched,
			.checkScalarInputCount = 0,
			.checkStructureInputSize = kIOUCVariableStructureSize,
			.checkScalarOutputCount = 0,
			.checkStructureOutputSize = 0,
			.allowAsync = false,
			.checkEntitlement = NULL,
		},
		[kIOTrackingStartCapture] = {
			.function = &IOTrackingMethodDispatched,
			.checkScalarInputCount = 0,
			.checkStructureInputSize = kIOUCVariableStructureSize,
			.checkScalarOutputCount = 0,
			.checkStructureOutputSize = 0,
			.allowAsync = false,
			.checkEntitlement = NULL,
		},
		[kIOTrackingStopCapture] = {
			.function = &IOTrackingMethodDispatched,
			.checkScalarInputCount = 0,
			.checkStructureInputSize = kIOUCVariableStructureSize,
			.checkScalarOutputCount = 0,
			.checkStructureOutputSize = 0,
			.allowAsync = false,
			.checkEntitlement = NULL,
		},
		[kIOTrackingSetMinCaptureSize] = {
			.function = &IOTrackingMethodDispatched,
			.checkScalarInputCount = 0,
			.checkStructureInputSize = kIOUCVariableStructureSize,
			.checkScalarOutputCount = 0,
			.checkStructureOutputSize = 0,
			.allowAsync = false,
			.checkEntitlement = NULL,
		},
		[kIOTrackingLeaks] = {
			.function = &IOTrackingMethodDispatched,
			.checkScalarInputCount = 0,
			.checkStructureInputSize = kIOUCVariableStructureSize,
			.checkScalarOutputCount = 0,
			.checkStructureOutputSize = 0,
			.allowAsync = false,
			.checkEntitlement = NULL,
		},
	};

	return dispatchExternalMethod(selector, args, dispatchArray, sizeof(dispatchArray) / sizeof(dispatchArray[0]), this, NULL);
}
1756
1757 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1758