xref: /xnu-11215/iokit/Kernel/IOLib.cpp (revision d4514f0b)
1 /*
2  * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * HISTORY
30  *
31  * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
32  * 17-Nov-98   cpp
33  *
34  */
35 
36 #include <IOKit/system.h>
37 #include <mach/sync_policy.h>
38 #include <machine/machine_routines.h>
39 #include <vm/vm_kern_xnu.h>
40 #include <vm/vm_map_xnu.h>
41 #include <libkern/c++/OSCPPDebug.h>
42 
43 #include <IOKit/assert.h>
44 
45 #include <IOKit/IOReturn.h>
46 #include <IOKit/IOLib.h>
47 #include <IOKit/IOLocks.h>
48 #include <IOKit/IOMapper.h>
49 #include <IOKit/IOBufferMemoryDescriptor.h>
50 #include <IOKit/IOKitDebug.h>
51 
52 #include "IOKitKernelInternal.h"
53 
54 #ifdef IOALLOCDEBUG
55 #include <libkern/OSDebug.h>
56 #include <sys/sysctl.h>
57 #endif
58 
59 #include "libkern/OSAtomic.h"
60 #include <libkern/c++/OSKext.h>
61 #include <IOKit/IOStatisticsPrivate.h>
62 #include <os/log_private.h>
63 #include <sys/msgbuf.h>
64 #include <console/serial_protos.h>
65 
66 #if IOKITSTATS
67 
68 #define IOStatisticsAlloc(type, size) \
69 do { \
70 	IOStatistics::countAlloc(type, size); \
71 } while (0)
72 
73 #else
74 
75 #define IOStatisticsAlloc(type, size)
76 
77 #endif /* IOKITSTATS */
78 
79 
80 #define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))
81 
82 
83 extern "C"
84 {
85 mach_timespec_t IOZeroTvalspec = { 0, 0 };
86 
87 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
88 
89 extern int
90 __doprnt(
91 	const char              *fmt,
92 	va_list                 argp,
93 	void                    (*putc)(int, void *),
94 	void                    *arg,
95 	int                     radix,
96 	int                     is_log);
97 
98 extern bool bsd_log_lock(bool);
99 extern void bsd_log_unlock(void);
100 
101 
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103 
104 lck_grp_t        io_lck_grp;
105 lck_grp_t       *IOLockGroup;
106 
107 /*
108  * Global variables for use by iLogger
109  * These symbols are for use only by Apple diagnostic code.
110  * Binary compatibility is not guaranteed for kexts that reference these symbols.
111  */
112 
113 void *_giDebugLogInternal       = NULL;
114 void *_giDebugLogDataInternal   = NULL;
115 void *_giDebugReserved1         = NULL;
116 void *_giDebugReserved2         = NULL;
117 
118 #if defined(__x86_64__)
119 iopa_t gIOBMDPageAllocator;
120 #endif /* defined(__x86_64__) */
121 
122 /*
123  * Static variables for this module.
124  */
125 
126 static queue_head_t gIOMallocContiguousEntries;
127 static lck_mtx_t *  gIOMallocContiguousEntriesLock;
128 
129 #if __x86_64__
130 enum { kIOMaxPageableMaps    = 8 };
131 enum { kIOMaxFixedRanges     = 4 };
132 enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
133 enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
134 #else
135 enum { kIOMaxPageableMaps    = 16 };
136 enum { kIOMaxFixedRanges     = 4 };
137 enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
138 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
139 #endif
140 
141 typedef struct {
142 	vm_map_t            map;
143 	vm_offset_t address;
144 	vm_offset_t end;
145 } IOMapData;
146 
147 static SECURITY_READ_ONLY_LATE(struct mach_vm_range)
148 gIOKitPageableFixedRanges[kIOMaxFixedRanges];
149 
150 static struct {
151 	UInt32      count;
152 	UInt32      hint;
153 	IOMapData   maps[kIOMaxPageableMaps];
154 	lck_mtx_t * lock;
155 } gIOKitPageableSpace;
156 
157 #if defined(__x86_64__)
158 static iopa_t gIOPageablePageAllocator;
159 
160 uint32_t  gIOPageAllocChunkBytes;
161 #endif /* defined(__x86_64__) */
162 
163 #if IOTRACKING
164 IOTrackingQueue * gIOMallocTracking;
165 IOTrackingQueue * gIOWireTracking;
166 IOTrackingQueue * gIOMapTracking;
167 #endif /* IOTRACKING */
168 
169 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
170 
171 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed0,
172     &gIOKitPageableFixedRanges[0], kIOPageableMapSize);
173 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed1,
174     &gIOKitPageableFixedRanges[1], kIOPageableMapSize);
175 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed2,
176     &gIOKitPageableFixedRanges[2], kIOPageableMapSize);
177 KMEM_RANGE_REGISTER_STATIC(gIOKitPageableFixed3,
178     &gIOKitPageableFixedRanges[3], kIOPageableMapSize);
179 void
180 IOLibInit(void)
181 {
182 	static bool libInitialized;
183 
184 	if (libInitialized) {
185 		return;
186 	}
187 
188 	lck_grp_init(&io_lck_grp, "IOKit", LCK_GRP_ATTR_NULL);
189 	IOLockGroup = &io_lck_grp;
190 
191 #if IOTRACKING
192 	IOTrackingInit();
193 	gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
194 	    kIOTrackingQueueTypeAlloc,
195 	    37);
196 	gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);
197 
198 	size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
199 	gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
200 	    kIOTrackingQueueTypeDefaultOn
201 	    | kIOTrackingQueueTypeMap
202 	    | kIOTrackingQueueTypeUser,
203 	    0);
204 #endif
205 
206 	gIOKitPageableSpace.maps[0].map = kmem_suballoc(kernel_map,
207 	    &gIOKitPageableFixedRanges[0].min_address,
208 	    kIOPageableMapSize,
209 	    VM_MAP_CREATE_PAGEABLE,
210 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
211 	    (kms_flags_t)(KMS_PERMANENT | KMS_DATA | KMS_NOFAIL),
212 	    VM_KERN_MEMORY_IOKIT).kmr_submap;
213 
214 	gIOKitPageableSpace.maps[0].address = gIOKitPageableFixedRanges[0].min_address;
215 	gIOKitPageableSpace.maps[0].end     = gIOKitPageableFixedRanges[0].max_address;
216 	gIOKitPageableSpace.lock            = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
217 	gIOKitPageableSpace.hint            = 0;
218 	gIOKitPageableSpace.count           = 1;
219 
220 	gIOMallocContiguousEntriesLock      = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
221 	queue_init( &gIOMallocContiguousEntries );
222 
223 #if defined(__x86_64__)
224 	gIOPageAllocChunkBytes = PAGE_SIZE / 64;
225 
226 	assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
227 	iopa_init(&gIOBMDPageAllocator);
228 	iopa_init(&gIOPageablePageAllocator);
229 #endif /* defined(__x86_64__) */
230 
231 
232 	libInitialized = true;
233 }
234 
235 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
236 
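/*
 * log2up() returns the ceiling of log2(size): log2up(1) == 0, log2up(2) == 1,
 * log2up(5) == 3, log2up(4096) == 12. It is used below to round alignment
 * requests up to the next power of two.
 */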
237 vm_size_t
238 log2up(vm_size_t size)
239 {
240 	if (size <= 1) {
241 		size = 0;
242 	} else {
243 #if __LP64__
244 		size = 64 - __builtin_clzl(size - 1);
245 #else
246 		size = 32 - __builtin_clzl(size - 1);
247 #endif
248 	}
249 	return size;
250 }
251 
252 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
253 
254 IOThread
255 IOCreateThread(IOThreadFunc fcn, void *arg)
256 {
257 	kern_return_t   result;
258 	thread_t                thread;
259 
260 	result = kernel_thread_start((thread_continue_t)(void (*)(void))fcn, arg, &thread);
261 	if (result != KERN_SUCCESS) {
262 		return NULL;
263 	}
264 
265 	thread_deallocate(thread);
266 
267 	return thread;
268 }
269 
270 
271 void
272 IOExitThread(void)
273 {
274 	(void) thread_terminate(current_thread());
275 }
276 
277 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
278 
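/*
 * On IOTRACKING kernels with kIOTracking enabled (see TRACK_ALLOC above),
 * every IOMalloc allocation is prefixed with an IOLibMallocHeader carrying
 * the leak-tracking record. sizeofIOLibMallocHeader below collapses to zero
 * when tracking is compiled out or disabled, so untracked builds pay no
 * per-allocation overhead.
 */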
279 #if IOTRACKING
280 struct IOLibMallocHeader {
281 	IOTrackingAddress tracking;
282 };
283 #endif
284 
285 #if IOTRACKING
286 #define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
287 #else
288 #define sizeofIOLibMallocHeader (0)
289 #endif
290 
291 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
292 
293 __typed_allocators_ignore_push // allocator implementation
294 
295 void *
296 (IOMalloc_internal)(struct kalloc_heap *kheap, vm_size_t size,
297 zalloc_flags_t flags)
298 {
299 	void * address;
300 	vm_size_t allocSize;
301 
302 	allocSize = size + sizeofIOLibMallocHeader;
303 #if IOTRACKING
304 	if (sizeofIOLibMallocHeader && (allocSize <= size)) {
305 		return NULL;                                          // overflow
306 	}
307 #endif
308 	address = kheap_alloc(kheap, allocSize,
309 	    Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));
310 
311 	if (address) {
312 #if IOTRACKING
313 		if (TRACK_ALLOC) {
314 			IOLibMallocHeader * hdr;
315 			hdr = (typeof(hdr))address;
316 			bzero(&hdr->tracking, sizeof(hdr->tracking));
317 			hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
318 			hdr->tracking.size    = size;
319 			IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
320 		}
321 #endif
322 		address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);
323 
324 #if IOALLOCDEBUG
325 		OSAddAtomicLong(size, &debug_iomalloc_size);
326 #endif
327 		IOStatisticsAlloc(kIOStatisticsMalloc, size);
328 	}
329 
330 	return address;
331 }
332 
333 void
334 IOFree_internal(struct kalloc_heap *kheap, void * inAddress, vm_size_t size)
335 {
336 	void * address;
337 
338 	if ((address = inAddress)) {
339 		address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);
340 
341 #if IOTRACKING
342 		if (TRACK_ALLOC) {
343 			IOLibMallocHeader * hdr;
344 			struct ptr_reference { void * ptr; };
345 			volatile struct ptr_reference ptr;
346 
347 			// we're about to block in IOTrackingRemove(), make sure the original pointer
348 			// exists in memory or a register for leak scanning to find
349 			ptr.ptr = inAddress;
350 
351 			hdr = (typeof(hdr))address;
352 			if (size != hdr->tracking.size) {
353 				OSReportWithBacktrace("bad IOFree size 0x%zx should be 0x%zx",
354 				    (size_t)size, (size_t)hdr->tracking.size);
355 				size = hdr->tracking.size;
356 			}
357 			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
358 			ptr.ptr = NULL;
359 		}
360 #endif
361 
362 		kheap_free(kheap, address, size + sizeofIOLibMallocHeader);
363 #if IOALLOCDEBUG
364 		OSAddAtomicLong(-size, &debug_iomalloc_size);
365 #endif
366 		IOStatisticsAlloc(kIOStatisticsFree, size);
367 	}
368 }
369 
370 void *
371 IOMalloc_external(
372 	vm_size_t size);
373 void *
374 IOMalloc_external(
375 	vm_size_t size)
376 {
377 	return IOMalloc_internal(KHEAP_DEFAULT, size, Z_VM_TAG_BT_BIT);
378 }
379 
380 void
381 IOFree(void * inAddress, vm_size_t size)
382 {
383 	IOFree_internal(KHEAP_DEFAULT, inAddress, size);
384 }
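/*
 * Caller-side sketch (illustrative only; by its name, IOMalloc_external is
 * assumed to back the public IOMalloc symbol, with the export mapping
 * defined elsewhere). Allocations must be released with IOFree() using the
 * same size that was requested:
 *
 *	void *buf = IOMalloc(len);
 *	if (buf != NULL) {
 *		// ... use buf ...
 *		IOFree(buf, len);
 *	}
 */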
385 
386 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
387 
388 void *
389 IOMallocZero_external(
390 	vm_size_t size);
391 void *
392 IOMallocZero_external(
393 	vm_size_t size)
394 {
395 	return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO_VM_TAG_BT_BIT);
396 }
397 
398 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
399 
400 vm_tag_t
401 IOMemoryTag(vm_map_t map)
402 {
403 	vm_tag_t tag;
404 
405 	if (!vm_kernel_map_is_kernel(map)) {
406 		return VM_MEMORY_IOKIT;
407 	}
408 
409 	tag = vm_tag_bt();
410 	if (tag == VM_KERN_MEMORY_NONE) {
411 		tag = VM_KERN_MEMORY_IOKIT;
412 	}
413 
414 	return tag;
415 }
416 
417 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
418 
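/*
 * For sub-page IOMallocAligned allocations, an IOLibPageMallocHeader is
 * stored immediately below the aligned pointer handed back to the caller.
 * It records the alignment mask and the offset back to the true start of the
 * underlying allocation (signed with ptrauth where available), so that
 * IOFreeAligned can recover and validate the original allocation address.
 */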
419 struct IOLibPageMallocHeader {
420 	mach_vm_size_t    alignMask;
421 	mach_vm_offset_t  allocationOffset;
422 #if IOTRACKING
423 	IOTrackingAddress tracking;
424 #endif
425 };
426 
427 #if IOTRACKING
428 #define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
429 #else
430 #define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader))
431 #endif
432 
433 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
434 
435 static __header_always_inline void
436 IOMallocAlignedSetHdr(
437 	IOLibPageMallocHeader  *hdr,
438 	mach_vm_size_t          alignMask,
439 	mach_vm_address_t       allocationStart,
440 	mach_vm_address_t       alignedStart)
441 {
442 	mach_vm_offset_t        offset = alignedStart - allocationStart;
443 #if __has_feature(ptrauth_calls)
444 	offset = (mach_vm_offset_t) ptrauth_sign_unauthenticated((void *)offset,
445 	    ptrauth_key_process_independent_data,
446 	    ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
447 	    OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
448 #endif /* __has_feature(ptrauth_calls) */
449 	hdr->allocationOffset = offset;
450 	hdr->alignMask = alignMask;
451 }
452 
453 __abortlike
454 static void
455 IOMallocAlignedHdrCorruptionPanic(
456 	mach_vm_offset_t        offset,
457 	mach_vm_size_t          alignMask,
458 	mach_vm_address_t       alignedStart,
459 	vm_size_t               size)
460 {
461 	mach_vm_address_t       address = 0;
462 	mach_vm_address_t       recalAlignedStart = 0;
463 
464 	if (os_sub_overflow(alignedStart, offset, &address)) {
465 		panic("Invalid offset %p for aligned addr %p", (void *)offset,
466 		    (void *)alignedStart);
467 	}
468 	if (os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
469 	    &recalAlignedStart)) {
470 		panic("alignMask 0x%llx overflows recalAlignedStart %p for provided addr "
471 		    "%p", alignMask, (void *)recalAlignedStart, (void *)alignedStart);
472 	}
473 	if (((recalAlignedStart &= ~alignMask) != alignedStart) &&
474 	    (round_page(recalAlignedStart) != alignedStart)) {
475 		panic("Recalculated aligned addr %p doesn't match provided addr %p",
476 		    (void *)recalAlignedStart, (void *)alignedStart);
477 	}
478 	if (offset < sizeofIOLibPageMallocHeader) {
479 		panic("Offset %zd doesn't accommodate IOLibPageMallocHeader for aligned "
480 		    "addr %p", (size_t)offset, (void *)alignedStart);
481 	}
482 	panic("alignMask 0x%llx overflows adjusted size %zd for aligned addr %p",
483 	    alignMask, (size_t)size, (void *)alignedStart);
484 }
485 
486 static __header_always_inline mach_vm_address_t
487 IOMallocAlignedGetAddress(
488 	IOLibPageMallocHeader  *hdr,
489 	mach_vm_address_t       alignedStart,
490 	vm_size_t              *size)
491 {
492 	mach_vm_address_t       address = 0;
493 	mach_vm_address_t       recalAlignedStart = 0;
494 	mach_vm_offset_t        offset = hdr->allocationOffset;
495 	mach_vm_size_t          alignMask = hdr->alignMask;
496 #if __has_feature(ptrauth_calls)
497 	offset = (mach_vm_offset_t) ptrauth_auth_data((void *)offset,
498 	    ptrauth_key_process_independent_data,
499 	    ptrauth_blend_discriminator((void *)(alignedStart | alignMask),
500 	    OS_PTRAUTH_DISCRIMINATOR("IOLibPageMallocHeader.allocationOffset")));
501 #endif /* __has_feature(ptrauth_calls) */
502 	if (os_sub_overflow(alignedStart, offset, &address) ||
503 	    os_add3_overflow(address, sizeofIOLibPageMallocHeader, alignMask,
504 	    &recalAlignedStart) ||
505 	    (((recalAlignedStart &= ~alignMask) != alignedStart) &&
506 	    (round_page(recalAlignedStart) != alignedStart)) ||
507 	    (offset < sizeofIOLibPageMallocHeader) ||
508 	    os_add_overflow(*size, alignMask, size)) {
509 		IOMallocAlignedHdrCorruptionPanic(offset, alignMask, alignedStart, *size);
510 	}
511 	return address;
512 }
513 
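/*
 * IOMallocAligned_internal: requests that are at least a page (including the
 * header) go straight to kernel_memory_allocate with the alignment mask.
 * Smaller requests over-allocate by alignMask plus the header from either
 * kmem or the kalloc heap, align the result by hand, and stash an
 * IOLibPageMallocHeader just below the returned pointer.
 */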
514 void *
515 (IOMallocAligned_internal)(struct kalloc_heap *kheap, vm_size_t size,
516 vm_size_t alignment, zalloc_flags_t flags)
517 {
518 	kern_return_t           kr;
519 	vm_offset_t             address;
520 	vm_offset_t             allocationAddress;
521 	vm_size_t               adjustedSize;
522 	uintptr_t               alignMask;
523 	IOLibPageMallocHeader * hdr;
524 	kma_flags_t kma_flags = KMA_NONE;
525 
526 	if (size == 0) {
527 		return NULL;
528 	}
529 	if (((uint32_t) alignment) != alignment) {
530 		return NULL;
531 	}
532 
533 	if (flags & Z_ZERO) {
534 		kma_flags = KMA_ZERO;
535 	}
536 
537 	if (kheap == KHEAP_DATA_BUFFERS) {
538 		kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
539 	}
540 
541 	alignment = (1UL << log2up((uint32_t) alignment));
542 	alignMask = alignment - 1;
543 	adjustedSize = size + sizeofIOLibPageMallocHeader;
544 
545 	if (size > adjustedSize) {
546 		address = 0; /* overflow detected */
547 	} else if (adjustedSize >= page_size) {
548 		kr = kernel_memory_allocate(kernel_map, &address,
549 		    size, alignMask, kma_flags, IOMemoryTag(kernel_map));
550 		if (KERN_SUCCESS != kr) {
551 			address = 0;
552 		}
553 #if IOTRACKING
554 		else if (TRACK_ALLOC) {
555 			IOTrackingAlloc(gIOMallocTracking, address, size);
556 		}
557 #endif
558 	} else {
559 		adjustedSize += alignMask;
560 
561 		if (adjustedSize >= page_size) {
562 			kr = kmem_alloc(kernel_map, &allocationAddress,
563 			    adjustedSize, kma_flags, IOMemoryTag(kernel_map));
564 			if (KERN_SUCCESS != kr) {
565 				allocationAddress = 0;
566 			}
567 		} else {
568 			allocationAddress = (vm_address_t) kheap_alloc(kheap,
569 			    adjustedSize, Z_VM_TAG(Z_WAITOK | flags, VM_KERN_MEMORY_IOKIT));
570 		}
571 
572 		if (allocationAddress) {
573 			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
574 			    & (~alignMask);
575 
576 			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
577 			IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
578 #if IOTRACKING
579 			if (TRACK_ALLOC) {
580 				bzero(&hdr->tracking, sizeof(hdr->tracking));
581 				hdr->tracking.address = ~address;
582 				hdr->tracking.size = size;
583 				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
584 			}
585 #endif
586 		} else {
587 			address = 0;
588 		}
589 	}
590 
591 	assert(0 == (address & alignMask));
592 
593 	if (address) {
594 #if IOALLOCDEBUG
595 		OSAddAtomicLong(size, &debug_iomalloc_size);
596 #endif
597 		IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
598 	}
599 
600 	return (void *) address;
601 }
602 
603 void
604 IOFreeAligned_internal(kalloc_heap_t kheap, void * address, vm_size_t size)
605 {
606 	vm_address_t            allocationAddress;
607 	vm_size_t               adjustedSize;
608 	IOLibPageMallocHeader * hdr;
609 
610 	if (!address) {
611 		return;
612 	}
613 
614 	assert(size);
615 
616 	adjustedSize = size + sizeofIOLibPageMallocHeader;
617 	if (adjustedSize >= page_size) {
618 #if IOTRACKING
619 		if (TRACK_ALLOC) {
620 			IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
621 		}
622 #endif
623 		kmem_free(kernel_map, (vm_offset_t) address, size);
624 	} else {
625 		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
626 		allocationAddress = IOMallocAlignedGetAddress(hdr,
627 		    (mach_vm_address_t)address, &adjustedSize);
628 
629 #if IOTRACKING
630 		if (TRACK_ALLOC) {
631 			if (size != hdr->tracking.size) {
632 				OSReportWithBacktrace("bad IOFreeAligned size 0x%zx should be 0x%zx",
633 				    (size_t)size, (size_t)hdr->tracking.size);
634 				size = hdr->tracking.size;
635 			}
636 			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
637 		}
638 #endif
639 		if (adjustedSize >= page_size) {
640 			kmem_free(kernel_map, allocationAddress, adjustedSize);
641 		} else {
642 			kheap_free(kheap, allocationAddress, adjustedSize);
643 		}
644 	}
645 
646 #if IOALLOCDEBUG
647 	OSAddAtomicLong(-size, &debug_iomalloc_size);
648 #endif
649 
650 	IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
651 }
652 
653 void *
654 IOMallocAligned_external(
655 	vm_size_t size, vm_size_t alignment);
656 void *
657 IOMallocAligned_external(
658 	vm_size_t size, vm_size_t alignment)
659 {
660 	return IOMallocAligned_internal(KHEAP_DATA_BUFFERS, size, alignment,
661 	           Z_VM_TAG_BT_BIT);
662 }
663 
664 void
665 IOFreeAligned(
666 	void                  * address,
667 	vm_size_t               size)
668 {
669 	IOFreeAligned_internal(KHEAP_DATA_BUFFERS, address, size);
670 }
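/*
 * Caller-side sketch (illustrative only): IOMallocAligned and IOFreeAligned
 * are used as a pair, and the free must pass the original request size:
 *
 *	void *buf = IOMallocAligned(len, 4096);
 *	if (buf != NULL) {
 *		// ... buf is 4096-byte aligned ...
 *		IOFreeAligned(buf, len);
 *	}
 */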
671 
672 __typed_allocators_ignore_pop
673 
674 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
675 
676 void
677 IOKernelFreePhysical(
678 	kalloc_heap_t         kheap,
679 	mach_vm_address_t     address,
680 	mach_vm_size_t        size)
681 {
682 	vm_address_t       allocationAddress;
683 	vm_size_t          adjustedSize;
684 	IOLibPageMallocHeader * hdr;
685 
686 	if (!address) {
687 		return;
688 	}
689 
690 	assert(size);
691 
692 	adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
693 	if (adjustedSize >= page_size) {
694 #if IOTRACKING
695 		if (TRACK_ALLOC) {
696 			IOTrackingFree(gIOMallocTracking, address, size);
697 		}
698 #endif
699 		kmem_free(kernel_map, (vm_offset_t) address, size);
700 	} else {
701 		hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
702 		allocationAddress = IOMallocAlignedGetAddress(hdr, address, &adjustedSize);
703 #if IOTRACKING
704 		if (TRACK_ALLOC) {
705 			IOTrackingRemoveAddress(gIOMallocTracking, &hdr->tracking, size);
706 		}
707 #endif
708 		__typed_allocators_ignore(kheap_free(kheap, allocationAddress, adjustedSize));
709 	}
710 
711 	IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
712 #if IOALLOCDEBUG
713 	OSAddAtomicLong(-size, &debug_iomalloc_size);
714 #endif
715 }
716 
717 #if __arm64__
718 extern unsigned long gPhysBase, gPhysSize;
719 #endif
720 
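/*
 * IOKernelAllocateWithPhysicalRestrict allocates wired memory with an
 * optional physical-address ceiling (maxPhys) and optional physical
 * contiguity. Contiguous or physically restricted requests go through
 * kmem_alloc_contig / kernel_memory_allocate; small unrestricted requests
 * are carved out of a kalloc heap allocation using the same aligned-header
 * scheme as IOMallocAligned.
 */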
721 mach_vm_address_t
722 IOKernelAllocateWithPhysicalRestrict(
723 	kalloc_heap_t         kheap,
724 	mach_vm_size_t        size,
725 	mach_vm_address_t     maxPhys,
726 	mach_vm_size_t        alignment,
727 	bool                  contiguous)
728 {
729 	kern_return_t           kr;
730 	mach_vm_address_t       address;
731 	mach_vm_address_t       allocationAddress;
732 	mach_vm_size_t          adjustedSize;
733 	mach_vm_address_t       alignMask;
734 	IOLibPageMallocHeader * hdr;
735 
736 	if (size == 0) {
737 		return 0;
738 	}
739 	if (alignment == 0) {
740 		alignment = 1;
741 	}
742 
743 	alignMask = alignment - 1;
744 
745 	if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {
746 		return 0;
747 	}
748 
749 	contiguous = (contiguous && (adjustedSize > page_size))
750 	    || (alignment > page_size);
751 
752 	if (contiguous || maxPhys) {
753 		kma_flags_t options = KMA_ZERO;
754 		vm_offset_t virt;
755 
756 		if (kheap == KHEAP_DATA_BUFFERS) {
757 			options = (kma_flags_t) (options | KMA_DATA);
758 		}
759 
760 		adjustedSize = size;
761 		contiguous = (contiguous && (adjustedSize > page_size))
762 		    || (alignment > page_size);
763 
764 		if (!contiguous) {
765 #if __arm64__
766 			if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {
767 				maxPhys = 0;
768 			} else
769 #endif
770 			if (maxPhys <= 0xFFFFFFFF) {
771 				maxPhys = 0;
772 				options = (kma_flags_t)(options | KMA_LOMEM);
773 			} else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {
774 				maxPhys = 0;
775 			}
776 		}
777 		if (contiguous || maxPhys) {
778 			kr = kmem_alloc_contig(kernel_map, &virt, size,
779 			    alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask),
780 			    options, IOMemoryTag(kernel_map));
781 		} else {
782 			kr = kernel_memory_allocate(kernel_map, &virt,
783 			    size, alignMask, options, IOMemoryTag(kernel_map));
784 		}
785 		if (KERN_SUCCESS == kr) {
786 			address = virt;
787 #if IOTRACKING
788 			if (TRACK_ALLOC) {
789 				IOTrackingAlloc(gIOMallocTracking, address, size);
790 			}
791 #endif
792 		} else {
793 			address = 0;
794 		}
795 	} else {
796 		adjustedSize += alignMask;
797 		if (adjustedSize < size) {
798 			return 0;
799 		}
800 		/* BEGIN IGNORE CODESTYLE */
801 		__typed_allocators_ignore_push // allocator implementation
802 		allocationAddress = (mach_vm_address_t) kheap_alloc(kheap,
803 		    adjustedSize, Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_IOKIT));
804 		__typed_allocators_ignore_pop
805 		/* END IGNORE CODESTYLE */
806 
807 		if (allocationAddress) {
808 			address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
809 			    & (~alignMask);
810 
811 			if (atop_32(address) != atop_32(address + size - 1)) {
812 				address = round_page(address);
813 			}
814 
815 			hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
816 			IOMallocAlignedSetHdr(hdr, alignMask, allocationAddress, address);
817 #if IOTRACKING
818 			if (TRACK_ALLOC) {
819 				bzero(&hdr->tracking, sizeof(hdr->tracking));
820 				hdr->tracking.address = ~address;
821 				hdr->tracking.size    = size;
822 				IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);
823 			}
824 #endif
825 		} else {
826 			address = 0;
827 		}
828 	}
829 
830 	if (address) {
831 		IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
832 #if IOALLOCDEBUG
833 		OSAddAtomicLong(size, &debug_iomalloc_size);
834 #endif
835 	}
836 
837 	return address;
838 }
839 
840 
841 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
842 
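/*
 * When IOMallocContiguous is asked for a physical address it allocates
 * through an IOBufferMemoryDescriptor; these entries remember the
 * virtual-address-to-descriptor mapping so IOFreeContiguous can later find
 * and release the descriptor.
 */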
843 struct _IOMallocContiguousEntry {
844 	mach_vm_address_t          virtualAddr;
845 	IOBufferMemoryDescriptor * md;
846 	queue_chain_t              link;
847 };
848 typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
849 
850 void *
851 IOMallocContiguous(vm_size_t size, vm_size_t alignment,
852     IOPhysicalAddress * physicalAddress)
853 {
854 	mach_vm_address_t   address = 0;
855 
856 	if (size == 0) {
857 		return NULL;
858 	}
859 	if (alignment == 0) {
860 		alignment = 1;
861 	}
862 
863 	/* Do we want a physical address? */
864 	if (!physicalAddress) {
865 		address = IOKernelAllocateWithPhysicalRestrict(KHEAP_DEFAULT,
866 		    size, 0 /*maxPhys*/, alignment, true);
867 	} else {
868 		do {
869 			IOBufferMemoryDescriptor * bmd;
870 			mach_vm_address_t          physicalMask;
871 			vm_offset_t                alignMask;
872 
873 			alignMask = alignment - 1;
874 			physicalMask = (0xFFFFFFFF ^ alignMask);
875 
876 			bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
877 				kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
878 			if (!bmd) {
879 				break;
880 			}
881 
882 			_IOMallocContiguousEntry *
883 			    entry = IOMallocType(_IOMallocContiguousEntry);
884 			if (!entry) {
885 				bmd->release();
886 				break;
887 			}
888 			entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
889 			entry->md          = bmd;
890 			lck_mtx_lock(gIOMallocContiguousEntriesLock);
891 			queue_enter( &gIOMallocContiguousEntries, entry,
892 			    _IOMallocContiguousEntry *, link );
893 			lck_mtx_unlock(gIOMallocContiguousEntriesLock);
894 
895 			address          = (mach_vm_address_t) entry->virtualAddr;
896 			*physicalAddress = bmd->getPhysicalAddress();
897 		}while (false);
898 	}
899 
900 	return (void *) address;
901 }
902 
903 void
904 IOFreeContiguous(void * _address, vm_size_t size)
905 {
906 	_IOMallocContiguousEntry * entry;
907 	IOMemoryDescriptor *       md = NULL;
908 
909 	mach_vm_address_t address = (mach_vm_address_t) _address;
910 
911 	if (!address) {
912 		return;
913 	}
914 
915 	assert(size);
916 
917 	lck_mtx_lock(gIOMallocContiguousEntriesLock);
918 	queue_iterate( &gIOMallocContiguousEntries, entry,
919 	    _IOMallocContiguousEntry *, link )
920 	{
921 		if (entry->virtualAddr == address) {
922 			md   = entry->md;
923 			queue_remove( &gIOMallocContiguousEntries, entry,
924 			    _IOMallocContiguousEntry *, link );
925 			break;
926 		}
927 	}
928 	lck_mtx_unlock(gIOMallocContiguousEntriesLock);
929 
930 	if (md) {
931 		md->release();
932 		IOFreeType(entry, _IOMallocContiguousEntry);
933 	} else {
934 		IOKernelFreePhysical(KHEAP_DEFAULT, (mach_vm_address_t) address, size);
935 	}
936 }
937 
938 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
939 
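/*
 * IOIteratePageableMaps tries the callback against each existing pageable
 * submap, starting at the last successful "hint" index. If every map reports
 * KERN_NO_SPACE, it grows gIOKitPageableSpace by creating another pageable
 * submap, preferring the statically registered fixed ranges before falling
 * back to VM_FLAGS_ANYWHERE.
 */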
940 kern_return_t
941 IOIteratePageableMaps(vm_size_t size,
942     IOIteratePageableMapsCallback callback, void * ref)
943 {
944 	kern_return_t       kr = kIOReturnNotReady;
945 	kmem_return_t       kmr;
946 	vm_size_t           segSize;
947 	UInt32              attempts;
948 	UInt32              index;
949 	mach_vm_offset_t    min;
950 	int                 flags;
951 
952 	if (size > kIOPageableMaxMapSize) {
953 		return kIOReturnBadArgument;
954 	}
955 
956 	do {
957 		index = gIOKitPageableSpace.hint;
958 		attempts = gIOKitPageableSpace.count;
959 		while (attempts--) {
960 			kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
961 			if (KERN_SUCCESS == kr) {
962 				gIOKitPageableSpace.hint = index;
963 				break;
964 			}
965 			if (index) {
966 				index--;
967 			} else {
968 				index = gIOKitPageableSpace.count - 1;
969 			}
970 		}
971 		if (KERN_NO_SPACE != kr) {
972 			break;
973 		}
974 
975 		lck_mtx_lock( gIOKitPageableSpace.lock );
976 
977 		index = gIOKitPageableSpace.count;
978 		if (index >= kIOMaxPageableMaps) {
979 			lck_mtx_unlock( gIOKitPageableSpace.lock );
980 			break;
981 		}
982 
983 		if (size < kIOPageableMapSize) {
984 			segSize = kIOPageableMapSize;
985 		} else {
986 			segSize = size;
987 		}
988 
989 		/*
990 		 * Use the predefined ranges if available, else default to data
991 		 */
992 		if (index < kIOMaxFixedRanges) {
993 			min = gIOKitPageableFixedRanges[index].min_address;
994 			flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
995 		} else {
996 			min = 0;
997 			flags = VM_FLAGS_ANYWHERE;
998 		}
999 		kmr = kmem_suballoc(kernel_map,
1000 		    &min,
1001 		    segSize,
1002 		    VM_MAP_CREATE_PAGEABLE,
1003 		    flags,
1004 		    (kms_flags_t)(KMS_PERMANENT | KMS_DATA),
1005 		    VM_KERN_MEMORY_IOKIT);
1006 		if (kmr.kmr_return != KERN_SUCCESS) {
1007 			kr = kmr.kmr_return;
1008 			lck_mtx_unlock( gIOKitPageableSpace.lock );
1009 			break;
1010 		}
1011 
1012 		gIOKitPageableSpace.maps[index].map     = kmr.kmr_submap;
1013 		gIOKitPageableSpace.maps[index].address = min;
1014 		gIOKitPageableSpace.maps[index].end     = min + segSize;
1015 		gIOKitPageableSpace.hint                = index;
1016 		gIOKitPageableSpace.count               = index + 1;
1017 
1018 		lck_mtx_unlock( gIOKitPageableSpace.lock );
1019 	} while (true);
1020 
1021 	return kr;
1022 }
1023 
1024 struct IOMallocPageableRef {
1025 	vm_offset_t address;
1026 	vm_size_t   size;
1027 	vm_tag_t    tag;
1028 };
1029 
1030 static kern_return_t
1031 IOMallocPageableCallback(vm_map_t map, void * _ref)
1032 {
1033 	struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
1034 	kma_flags_t flags = (kma_flags_t)(KMA_PAGEABLE | KMA_DATA);
1035 
1036 	return kmem_alloc( map, &ref->address, ref->size, flags, ref->tag );
1037 }
1038 
1039 static void *
1040 IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
1041 {
1042 	kern_return_t              kr = kIOReturnNotReady;
1043 	struct IOMallocPageableRef ref;
1044 
1045 	if (alignment > page_size) {
1046 		return NULL;
1047 	}
1048 	if (size > kIOPageableMaxMapSize) {
1049 		return NULL;
1050 	}
1051 
1052 	ref.size = size;
1053 	ref.tag  = tag;
1054 	kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
1055 	if (kIOReturnSuccess != kr) {
1056 		ref.address = 0;
1057 	}
1058 
1059 	return (void *) ref.address;
1060 }
1061 
1062 vm_map_t
1063 IOPageableMapForAddress( uintptr_t address )
1064 {
1065 	vm_map_t    map = NULL;
1066 	UInt32      index;
1067 
1068 	for (index = 0; index < gIOKitPageableSpace.count; index++) {
1069 		if ((address >= gIOKitPageableSpace.maps[index].address)
1070 		    && (address < gIOKitPageableSpace.maps[index].end)) {
1071 			map = gIOKitPageableSpace.maps[index].map;
1072 			break;
1073 		}
1074 	}
1075 	if (!map) {
1076 		panic("IOPageableMapForAddress: null");
1077 	}
1078 
1079 	return map;
1080 }
1081 
1082 static void
1083 IOFreePageablePages(void * address, vm_size_t size)
1084 {
1085 	vm_map_t map;
1086 
1087 	map = IOPageableMapForAddress((vm_address_t) address);
1088 	if (map) {
1089 		kmem_free( map, (vm_offset_t) address, size);
1090 	}
1091 }
1092 
1093 #if defined(__x86_64__)
1094 static uintptr_t
1095 IOMallocOnePageablePage(kalloc_heap_t kheap __unused, iopa_t * a)
1096 {
1097 	return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);
1098 }
1099 #endif /* defined(__x86_64__) */
1100 
1101 static void *
1102 IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
1103 {
1104 	void * addr;
1105 
1106 	if (((uint32_t) alignment) != alignment) {
1107 		return NULL;
1108 	}
1109 #if defined(__x86_64__)
1110 	if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
1111 	    alignment > page_size) {
1112 		addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
1113 		/* Memory allocated this way will already be zeroed. */
1114 	} else {
1115 		addr = ((void *) iopa_alloc(&gIOPageablePageAllocator,
1116 		    &IOMallocOnePageablePage, KHEAP_DEFAULT, size, (uint32_t) alignment));
1117 		if (addr && zeroed) {
1118 			bzero(addr, size);
1119 		}
1120 	}
1121 #else /* !defined(__x86_64__) */
1122 	vm_size_t allocSize = size;
1123 	if (allocSize == 0) {
1124 		allocSize = 1;
1125 	}
1126 	addr = IOMallocPageablePages(allocSize, alignment, IOMemoryTag(kernel_map));
1127 	/* already zeroed */
1128 #endif /* defined(__x86_64__) */
1129 
1130 	if (addr) {
1131 #if IOALLOCDEBUG
1132 		OSAddAtomicLong(size, &debug_iomallocpageable_size);
1133 #endif
1134 		IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
1135 	}
1136 
1137 	return addr;
1138 }
1139 
1140 void *
1141 IOMallocPageable(vm_size_t size, vm_size_t alignment)
1142 {
1143 	return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);
1144 }
1145 
1146 void *
1147 IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
1148 {
1149 	return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);
1150 }
1151 
1152 void
1153 IOFreePageable(void * address, vm_size_t size)
1154 {
1155 #if IOALLOCDEBUG
1156 	OSAddAtomicLong(-size, &debug_iomallocpageable_size);
1157 #endif
1158 	IOStatisticsAlloc(kIOStatisticsFreePageable, size);
1159 
1160 #if defined(__x86_64__)
1161 	if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
1162 		address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
1163 		size = page_size;
1164 	}
1165 	if (address) {
1166 		IOFreePageablePages(address, size);
1167 	}
1168 #else /* !defined(__x86_64__) */
1169 	if (size == 0) {
1170 		size = 1;
1171 	}
1172 	if (address) {
1173 		IOFreePageablePages(address, size);
1174 	}
1175 #endif /* defined(__x86_64__) */
1176 }
1177 
1178 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1179 
1180 void *
1181 IOMallocData_external(
1182 	vm_size_t size);
1183 void *
1184 IOMallocData_external(vm_size_t size)
1185 {
1186 	return IOMalloc_internal(KHEAP_DATA_BUFFERS, size, Z_VM_TAG_BT_BIT);
1187 }
1188 
1189 void *
1190 IOMallocZeroData_external(
1191 	vm_size_t size);
1192 void *
1193 IOMallocZeroData_external(vm_size_t size)
1194 {
1195 	return IOMalloc_internal(KHEAP_DATA_BUFFERS, size, Z_ZERO_VM_TAG_BT_BIT);
1196 }
1197 
1198 void
1199 IOFreeData(void * address, vm_size_t size)
1200 {
1201 	return IOFree_internal(KHEAP_DATA_BUFFERS, address, size);
1202 }
1203 
1204 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1205 
1206 __typed_allocators_ignore_push // allocator implementation
1207 
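/*
 * IOMallocType/IOFreeType route through the kalloc_type typed allocator via
 * its external-symbol entry points, since views generated at external
 * callsites may not have been processed at boot. Types that are not
 * VM-backed (per IOMallocType_from_vm) are allocated with Z_NOFAIL and are
 * therefore expected to always succeed.
 */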
1208 void *
1209 IOMallocTypeImpl(kalloc_type_view_t kt_view)
1210 {
1211 #if IOTRACKING
1212 	/*
1213 	 * When leak detection is on, default to using IOMalloc, as the kalloc
1214 	 * type infrastructure isn't aware of the additional space needed for
1215 	 * the tracking header.
1216 	 */
1217 	if (TRACK_ALLOC) {
1218 		uint32_t kt_size = kalloc_type_get_size(kt_view->kt_size);
1219 		void *mem = IOMalloc_internal(KHEAP_DEFAULT, kt_size, Z_ZERO);
1220 		if (!IOMallocType_from_vm(kt_view)) {
1221 			assert(mem);
1222 		}
1223 		return mem;
1224 	}
1225 #endif
1226 	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);
1227 	if (!IOMallocType_from_vm(kt_view)) {
1228 		kt_flags = (zalloc_flags_t) (kt_flags | Z_NOFAIL);
1229 	}
1230 	/*
1231 	 * Use external symbol for kalloc_type_impl as
1232 	 * kalloc_type_views generated at some external callsites
1233 	 * may not have been processed during boot.
1234 	 */
1235 	return kalloc_type_impl_external(kt_view, kt_flags);
1236 }
1237 
1238 void
1239 IOFreeTypeImpl(kalloc_type_view_t kt_view, void * address)
1240 {
1241 #if IOTRACKING
1242 	if (TRACK_ALLOC) {
1243 		return IOFree_internal(KHEAP_DEFAULT, address,
1244 		           kalloc_type_get_size(kt_view->kt_size));
1245 	}
1246 #endif
1247 	/*
1248 	 * Use external symbol for kalloc_type_impl as
1249 	 * kalloc_type_views generated at some external callsites
1250 	 * may not have been processed during boot.
1251 	 */
1252 	return kfree_type_impl_external(kt_view, address);
1253 }
1254 
1255 void *
1256 IOMallocTypeVarImpl(kalloc_type_var_view_t kt_view, vm_size_t size)
1257 {
1258 #if IOTRACKING
1259 	/*
1260 	 * When leak detection is on, default to using IOMalloc, as the kalloc
1261 	 * type infrastructure isn't aware of the additional space needed for
1262 	 * the tracking header.
1263 	 */
1264 	if (TRACK_ALLOC) {
1265 		return IOMalloc_internal(KHEAP_DEFAULT, size, Z_ZERO);
1266 	}
1267 #endif
1268 	zalloc_flags_t kt_flags = (zalloc_flags_t) (Z_WAITOK | Z_ZERO);
1269 
1270 	kt_flags = Z_VM_TAG_BT(kt_flags, VM_KERN_MEMORY_KALLOC_TYPE);
1271 	return kalloc_type_var_impl(kt_view, size, kt_flags, NULL);
1272 }
1273 
1274 void
1275 IOFreeTypeVarImpl(kalloc_type_var_view_t kt_view, void * address,
1276     vm_size_t size)
1277 {
1278 #if IOTRACKING
1279 	if (TRACK_ALLOC) {
1280 		return IOFree_internal(KHEAP_DEFAULT, address, size);
1281 	}
1282 #endif
1283 
1284 	return kfree_type_var_impl(kt_view, address, size);
1285 }
1286 
1287 __typed_allocators_ignore_pop
1288 
1289 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1290 
1291 #if defined(__x86_64__)
1292 
1293 
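/*
 * iopa_t is a small sub-page allocator (built only on x86_64 here): each
 * backing page is split into 64 chunks of gIOPageAllocChunkBytes
 * (PAGE_SIZE / 64), tracked by a 64-bit "avail" bitmap whose most
 * significant bit is chunk 0. The iopa_page_t bookkeeping lives in the last
 * chunk of the page, which is why a fresh page starts with avail = -2ULL
 * (every chunk free except the last).
 */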
1294 extern "C" void
1295 iopa_init(iopa_t * a)
1296 {
1297 	bzero(a, sizeof(*a));
1298 	a->lock = IOLockAlloc();
1299 	queue_init(&a->list);
1300 }
1301 
1302 static uintptr_t
1303 iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
1304 {
1305 	uint32_t n, s;
1306 	uint64_t avail = pa->avail;
1307 
1308 	assert(avail);
1309 
1310 	// find runs of count consecutive 1-bits in avail
1311 	for (n = count; n > 1; n -= s) {
1312 		s = n >> 1;
1313 		avail = avail & (avail << s);
1314 	}
1315 	// and aligned
1316 	avail &= align;
1317 
1318 	if (avail) {
1319 		n = __builtin_clzll(avail);
1320 		pa->avail &= ~((-1ULL << (64 - count)) >> n);
1321 		if (!pa->avail && pa->link.next) {
1322 			remque(&pa->link);
1323 			pa->link.next = NULL;
1324 		}
1325 		return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);
1326 	}
1327 
1328 	return 0;
1329 }
1330 
1331 uintptr_t
1332 iopa_alloc(
1333 	iopa_t          * a,
1334 	iopa_proc_t       alloc,
1335 	kalloc_heap_t     kheap,
1336 	vm_size_t         bytes,
1337 	vm_size_t         balign)
1338 {
1339 	static const uint64_t align_masks[] = {
1340 		0xFFFFFFFFFFFFFFFF,
1341 		0xAAAAAAAAAAAAAAAA,
1342 		0x8888888888888888,
1343 		0x8080808080808080,
1344 		0x8000800080008000,
1345 		0x8000000080000000,
1346 		0x8000000000000000,
1347 	};
1348 	iopa_page_t * pa;
1349 	uintptr_t     addr = 0;
1350 	uint32_t      count;
1351 	uint64_t      align;
1352 	vm_size_t     align_masks_idx;
1353 
1354 	if (((uint32_t) bytes) != bytes) {
1355 		return 0;
1356 	}
1357 	if (!bytes) {
1358 		bytes = 1;
1359 	}
1360 	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
1361 
1362 	align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
1363 	assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
1364 	align = align_masks[align_masks_idx];
1365 
1366 	IOLockLock(a->lock);
1367 	__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
1368 	while (!queue_end(&a->list, &pa->link)) {
1369 		addr = iopa_allocinpage(pa, count, align);
1370 		if (addr) {
1371 			a->bytecount += bytes;
1372 			break;
1373 		}
1374 		__IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));
1375 	}
1376 	IOLockUnlock(a->lock);
1377 
1378 	if (!addr) {
1379 		addr = alloc(kheap, a);
1380 		if (addr) {
1381 			pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
1382 			pa->signature = kIOPageAllocSignature;
1383 			pa->avail     = -2ULL;
1384 
1385 			addr = iopa_allocinpage(pa, count, align);
1386 			IOLockLock(a->lock);
1387 			if (pa->avail) {
1388 				enqueue_head(&a->list, &pa->link);
1389 			}
1390 			a->pagecount++;
1391 			if (addr) {
1392 				a->bytecount += bytes;
1393 			}
1394 			IOLockUnlock(a->lock);
1395 		}
1396 	}
1397 
1398 	assert((addr & ((1 << log2up(balign)) - 1)) == 0);
1399 	return addr;
1400 }
1401 
1402 uintptr_t
1403 iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
1404 {
1405 	iopa_page_t * pa;
1406 	uint32_t      count;
1407 	uintptr_t     chunk;
1408 
1409 	if (((uint32_t) bytes) != bytes) {
1410 		return 0;
1411 	}
1412 	if (!bytes) {
1413 		bytes = 1;
1414 	}
1415 
1416 	chunk = (addr & page_mask);
1417 	assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));
1418 
1419 	pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
1420 	assert(kIOPageAllocSignature == pa->signature);
1421 
1422 	count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
1423 	chunk /= gIOPageAllocChunkBytes;
1424 
1425 	IOLockLock(a->lock);
1426 	if (!pa->avail) {
1427 		assert(!pa->link.next);
1428 		enqueue_tail(&a->list, &pa->link);
1429 	}
1430 	pa->avail |= ((-1ULL << (64 - count)) >> chunk);
1431 	if (pa->avail != -2ULL) {
1432 		pa = NULL;
1433 	} else {
1434 		remque(&pa->link);
1435 		pa->link.next = NULL;
1436 		pa->signature = 0;
1437 		a->pagecount--;
1438 		// page to free
1439 		pa = (typeof(pa))trunc_page(pa);
1440 	}
1441 	a->bytecount -= bytes;
1442 	IOLockUnlock(a->lock);
1443 
1444 	return (uintptr_t) pa;
1445 }
1446 
1447 #endif /* defined(__x86_64__) */
1448 
1449 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1450 
1451 IOReturn
1452 IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
1453     IOByteCount length, IOOptionBits cacheMode )
1454 {
1455 	IOReturn    ret = kIOReturnSuccess;
1456 	ppnum_t     pagenum;
1457 
1458 	if (task != kernel_task) {
1459 		return kIOReturnUnsupported;
1460 	}
1461 	if ((address | length) & PAGE_MASK) {
1462 //	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
1463 		return kIOReturnUnsupported;
1464 	}
1465 	length = round_page(address + length) - trunc_page( address );
1466 	address = trunc_page( address );
1467 
1468 	// make map mode
1469 	cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
1470 
1471 	while ((kIOReturnSuccess == ret) && (length > 0)) {
1472 		// Get the physical page number
1473 		pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
1474 		if (pagenum) {
1475 			ret = IOUnmapPages( get_task_map(task), address, page_size );
1476 			ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );
1477 		} else {
1478 			ret = kIOReturnVMError;
1479 		}
1480 
1481 		address += page_size;
1482 		length -= page_size;
1483 	}
1484 
1485 	return ret;
1486 }
1487 
1488 
1489 IOReturn
1490 IOFlushProcessorCache( task_t task, IOVirtualAddress address,
1491     IOByteCount length )
1492 {
1493 	if (task != kernel_task) {
1494 		return kIOReturnUnsupported;
1495 	}
1496 
1497 	flush_dcache64((addr64_t) address, (unsigned) length, false );
1498 
1499 	return kIOReturnSuccess;
1500 }
1501 
1502 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1503 
1504 vm_offset_t
1505 OSKernelStackRemaining( void )
1506 {
1507 	return ml_stack_remaining();
1508 }
1509 
1510 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1511 
1512 /*
1513  * Sleep the calling thread for the indicated number of milliseconds.
1514  */
1515 void
1516 IOSleep(unsigned milliseconds)
1517 {
1518 	delay_for_interval(milliseconds, kMillisecondScale);
1519 }
1520 
1521 /*
1522  * Sleep the calling thread for the indicated number of milliseconds, and
1523  * potentially an additional number of milliseconds up to the leeway value.
1524  */
1525 void
1526 IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
1527 {
1528 	delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
1529 }
1530 
1531 /*
1532  * Spin for indicated number of microseconds.
1533  */
1534 void
1535 IODelay(unsigned microseconds)
1536 {
1537 	delay_for_interval(microseconds, kMicrosecondScale);
1538 }
1539 
1540 /*
1541  * Spin for indicated number of nanoseconds.
1542  */
1543 void
1544 IOPause(unsigned nanoseconds)
1545 {
1546 	delay_for_interval(nanoseconds, kNanosecondScale);
1547 }
1548 
1549 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1550 
1551 static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);
1552 
1553 __attribute__((noinline, not_tail_called))
1554 void
1555 IOLog(const char *format, ...)
1556 {
1557 	void *caller = __builtin_return_address(0);
1558 	va_list ap;
1559 
1560 	va_start(ap, format);
1561 	_IOLogv(format, ap, caller);
1562 	va_end(ap);
1563 }
1564 
1565 __attribute__((noinline, not_tail_called))
1566 void
1567 IOLogv(const char *format, va_list ap)
1568 {
1569 	void *caller = __builtin_return_address(0);
1570 	_IOLogv(format, ap, caller);
1571 }
1572 
1573 void
1574 _IOLogv(const char *format, va_list ap, void *caller)
1575 {
1576 	va_list ap2;
1577 	struct console_printbuf_state info_data;
1578 	console_printbuf_state_init(&info_data, TRUE, TRUE);
1579 
1580 	va_copy(ap2, ap);
1581 
1582 #pragma clang diagnostic push
1583 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1584 	os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
1585 #pragma clang diagnostic pop
1586 
1587 	if (!disable_iolog_serial_output) {
1588 		__doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
1589 		console_printbuf_clear(&info_data);
1590 	}
1591 	va_end(ap2);
1592 
1593 	assertf(ml_get_interrupts_enabled() || ml_is_quiescing() ||
1594 	    debug_mode_active() || !gCPUsRunning,
1595 	    "IOLog called with interrupts disabled");
1596 }
1597 
1598 #if !__LP64__
1599 void
1600 IOPanic(const char *reason)
1601 {
1602 	panic("%s", reason);
1603 }
1604 #endif
1605 
1606 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1607 
1608 void
1609 IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
1610     void (*output)(const char *format, ...))
1611 {
1612 	size_t idx, linestart;
1613 	enum { bytelen = (sizeof("0xZZ, ") - 1) };
1614 	char hex[(bytelen * 16) + 1];
1615 	uint8_t c, chars[17];
1616 
1617 	output("%s(0x%lx):\n", title, size);
1618 	output("              0     1     2     3     4     5     6     7     8     9     A     B     C     D     E     F\n");
1619 	if (size > 4096) {
1620 		size = 4096;
1621 	}
1622 	chars[16] = 0;
1623 	for (idx = 0, linestart = 0; idx < size;) {
1624 		c = ((char *)buffer)[idx];
1625 		snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
1626 		chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
1627 		idx++;
1628 		if ((idx == size) || !(idx & 15)) {
1629 			if (idx & 15) {
1630 				chars[idx & 15] = 0;
1631 			}
1632 			output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);
1633 			linestart += 16;
1634 		}
1635 	}
1636 }
1637 
1638 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1639 
1640 /*
1641  * Convert an integer constant (typically a #define or enum) to a string.
1642  */
1643 static char noValue[80];        // that's pretty
1644 
1645 const char *
1646 IOFindNameForValue(int value, const IONamedValue *regValueArray)
1647 {
1648 	for (; regValueArray->name; regValueArray++) {
1649 		if (regValueArray->value == value) {
1650 			return regValueArray->name;
1651 		}
1652 	}
1653 	snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
1654 	return (const char *)noValue;
1655 }
1656 
1657 IOReturn
1658 IOFindValueForName(const char *string,
1659     const IONamedValue *regValueArray,
1660     int *value)
1661 {
1662 	for (; regValueArray->name; regValueArray++) {
1663 		if (!strcmp(regValueArray->name, string)) {
1664 			*value = regValueArray->value;
1665 			return kIOReturnSuccess;
1666 		}
1667 	}
1668 	return kIOReturnBadArgument;
1669 }
1670 
1671 OSString *
1672 IOCopyLogNameForPID(int pid)
1673 {
1674 	char   buf[128];
1675 	size_t len;
1676 	snprintf(buf, sizeof(buf), "pid %d, ", pid);
1677 	len = strlen(buf);
1678 	proc_name(pid, buf + len, (int) (sizeof(buf) - len));
1679 	return OSString::withCString(buf);
1680 }
1681 
1682 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1683 
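/*
 * IOSizeToAlignment returns the log2 of the largest power of two that fits
 * in size (e.g. IOSizeToAlignment(4096) == 12, IOSizeToAlignment(4097) == 12),
 * and IOAlignmentToSize is its inverse (IOAlignmentToSize(12) == 4096).
 */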
1684 IOAlignment
1685 IOSizeToAlignment(unsigned int size)
1686 {
1687 	int shift;
1688 	const int intsize = sizeof(unsigned int) * 8;
1689 
1690 	for (shift = 1; shift < intsize; shift++) {
1691 		if (size & 0x80000000) {
1692 			return (IOAlignment)(intsize - shift);
1693 		}
1694 		size <<= 1;
1695 	}
1696 	return 0;
1697 }
1698 
1699 unsigned int
1700 IOAlignmentToSize(IOAlignment align)
1701 {
1702 	unsigned int size;
1703 
1704 	for (size = 1; align; align--) {
1705 		size <<= 1;
1706 	}
1707 	return size;
1708 }
1709 } /* extern "C" */
1710