/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define IOKIT_ENABLE_SHARED_PTR

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

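/*
 * Internal flags recorded in _internalFlags.  They track how _buffer was
 * obtained (physical/contiguous allocation, page-pool allocation, guard-page
 * allocation, and so on) so that free() can release it the same way.
 */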
enum {
	kInternalFlagPhysical      = 0x00000001,
	kInternalFlagPageSized     = 0x00000002,
	kInternalFlagPageAllocated = 0x00000004,
	kInternalFlagInit          = 0x00000008,
	kInternalFlagHasPointers   = 0x00000010,
	kInternalFlagGuardPages    = 0x00000020,
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

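/*
 * On x86_64, IOBMDPageProc supplies single zero-filled kernel pages to the
 * gIOBMDPageAllocator pool used below for small mapped or shared buffer
 * allocations.
 */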
#if defined(__x86_64__)
static uintptr_t
IOBMDPageProc(kalloc_heap_t kheap, iopa_t * a)
{
	kern_return_t kr;
	vm_address_t  vmaddr  = 0;

	kr = kernel_memory_allocate(kheap->kh_fallback_map, &vmaddr,
	    page_size, 0, (kma_flags_t) (KMA_NONE | KMA_ZERO), VM_KERN_MEMORY_IOKIT);

	if (KERN_SUCCESS != kr) {
		vmaddr = 0;
	}

	return (uintptr_t) vmaddr;
}
#endif /* defined(__x86_64__) */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	task_t       inTask)
{
	mach_vm_address_t physicalMask = 0;
	return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

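/*
 * withCopy:
 *
 * Creates a buffer memory descriptor from a copy of `size` bytes at `source`
 * in `sourceMap`.  The copy is mapped into `inTask` and described as a single
 * virtual range by the returned descriptor.
 */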
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCopy(
	task_t            inTask,
	IOOptionBits      options,
	vm_map_t          sourceMap,
	mach_vm_address_t source,
	mach_vm_size_t    size)
{
	OSSharedPtr<IOBufferMemoryDescriptor> inst;
	kern_return_t              err;
	vm_map_copy_t              copy;
	vm_map_address_t           address;

	copy = NULL;
	do {
		err = kIOReturnNoMemory;
		inst = OSMakeShared<IOBufferMemoryDescriptor>();
		if (!inst) {
			break;
		}
		inst->_ranges.v64 = IOMallocType(IOAddressRange);

		err = vm_map_copyin(sourceMap, source, size,
		    false /* src_destroy */, &copy);
		if (KERN_SUCCESS != err) {
			break;
		}

		err = vm_map_copyout(get_task_map(inTask), &address, copy);
		if (KERN_SUCCESS != err) {
			break;
		}
		copy = NULL;

		inst->_ranges.v64->address = address;
		inst->_ranges.v64->length  = size;

		if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
			err = kIOReturnError;
		}
	} while (false);

	if (KERN_SUCCESS == err) {
		return inst;
	}

	if (copy) {
		vm_map_copy_discard(copy);
	}

	return nullptr;
}


bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	mach_vm_address_t physicalMask)
{
	task_t                mapTask = NULL;
	kalloc_heap_t         kheap = KHEAP_DATA_BUFFERS;
	mach_vm_address_t     highestMask = 0;
	IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
	IODMAMapSpecification mapSpec;
	bool                  mapped = false;
	bool                  withCopy = false;
	bool                  mappedOrShared = false;

	/*
	 * Temporarily use default heap on intel due to rdar://74982985
	 */
#if __x86_64__
	kheap = KHEAP_DEFAULT;
#endif

	if (!capacity) {
		return false;
	}

	/*
	 * The IOKit constructor requests zeroed memory from the allocator,
	 * so the members of this class do not need to be explicitly zeroed.
	 */
	_options          = options;
	_capacity         = capacity;

	if (!_ranges.v64) {
		_ranges.v64 = IOMallocType(IOAddressRange);
		_ranges.v64->address = 0;
		_ranges.v64->length  = 0;
	} else {
		if (!_ranges.v64->address) {
			return false;
		}
		if (!(kIOMemoryPageable & options)) {
			return false;
		}
		if (!inTask) {
			return false;
		}
		_buffer = (void *) _ranges.v64->address;
		withCopy = true;
	}

	/*
	 * Set kalloc_heap to default if allocation contains pointers
	 */
	if (kInternalFlagHasPointers & _internalFlags) {
		kheap = KHEAP_DEFAULT;
	}

	// make sure super::free doesn't dealloc _ranges before super::init
	_flags = kIOMemoryAsReference;

	// Grab IOMD bits from the Buffer MD options
	iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);

	if (!(kIOMemoryMapperNone & options)) {
		IOMapper::checkForSystemMapper();
		mapped = (NULL != IOMapper::gSystem);
	}

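	/*
	 * Derive an alignment and an upper address bound from the physical mask
	 * when no explicit alignment was requested.  Illustrative example (not
	 * from the original source): a physicalMask of 0xFFFFF000 yields an
	 * alignment of 0x1000 and a highestMask of 0xFFFFFFFF, i.e. 4K-aligned
	 * memory addressable with 32 bits; the alignment is then raised to
	 * page_size if it is smaller.
	 */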
	if (physicalMask && (alignment <= 1)) {
		alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
		highestMask = (physicalMask | alignment);
		alignment++;
		if (alignment < page_size) {
			alignment = page_size;
		}
	}

	if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
		alignment = page_size;
	}

	if (alignment >= page_size) {
		if (round_page_overflow(capacity, &capacity)) {
			return false;
		}
	}

	if (alignment > page_size) {
		options |= kIOMemoryPhysicallyContiguous;
	}

	_alignment = alignment;

	if ((capacity + alignment) < _capacity) {
		return false;
	}

	if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
		return false;
	}

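	/*
	 * Describe the allocation's alignment and addressability constraints so
	 * they can be handed to any system mapper (see kIOMDAddDMAMapSpec below).
	 */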
	bzero(&mapSpec, sizeof(mapSpec));
	mapSpec.alignment      = _alignment;
	mapSpec.numAddressBits = 64;
	if (highestMask && mapped) {
		if (highestMask <= 0xFFFFFFFF) {
			mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask));
		} else {
			mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32)));
		}
		highestMask = 0;
	}

	// set memory entry cache mode, pageable, purgeable
	iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
	if (options & kIOMemoryPageable) {
		if (_internalFlags & kInternalFlagGuardPages) {
			printf("IOBMD: Unsupported use of guard pages with pageable memory.\n");
			return false;
		}
		iomdOptions |= kIOMemoryBufferPageable;
		if (options & kIOMemoryPurgeable) {
			iomdOptions |= kIOMemoryBufferPurgeable;
		}
	} else {
		// Buffers shouldn't auto-prepare; they should be prepared explicitly.
		// That was never enforced, though, so keep the historical behavior.
		iomdOptions |= kIOMemoryAutoPrepare;

		/* Allocate a wired-down buffer inside kernel space. */

		bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

		if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
			contig |= (!mapped);
			contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
			// treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
			contig |= true;
#endif
		}

		mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
		if (contig || highestMask || (alignment > page_size)) {
			if (_internalFlags & kInternalFlagGuardPages) {
				printf("IOBMD: Unsupported use of guard pages with physical mask or contiguous memory.\n");
				return false;
			}
			_internalFlags |= kInternalFlagPhysical;
			if (highestMask) {
				_internalFlags |= kInternalFlagPageSized;
				if (round_page_overflow(capacity, &capacity)) {
					return false;
				}
			}
			_buffer = (void *) IOKernelAllocateWithPhysicalRestrict(kheap,
			    capacity, highestMask, alignment, contig);
		} else if (_internalFlags & kInternalFlagGuardPages) {
			vm_offset_t address = 0;
			kern_return_t kr;
			uintptr_t alignMask;

			if (((uint32_t) alignment) != alignment) {
				return false;
			}

			alignMask = (1UL << log2up((uint32_t) alignment)) - 1;
			kr = kernel_memory_allocate(kheap->kh_fallback_map, &address,
			    capacity + page_size * 2, alignMask, (kma_flags_t)(KMA_GUARD_FIRST | KMA_GUARD_LAST), IOMemoryTag(kernel_map));
			if (kr != KERN_SUCCESS || address == 0) {
				return false;
			}
#if IOALLOCDEBUG
			OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
			_buffer = (void *)(address + page_size);
#if defined(__x86_64__)
		} else if (mappedOrShared
		    && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) {
			_internalFlags |= kInternalFlagPageAllocated;
			_buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator,
			    &IOBMDPageProc, kheap, capacity, alignment);
			if (_buffer) {
				IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
				OSAddAtomicLong(capacity, &debug_iomalloc_size);
#endif
			}
#endif /* defined(__x86_64__) */
		} else if (alignment > 1) {
			_buffer = IOMallocAligned_internal(kheap, capacity, alignment);
		} else {
			_buffer = IOMalloc_internal(kheap, capacity);
		}
		if (!_buffer) {
			return false;
		}
		bzero(_buffer, capacity);
	}

	if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
		vm_size_t       size = round_page(capacity);

		// initWithOptions will create memory entry
		if (!withCopy) {
			iomdOptions |= kIOMemoryPersistent;
		}

		if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
			OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
			if (!withCopy) {
				mapTask = inTask;
			}
			if (NULL == inTask) {
				inTask = kernel_task;
			}
		} else if (options & kIOMapCacheMask) {
			// Prefetch each page to put entries into the pmap
			volatile UInt8 *    startAddr = (UInt8 *)_buffer;
			volatile UInt8 *    endAddr   = (UInt8 *)_buffer + capacity;

			while (startAddr < endAddr) {
				UInt8 dummyVar = *startAddr;
				(void) dummyVar;
				startAddr += page_size;
			}
		}
	}

	_ranges.v64->address = (mach_vm_address_t) pgz_decode(_buffer, _capacity);
	_ranges.v64->length  = _capacity;

	if (!super::initWithOptions(_ranges.v64, 1, 0,
	    inTask, iomdOptions, /* System mapper */ NULL)) {
		return false;
	}

	_internalFlags |= kInternalFlagInit;
#if IOTRACKING
	if (!(options & kIOMemoryPageable)) {
		trackingAccumSize(capacity);
	}
#endif /* IOTRACKING */

	// give any system mapper the allocation params
	if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
	    &mapSpec, sizeof(mapSpec))) {
		return false;
	}

	if (mapTask) {
		if (!reserved) {
			reserved = IOMallocType(ExpansionData);
			if (!reserved) {
				return false;
			}
		}
		reserved->map = createMappingInTask(mapTask, 0,
		    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach();
		if (!reserved->map) {
			_buffer = NULL;
			return false;
		}
		release();  // map took a retain on this
		reserved->map->retain();
		removeMapping(reserved->map);
		mach_vm_address_t buffer = reserved->map->getAddress();
		_buffer = (void *) buffer;
		if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
			_ranges.v64->address = buffer;
		}
	}

	setLength(_capacity);

	return true;
}

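/*
 * initControlWithPhysicalMask:
 *
 * Variant of initWithPhysicalMask for buffers that contain kernel pointers;
 * it tags the allocation so it is taken from the default kalloc heap rather
 * than the data-buffers heap.
 */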
bool
IOBufferMemoryDescriptor::initControlWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	mach_vm_address_t physicalMask)
{
	_internalFlags = kInternalFlagHasPointers;
	return initWithPhysicalMask(inTask, options, capacity, alignment,
	           physicalMask);
}

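/*
 * initWithGuardPages:
 *
 * Variant of initWithPhysicalMask that rounds the capacity up to whole pages
 * and allocates the buffer with inaccessible guard pages on either side.
 * Incompatible with pageable, contiguous, and physical-mask allocations.
 */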
bool
IOBufferMemoryDescriptor::initWithGuardPages(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity)
{
	mach_vm_size_t roundedCapacity;

	_internalFlags = kInternalFlagGuardPages;

	if (round_page_overflow(capacity, &roundedCapacity)) {
		return false;
	}

	return initWithPhysicalMask(inTask, options, roundedCapacity, page_size,
	           (mach_vm_address_t)0);
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
		me.reset();
	}
	return me;
}
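
/*
 * Illustrative (not part of the original source) driver-side use of the
 * factory above; the size and option bits are arbitrary examples:
 *
 *   OSSharedPtr<IOBufferMemoryDescriptor> bmd =
 *       IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
 *           kIODirectionInOut | kIOMemoryKernelUserShared, 4096, page_size);
 *   if (bmd && (kIOReturnSuccess == bmd->prepare())) {
 *       void * data = bmd->getBytesNoCopy();   // wired kernel virtual address
 *       // ... fill or read `data` ...
 *       bmd->complete();
 *   }
 */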

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	uint32_t     kernTag,
	uint32_t     userTag)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me) {
		me->setVMTags(kernTag, userTag);

		if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
			me.reset();
		}
	}
	return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t physicalMask)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
		me.reset();
	}
	return me;
}

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::inTaskWithGuardPages(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithGuardPages(inTask, options, capacity)) {
		me.reset();
	}
	return me;
}

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
		me.reset();
	}
	return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
    IODirection inDirection,
    bool        inContiguous)
{
	return IOBufferMemoryDescriptor::withOptions(
		inDirection | kIOMemoryUnshared
		| (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		inCapacity, inContiguous ? inCapacity : 1 );
}

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t    inLength,
    IODirection  inDirection,
    bool         inContiguous)
{
	if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
	    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
	    inLength, inLength, (mach_vm_address_t)0)) {
		return false;
	}

	// start out with no data
	setLength(0);

	if (!appendBytes(inBytes, inLength)) {
		return false;
	}

	return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
OSSharedPtr<IOBufferMemoryDescriptor>
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t    inLength,
    IODirection  inDirection,
    bool         inContiguous)
{
	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();

	if (me && !me->initWithPhysicalMask(
		    kernel_task, inDirection | kIOMemoryUnshared
		    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		    inLength, inLength, 0 )) {
		me.reset();
	}

	if (me) {
		// start out with no data
		me->setLength(0);

		if (!me->appendBytes(inBytes, inLength)) {
			me.reset();
		}
	}
	return me;
}
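
/*
 * Illustrative (not part of the original source) use of withBytes; the
 * `header` buffer and its size are hypothetical:
 *
 *   uint8_t header[32] = { ... };
 *   OSSharedPtr<IOBufferMemoryDescriptor> md =
 *       IOBufferMemoryDescriptor::withBytes(header, sizeof(header),
 *           kIODirectionOut);
 *   // md->getLength() == sizeof(header); the bytes were copied into the
 *   // descriptor's own buffer.
 */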

/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
	// Cache all of the relevant information on the stack for use
	// after we call super::free()!
	IOOptionBits     flags         = _flags;
	IOOptionBits     internalFlags = _internalFlags;
	IOOptionBits     options   = _options;
	vm_size_t        size      = _capacity;
	void *           buffer    = _buffer;
	IOMemoryMap *    map       = NULL;
	IOAddressRange * range     = _ranges.v64;
	vm_offset_t      alignment = _alignment;
	kalloc_heap_t    kheap     = KHEAP_DATA_BUFFERS;

	/*
	 * Temporarily use default heap on intel due to rdar://74982985
	 */
#if __x86_64__
	kheap = KHEAP_DEFAULT;
#endif

	if (alignment >= page_size) {
		size = round_page(size);
	}

	if (reserved) {
		map = reserved->map;
		IOFreeType(reserved, ExpansionData);
		if (map) {
			map->release();
		}
	}

	if ((options & kIOMemoryPageable)
	    || (kInternalFlagPageSized & internalFlags)) {
		size = round_page(size);
	}

	if (internalFlags & kInternalFlagHasPointers) {
		kheap = KHEAP_DEFAULT;
	}

#if IOTRACKING
	if (!(options & kIOMemoryPageable)
	    && buffer
	    && (kInternalFlagInit & _internalFlags)) {
		trackingAccumSize(-size);
	}
#endif /* IOTRACKING */

	/* super::free may unwire - deallocate buffer afterwards */
	super::free();

	if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	} else if (buffer) {
		if (kInternalFlagPhysical & internalFlags) {
			IOKernelFreePhysical(kheap, (mach_vm_address_t) buffer, size);
		} else if (kInternalFlagPageAllocated & internalFlags) {
#if defined(__x86_64__)
			uintptr_t page;
			page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
			if (page) {
				kmem_free(kheap->kh_fallback_map, page, page_size);
			}
#if IOALLOCDEBUG
			OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
#else /* !defined(__x86_64__) */
			/* should be unreachable */
			panic("Attempting to free IOBMD with page allocated flag");
#endif /* defined(__x86_64__) */
		} else if (kInternalFlagGuardPages & internalFlags) {
			vm_offset_t allocation = (vm_offset_t)buffer - page_size;
			kmem_free(kheap->kh_fallback_map, allocation, size + page_size * 2);
#if IOALLOCDEBUG
			OSAddAtomicLong(-size, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
		} else if (alignment > 1) {
			IOFreeAligned_internal(kheap, buffer, size);
		} else {
			IOFree_internal(kheap, buffer, size);
		}
	}
	if (range && (kIOMemoryAsReference & flags)) {
		IOFreeType(range, IOAddressRange);
	}
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
	return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
	assert(length <= _capacity);
	if (length > _capacity) {
		return;
	}

	_length = length;
	_ranges.v64->length = length;
}
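
/*
 * Illustrative (not part of the original source): reusing one descriptor for
 * transfers of varying size instead of allocating a new one per transfer.
 * `bmd` and `transferSize` are hypothetical:
 *
 *   assert(transferSize <= bmd->getCapacity());
 *   bmd->setLength(transferSize);   // only transferSize bytes are described
 */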

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
	_flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
	vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
	IOByteCount offset;

	assert(_length <= _capacity);

	offset = _length;
	_length += actualBytesToCopy;
	_ranges.v64->length += actualBytesToCopy;

	if (_task == kernel_task) {
		bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
		    actualBytesToCopy);
	} else {
		writeBytes(offset, bytes, actualBytesToCopy);
	}

	return true;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		return _buffer;
	} else {
		return (void *)_ranges.v64->address;
	}
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
	IOVirtualAddress address;

	if ((start + withLength) < start) {
		return NULL;
	}

	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		address = (IOVirtualAddress) _buffer;
	} else {
		address = _ranges.v64->address;
	}

	if (start < _length && (start + withLength) <= _length) {
		return (void *)(address + start);
	}
	return NULL;
}

#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	void * bytes = getBytesNoCopy(offset, 0);

	if (bytes && lengthOfSegment) {
		*lengthOfSegment = _length - offset;
	}

	return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);
910