1 /*
2  * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #define IOKIT_ENABLE_SHARED_PTR
29 
30 #define _IOMEMORYDESCRIPTOR_INTERNAL_
31 
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 
35 #include <IOKit/IOLib.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IOBufferMemoryDescriptor.h>
38 #include <libkern/OSDebug.h>
39 #include <mach/mach_vm.h>
40 
41 #include "IOKitKernelInternal.h"
42 
43 #ifdef IOALLOCDEBUG
44 #include <libkern/c++/OSCPPDebug.h>
45 #endif
46 #include <IOKit/IOStatisticsPrivate.h>
47 
48 #if IOKITSTATS
49 #define IOStatisticsAlloc(type, size) \
50 do { \
51 	IOStatistics::countAlloc(type, size); \
52 } while (0)
53 #else
54 #define IOStatisticsAlloc(type, size)
55 #endif /* IOKITSTATS */
56 
57 
58 __BEGIN_DECLS
59 void ipc_port_release_send(ipc_port_t port);
60 #include <vm/pmap.h>
61 
62 __END_DECLS
63 
64 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
65 
enum {
	kInternalFlagPhysical      = 0x00000001, // allocated via IOKernelAllocateWithPhysicalRestrict
	kInternalFlagPageSized     = 0x00000002, // capacity was rounded up to a whole page
	kInternalFlagPageAllocated = 0x00000004, // allocated from the iopa sub-page allocator (x86_64)
	kInternalFlagInit          = 0x00000008, // initWithPhysicalMask completed
	kInternalFlagHasPointers   = 0x00000010, // may hold pointers; use the default kalloc heap
	kInternalFlagGuardPages    = 0x00000020, // buffer is bracketed by guard pages
};
74 
75 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
76 
77 #define super IOGeneralMemoryDescriptor
78 OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor,
79     IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM);
80 
81 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
82 
83 #if defined(__x86_64__)
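/*
 * IOBMDPageProc:
 *
 * Page-supply callback for the gIOBMDPageAllocator sub-page allocator.
 * Hands back one zeroed kernel page per call; requests on the data-buffers
 * heap are additionally tagged KMA_DATA.
 */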
84 static uintptr_t
85 IOBMDPageProc(kalloc_heap_t kheap, iopa_t * a)
86 {
87 	kern_return_t kr;
88 	vm_address_t  vmaddr  = 0;
89 	kma_flags_t kma_flags = KMA_ZERO;
90 
91 	if (kheap == KHEAP_DATA_BUFFERS) {
92 		kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
93 	}
94 	kr = kmem_alloc(kernel_map, &vmaddr, page_size,
95 	    kma_flags, VM_KERN_MEMORY_IOKIT);
96 
97 	if (KERN_SUCCESS != kr) {
98 		vmaddr = 0;
99 	}
100 
101 	return (uintptr_t) vmaddr;
102 }
103 #endif /* defined(__x86_64__) */
104 
105 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106 
107 #ifndef __LP64__
108 bool
109 IOBufferMemoryDescriptor::initWithOptions(
110 	IOOptionBits options,
111 	vm_size_t    capacity,
112 	vm_offset_t  alignment,
113 	task_t       inTask)
114 {
115 	mach_vm_address_t physicalMask = 0;
116 	return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
117 }
118 #endif /* !__LP64__ */
119 
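/*
 * withCopy:
 *
 * Returns a new IOBufferMemoryDescriptor backed by a copy of the range
 * [source, source + size) from sourceMap, copied out into inTask's
 * address space with vm_map_copyin / vm_map_copyout.
 */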
120 OSSharedPtr<IOBufferMemoryDescriptor>
121 IOBufferMemoryDescriptor::withCopy(
122 	task_t                inTask,
123 	IOOptionBits      options,
124 	vm_map_t              sourceMap,
125 	mach_vm_address_t source,
126 	mach_vm_size_t    size)
127 {
128 	OSSharedPtr<IOBufferMemoryDescriptor> inst;
129 	kern_return_t              err;
130 	vm_map_copy_t              copy;
131 	vm_map_address_t           address;
132 
133 	copy = NULL;
134 	do {
135 		err = kIOReturnNoMemory;
136 		inst = OSMakeShared<IOBufferMemoryDescriptor>();
137 		if (!inst) {
138 			break;
139 		}
140 		inst->_ranges.v64 = IOMallocType(IOAddressRange);
141 
142 		err = vm_map_copyin(sourceMap, source, size,
143 		    false /* src_destroy */, &copy);
144 		if (KERN_SUCCESS != err) {
145 			break;
146 		}
147 
148 		err = vm_map_copyout(get_task_map(inTask), &address, copy);
149 		if (KERN_SUCCESS != err) {
150 			break;
151 		}
152 		copy = NULL;
153 
154 		inst->_ranges.v64->address = address;
155 		inst->_ranges.v64->length  = size;
156 
157 		if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
158 			err = kIOReturnError;
159 		}
160 	} while (false);
161 
162 	if (KERN_SUCCESS == err) {
163 		return inst;
164 	}
165 
166 	if (copy) {
167 		vm_map_copy_discard(copy);
168 	}
169 
170 	return nullptr;
171 }
172 
173 
174 bool
175 IOBufferMemoryDescriptor::initWithPhysicalMask(
176 	task_t            inTask,
177 	IOOptionBits      options,
178 	mach_vm_size_t    capacity,
179 	mach_vm_address_t alignment,
180 	mach_vm_address_t physicalMask)
181 {
182 	task_t                mapTask = NULL;
183 	kalloc_heap_t         kheap = KHEAP_DATA_BUFFERS;
184 	mach_vm_address_t     highestMask = 0;
185 	IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
186 	IODMAMapSpecification mapSpec;
187 	bool                  mapped = false;
188 	bool                  withCopy = false;
189 	bool                  mappedOrShared = false;
190 
191 	if (!capacity) {
192 		return false;
193 	}
194 
195 	/*
196 	 * The IOKit constructor requests the allocator for zeroed memory
197 	 * so the members of the class do not need to be explicitly zeroed.
198 	 */
199 	_options          = options;
200 	_capacity         = capacity;
201 
202 	if (!_ranges.v64) {
203 		_ranges.v64 = IOMallocType(IOAddressRange);
204 		_ranges.v64->address = 0;
205 		_ranges.v64->length  = 0;
206 	} else {
207 		if (!_ranges.v64->address) {
208 			return false;
209 		}
210 		if (!(kIOMemoryPageable & options)) {
211 			return false;
212 		}
213 		if (!inTask) {
214 			return false;
215 		}
216 		_buffer = (void *) _ranges.v64->address;
217 		withCopy = true;
218 	}
219 
220 	/*
221 	 * Set kalloc_heap to default if allocation contains pointers
222 	 */
223 	if (kInternalFlagHasPointers & _internalFlags) {
224 		kheap = KHEAP_DEFAULT;
225 	}
226 
	// Make sure super::free doesn't deallocate _ranges before super::init has run.
228 	_flags = kIOMemoryAsReference;
229 
230 	// Grab IOMD bits from the Buffer MD options
231 	iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);
232 
233 	if (!(kIOMemoryMapperNone & options)) {
234 		IOMapper::checkForSystemMapper();
235 		mapped = (NULL != IOMapper::gSystem);
236 	}
237 
238 	if (physicalMask && (alignment <= 1)) {
239 		alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
240 		highestMask = (physicalMask | alignment);
241 		alignment++;
242 		if (alignment < page_size) {
243 			alignment = page_size;
244 		}
245 	}
246 
247 	if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
248 		alignment = page_size;
249 	}
250 
251 	if (alignment >= page_size) {
252 		if (round_page_overflow(capacity, &capacity)) {
253 			return false;
254 		}
255 	}
256 
257 	if (alignment > page_size) {
258 		options |= kIOMemoryPhysicallyContiguous;
259 	}
260 
261 	_alignment = alignment;
262 
263 	if ((capacity + alignment) < _capacity) {
264 		return false;
265 	}
266 
267 	if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
268 		return false;
269 	}
270 
271 	bzero(&mapSpec, sizeof(mapSpec));
272 	mapSpec.alignment      = _alignment;
273 	mapSpec.numAddressBits = 64;
274 	if (highestMask && mapped) {
275 		if (highestMask <= 0xFFFFFFFF) {
276 			mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask));
277 		} else {
278 			mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32)));
279 		}
280 		highestMask = 0;
281 	}
282 
283 	// set memory entry cache mode, pageable, purgeable
284 	iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
285 	if (options & kIOMemoryPageable) {
286 		if (_internalFlags & kInternalFlagGuardPages) {
287 			printf("IOBMD: Unsupported use of guard pages with pageable memory.\n");
288 			return false;
289 		}
290 		iomdOptions |= kIOMemoryBufferPageable;
291 		if (options & kIOMemoryPurgeable) {
292 			iomdOptions |= kIOMemoryBufferPurgeable;
293 		}
294 	} else {
		// Buffers shouldn't auto-prepare; they should be prepared explicitly.
		// But that was never enforced, so the auto-prepare behavior is kept.
297 		iomdOptions |= kIOMemoryAutoPrepare;
298 
299 		/* Allocate a wired-down buffer inside kernel space. */
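		/*
		 * Allocation strategy, matching the branches below:
		 *  - contiguous / physically-masked / large-alignment buffers go
		 *    through IOKernelAllocateWithPhysicalRestrict,
		 *  - guard-page buffers use kernel_memory_allocate with leading
		 *    and trailing guard pages,
		 *  - small mapped or shared buffers come from the iopa sub-page
		 *    allocator (x86_64 only),
		 *  - everything else uses IOMallocAligned / IOMalloc.
		 */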
300 
301 		bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
302 
303 		if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
304 			contig |= (!mapped);
305 			contig |= (0 != (kIOMemoryMapperNone & options));
306 #if 0
307 			// treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
308 			contig |= true;
309 #endif
310 		}
311 
312 		mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options)));
313 		if (contig || highestMask || (alignment > page_size)) {
314 			if (_internalFlags & kInternalFlagGuardPages) {
315 				printf("IOBMD: Unsupported use of guard pages with physical mask or contiguous memory.\n");
316 				return false;
317 			}
318 			_internalFlags |= kInternalFlagPhysical;
319 			if (highestMask) {
320 				_internalFlags |= kInternalFlagPageSized;
321 				if (round_page_overflow(capacity, &capacity)) {
322 					return false;
323 				}
324 			}
325 			_buffer = (void *) IOKernelAllocateWithPhysicalRestrict(kheap,
326 			    capacity, highestMask, alignment, contig);
327 		} else if (_internalFlags & kInternalFlagGuardPages) {
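			/*
			 * Guard page layout: [guard page][capacity bytes][guard page];
			 * _buffer points just past the leading guard page, and free()
			 * releases the whole span including both guards.
			 */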
328 			vm_offset_t address = 0;
329 			kern_return_t kr;
330 			uintptr_t alignMask;
331 			kma_flags_t kma_flags = (kma_flags_t) (KMA_GUARD_FIRST |
332 			    KMA_GUARD_LAST | KMA_ZERO);
333 
334 			if (((uint32_t) alignment) != alignment) {
335 				return false;
336 			}
337 			if (kheap == KHEAP_DATA_BUFFERS) {
338 				kma_flags = (kma_flags_t) (kma_flags | KMA_DATA);
339 			}
340 
341 			alignMask = (1UL << log2up((uint32_t) alignment)) - 1;
342 			kr = kernel_memory_allocate(kernel_map, &address,
343 			    capacity + page_size * 2, alignMask, kma_flags,
344 			    IOMemoryTag(kernel_map));
345 			if (kr != KERN_SUCCESS || address == 0) {
346 				return false;
347 			}
348 #if IOALLOCDEBUG
349 			OSAddAtomicLong(capacity, &debug_iomalloc_size);
350 #endif
351 			IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
352 			_buffer = (void *)(address + page_size);
353 #if defined(__x86_64__)
354 		} else if (mappedOrShared
355 		    && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) {
356 			_internalFlags |= kInternalFlagPageAllocated;
357 			_buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator,
358 			    &IOBMDPageProc, kheap, capacity, alignment);
359 			if (_buffer) {
360 				bzero(_buffer, capacity);
361 				IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
362 #if IOALLOCDEBUG
363 				OSAddAtomicLong(capacity, &debug_iomalloc_size);
364 #endif
365 			}
366 #endif /* defined(__x86_64__) */
367 		} else if (alignment > 1) {
368 			/* BEGIN IGNORE CODESTYLE */
369 			__typed_allocators_ignore_push
370 			_buffer = IOMallocAligned_internal(kheap, capacity, alignment,
371 			    Z_ZERO_VM_TAG_BT_BIT);
372 		} else {
373 			_buffer = IOMalloc_internal(kheap, capacity, Z_ZERO_VM_TAG_BT_BIT);
374 			__typed_allocators_ignore_pop
375 			/* END IGNORE CODESTYLE */
376 		}
377 		if (!_buffer) {
378 			return false;
379 		}
380 	}
381 
382 	if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
383 		vm_size_t       size = round_page(capacity);
384 
385 		// initWithOptions will create memory entry
386 		if (!withCopy) {
387 			iomdOptions |= kIOMemoryPersistent;
388 		}
389 
390 		if (options & kIOMemoryPageable) {
391 #if IOALLOCDEBUG
392 			OSAddAtomicLong(size, &debug_iomallocpageable_size);
393 #endif
394 			if (!withCopy) {
395 				mapTask = inTask;
396 			}
397 			if (NULL == inTask) {
398 				inTask = kernel_task;
399 			}
400 		} else if (options & kIOMapCacheMask) {
401 			// Prefetch each page to put entries into the pmap
402 			volatile UInt8 *    startAddr = (UInt8 *)_buffer;
403 			volatile UInt8 *    endAddr   = (UInt8 *)_buffer + capacity;
404 
405 			while (startAddr < endAddr) {
406 				UInt8 dummyVar = *startAddr;
407 				(void) dummyVar;
408 				startAddr += page_size;
409 			}
410 		}
411 	}
412 
413 	_ranges.v64->address = (mach_vm_address_t) pgz_decode(_buffer, _capacity);
414 	_ranges.v64->length  = _capacity;
415 
416 	if (!super::initWithOptions(_ranges.v64, 1, 0,
417 	    inTask, iomdOptions, /* System mapper */ NULL)) {
418 		return false;
419 	}
420 
421 	_internalFlags |= kInternalFlagInit;
422 #if IOTRACKING
423 	if (!(options & kIOMemoryPageable)) {
424 		trackingAccumSize(capacity);
425 	}
426 #endif /* IOTRACKING */
427 
428 	// give any system mapper the allocation params
429 	if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
430 	    &mapSpec, sizeof(mapSpec))) {
431 		return false;
432 	}
433 
434 	if (mapTask) {
435 		if (!reserved) {
436 			reserved = IOMallocType(ExpansionData);
437 			if (!reserved) {
438 				return false;
439 			}
440 		}
441 		reserved->map = createMappingInTask(mapTask, 0,
442 		    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach();
443 		if (!reserved->map) {
444 			_buffer = NULL;
445 			return false;
446 		}
447 		release();  // map took a retain on this
448 		reserved->map->retain();
449 		removeMapping(reserved->map);
450 		mach_vm_address_t buffer = reserved->map->getAddress();
451 		_buffer = (void *) buffer;
452 		if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
453 			_ranges.v64->address = buffer;
454 		}
455 	}
456 
457 	setLength(_capacity);
458 
459 	return true;
460 }
461 
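/*
 * initControlWithPhysicalMask:
 *
 * Variant of initWithPhysicalMask for buffers that may contain kernel
 * pointers; flags the allocation so it is served from the default kalloc
 * heap instead of the data-buffers heap.
 */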
462 bool
463 IOBufferMemoryDescriptor::initControlWithPhysicalMask(
464 	task_t            inTask,
465 	IOOptionBits      options,
466 	mach_vm_size_t    capacity,
467 	mach_vm_address_t alignment,
468 	mach_vm_address_t physicalMask)
469 {
470 	_internalFlags = kInternalFlagHasPointers;
471 	return initWithPhysicalMask(inTask, options, capacity, alignment,
472 	           physicalMask);
473 }
474 
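/*
 * initWithGuardPages:
 *
 * Variant of initWithPhysicalMask that rounds the capacity up to a whole
 * number of pages and brackets the buffer with guard pages.
 */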
475 bool
476 IOBufferMemoryDescriptor::initWithGuardPages(
477 	task_t            inTask,
478 	IOOptionBits      options,
479 	mach_vm_size_t    capacity)
480 {
481 	mach_vm_size_t roundedCapacity;
482 
483 	_internalFlags = kInternalFlagGuardPages;
484 
485 	if (round_page_overflow(capacity, &roundedCapacity)) {
486 		return false;
487 	}
488 
489 	return initWithPhysicalMask(inTask, options, roundedCapacity, page_size,
490 	           (mach_vm_address_t)0);
491 }
492 
493 OSSharedPtr<IOBufferMemoryDescriptor>
494 IOBufferMemoryDescriptor::inTaskWithOptions(
495 	task_t       inTask,
496 	IOOptionBits options,
497 	vm_size_t    capacity,
498 	vm_offset_t  alignment)
499 {
500 	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
501 
502 	if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
503 		me.reset();
504 	}
505 	return me;
506 }
507 
508 OSSharedPtr<IOBufferMemoryDescriptor>
509 IOBufferMemoryDescriptor::inTaskWithOptions(
510 	task_t       inTask,
511 	IOOptionBits options,
512 	vm_size_t    capacity,
513 	vm_offset_t  alignment,
514 	uint32_t     kernTag,
515 	uint32_t     userTag)
516 {
517 	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
518 
519 	if (me) {
520 		me->setVMTags(kernTag, userTag);
521 
522 		if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
523 			me.reset();
524 		}
525 	}
526 	return me;
527 }
528 
529 OSSharedPtr<IOBufferMemoryDescriptor>
530 IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
531 	task_t            inTask,
532 	IOOptionBits      options,
533 	mach_vm_size_t    capacity,
534 	mach_vm_address_t physicalMask)
535 {
536 	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
537 
538 	if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
539 		me.reset();
540 	}
541 	return me;
542 }
543 
544 OSSharedPtr<IOBufferMemoryDescriptor>
545 IOBufferMemoryDescriptor::inTaskWithGuardPages(
546 	task_t            inTask,
547 	IOOptionBits      options,
548 	mach_vm_size_t    capacity)
549 {
550 	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
551 
552 	if (me && !me->initWithGuardPages(inTask, options, capacity)) {
553 		me.reset();
554 	}
555 	return me;
556 }
557 
558 #ifndef __LP64__
559 bool
560 IOBufferMemoryDescriptor::initWithOptions(
561 	IOOptionBits options,
562 	vm_size_t    capacity,
563 	vm_offset_t  alignment)
564 {
565 	return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
566 }
567 #endif /* !__LP64__ */
568 
569 OSSharedPtr<IOBufferMemoryDescriptor>
570 IOBufferMemoryDescriptor::withOptions(
571 	IOOptionBits options,
572 	vm_size_t    capacity,
573 	vm_offset_t  alignment)
574 {
575 	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
576 
577 	if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
578 		me.reset();
579 	}
580 	return me;
581 }
582 
583 
584 /*
585  * withCapacity:
586  *
587  * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
588  * hold capacity bytes.  The descriptor's length is initially set to the capacity.
589  */
590 OSSharedPtr<IOBufferMemoryDescriptor>
591 IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
592     IODirection inDirection,
593     bool        inContiguous)
594 {
595 	return IOBufferMemoryDescriptor::withOptions(
596 		inDirection | kIOMemoryUnshared
597 		| (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
598 		inCapacity, inContiguous ? inCapacity : 1 );
599 }
600 
601 #ifndef __LP64__
602 /*
603  * initWithBytes:
604  *
605  * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
606  * The descriptor's length and capacity are set to the input buffer's size.
607  */
608 bool
609 IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
610     vm_size_t    inLength,
611     IODirection  inDirection,
612     bool         inContiguous)
613 {
614 	if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
615 	    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
616 	    inLength, inLength, (mach_vm_address_t)0)) {
617 		return false;
618 	}
619 
620 	// start out with no data
621 	setLength(0);
622 
623 	if (!appendBytes(inBytes, inLength)) {
624 		return false;
625 	}
626 
627 	return true;
628 }
629 #endif /* !__LP64__ */
630 
631 /*
632  * withBytes:
633  *
634  * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
635  * The descriptor's length and capacity are set to the input buffer's size.
636  */
637 OSSharedPtr<IOBufferMemoryDescriptor>
638 IOBufferMemoryDescriptor::withBytes(const void * inBytes,
639     vm_size_t    inLength,
640     IODirection  inDirection,
641     bool         inContiguous)
642 {
643 	OSSharedPtr<IOBufferMemoryDescriptor> me = OSMakeShared<IOBufferMemoryDescriptor>();
644 
645 	if (me && !me->initWithPhysicalMask(
646 		    kernel_task, inDirection | kIOMemoryUnshared
647 		    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
648 		    inLength, inLength, 0 )) {
649 		me.reset();
650 	}
651 
652 	if (me) {
653 		// start out with no data
654 		me->setLength(0);
655 
656 		if (!me->appendBytes(inBytes, inLength)) {
657 			me.reset();
658 		}
659 	}
660 	return me;
661 }
662 
663 /*
664  * free:
665  *
666  * Free resources
667  */
668 void
669 IOBufferMemoryDescriptor::free()
670 {
671 	// Cache all of the relevant information on the stack for use
672 	// after we call super::free()!
673 	IOOptionBits     flags         = _flags;
674 	IOOptionBits     internalFlags = _internalFlags;
675 	IOOptionBits     options   = _options;
676 	vm_size_t        size      = _capacity;
677 	void *           buffer    = _buffer;
678 	IOMemoryMap *    map       = NULL;
679 	IOAddressRange * range     = _ranges.v64;
680 	vm_offset_t      alignment = _alignment;
681 	kalloc_heap_t    kheap     = KHEAP_DATA_BUFFERS;
682 
683 	if (alignment >= page_size) {
684 		size = round_page(size);
685 	}
686 
687 	if (reserved) {
688 		map = reserved->map;
689 		IOFreeType(reserved, ExpansionData);
690 		if (map) {
691 			map->release();
692 		}
693 	}
694 
695 	if ((options & kIOMemoryPageable)
696 	    || (kInternalFlagPageSized & internalFlags)) {
697 		size = round_page(size);
698 	}
699 
700 	if (internalFlags & kInternalFlagHasPointers) {
701 		kheap = KHEAP_DEFAULT;
702 	}
703 
704 #if IOTRACKING
705 	if (!(options & kIOMemoryPageable)
706 	    && buffer
707 	    && (kInternalFlagInit & _internalFlags)) {
708 		trackingAccumSize(-size);
709 	}
710 #endif /* IOTRACKING */
711 
712 	/* super::free may unwire - deallocate buffer afterwards */
713 	super::free();
714 
715 	if (options & kIOMemoryPageable) {
716 #if IOALLOCDEBUG
717 		OSAddAtomicLong(-size, &debug_iomallocpageable_size);
718 #endif
719 	} else if (buffer) {
720 		if (kInternalFlagPhysical & internalFlags) {
721 			IOKernelFreePhysical(kheap, (mach_vm_address_t) buffer, size);
722 		} else if (kInternalFlagPageAllocated & internalFlags) {
723 #if defined(__x86_64__)
724 			uintptr_t page;
725 			page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
726 			if (page) {
727 				kmem_free(kernel_map, page, page_size);
728 			}
729 #if IOALLOCDEBUG
730 			OSAddAtomicLong(-size, &debug_iomalloc_size);
731 #endif
732 			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
733 #else /* !defined(__x86_64__) */
734 			/* should be unreachable */
735 			panic("Attempting to free IOBMD with page allocated flag");
736 #endif /* defined(__x86_64__) */
737 		} else if (kInternalFlagGuardPages & internalFlags) {
738 			vm_offset_t allocation = (vm_offset_t)buffer - page_size;
739 			kmem_free(kernel_map, allocation, size + page_size * 2);
740 #if IOALLOCDEBUG
741 			OSAddAtomicLong(-size, &debug_iomalloc_size);
742 #endif
743 			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
744 		} else if (alignment > 1) {
745 			/* BEGIN IGNORE CODESTYLE */
746 			__typed_allocators_ignore_push
747 			IOFreeAligned_internal(kheap, buffer, size);
748 		} else {
749 			IOFree_internal(kheap, buffer, size);
750 			__typed_allocators_ignore_pop
751 			/* END IGNORE CODESTYLE */
752 		}
753 	}
754 	if (range && (kIOMemoryAsReference & flags)) {
755 		IOFreeType(range, IOAddressRange);
756 	}
757 }
758 
759 /*
760  * getCapacity:
761  *
762  * Get the buffer capacity
763  */
764 vm_size_t
765 IOBufferMemoryDescriptor::getCapacity() const
766 {
767 	return _capacity;
768 }
769 
770 /*
771  * setLength:
772  *
773  * Change the buffer length of the memory descriptor.  When a new buffer
774  * is created, the initial length of the buffer is set to be the same as
775  * the capacity.  The length can be adjusted via setLength for a shorter
776  * transfer (there is no need to create more buffer descriptors when you
777  * can reuse an existing one, even for different transfer sizes).   Note
778  * that the specified length must not exceed the capacity of the buffer.
779  */
780 void
781 IOBufferMemoryDescriptor::setLength(vm_size_t length)
782 {
783 	assert(length <= _capacity);
784 	if (length > _capacity) {
785 		return;
786 	}
787 
788 	_length = length;
789 	_ranges.v64->length = length;
790 }
791 
792 /*
793  * setDirection:
794  *
795  * Change the direction of the transfer.  This method allows one to redirect
796  * the descriptor's transfer direction.  This eliminates the need to destroy
797  * and create new buffers when different transfer directions are needed.
798  */
799 void
800 IOBufferMemoryDescriptor::setDirection(IODirection direction)
801 {
802 	_flags = (_flags & ~kIOMemoryDirectionMask) | direction;
803 #ifndef __LP64__
804 	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
805 #endif /* !__LP64__ */
806 }
807 
808 /*
809  * appendBytes:
810  *
811  * Add some data to the end of the buffer.  This method automatically
812  * maintains the memory descriptor buffer length.  Note that appendBytes
813  * will not copy past the end of the memory descriptor's current capacity.
814  */
815 bool
816 IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
817 {
818 	vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
819 	IOByteCount offset;
820 
821 	assert(_length <= _capacity);
822 
823 	offset = _length;
824 	_length += actualBytesToCopy;
825 	_ranges.v64->length += actualBytesToCopy;
826 
827 	if (_task == kernel_task) {
828 		bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
829 		    actualBytesToCopy);
830 	} else {
831 		writeBytes(offset, bytes, actualBytesToCopy);
832 	}
833 
834 	return true;
835 }
836 
837 /*
838  * getBytesNoCopy:
839  *
840  * Return the virtual address of the beginning of the buffer
841  */
842 void *
843 IOBufferMemoryDescriptor::getBytesNoCopy()
844 {
845 	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
846 		return _buffer;
847 	} else {
848 		return (void *)_ranges.v64->address;
849 	}
850 }
851 
852 
853 /*
854  * getBytesNoCopy:
855  *
856  * Return the virtual address of an offset from the beginning of the buffer
857  */
858 void *
859 IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
860 {
861 	IOVirtualAddress address;
862 
863 	if ((start + withLength) < start) {
864 		return NULL;
865 	}
866 
867 	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
868 		address = (IOVirtualAddress) _buffer;
869 	} else {
870 		address = _ranges.v64->address;
871 	}
872 
873 	if (start < _length && (start + withLength) <= _length) {
874 		return (void *)(address + start);
875 	}
876 	return NULL;
877 }
878 
879 #ifndef __LP64__
880 void *
881 IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
882     IOByteCount * lengthOfSegment)
883 {
884 	void * bytes = getBytesNoCopy(offset, 0);
885 
886 	if (bytes && lengthOfSegment) {
887 		*lengthOfSegment = _length - offset;
888 	}
889 
890 	return bytes;
891 }
892 #endif /* !__LP64__ */
893 
894 #ifdef __LP64__
895 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
896 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
897 #else /* !__LP64__ */
898 OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0);
899 OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1);
900 #endif /* !__LP64__ */
901 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
902 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
903 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
904 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
905 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
906 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
907 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
908 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
909 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
910 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
911 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
912 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
913 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
914 OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);
915