/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum{
	kInternalFlagPhysical      = 0x00000001,
	kInternalFlagPageSized     = 0x00000002,
	kInternalFlagPageAllocated = 0x00000004,
	kInternalFlagInit          = 0x00000008
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
    IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static uintptr_t
IOBMDPageProc(iopa_t * a)
{
	kern_return_t kr;
	vm_address_t  vmaddr  = 0;
	int           options = 0;// KMA_LOMEM;

	kr = kernel_memory_allocate(kernel_map, &vmaddr,
	    page_size, 0, options, VM_KERN_MEMORY_IOKIT);

	if (KERN_SUCCESS != kr) {
		vmaddr = 0;
	} else {
		bzero((void *) vmaddr, page_size);
	}

	return (uintptr_t) vmaddr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment,
	task_t       inTask)
{
	mach_vm_address_t physicalMask = 0;
	return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask);
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCopy(
	task_t                inTask,
	IOOptionBits      options,
	vm_map_t              sourceMap,
	mach_vm_address_t source,
	mach_vm_size_t    size)
{
	IOBufferMemoryDescriptor * inst;
	kern_return_t              err;
	vm_map_copy_t              copy;
	vm_map_address_t           address;

	copy = NULL;
	do {
		err = kIOReturnNoMemory;
		inst = new IOBufferMemoryDescriptor;
		if (!inst) {
			break;
		}
		inst->_ranges.v64 = IONew(IOAddressRange, 1);
		if (!inst->_ranges.v64) {
			break;
		}

		err = vm_map_copyin(sourceMap, source, size,
		    false /* src_destroy */, &copy);
		if (KERN_SUCCESS != err) {
			break;
		}

		err = vm_map_copyout(get_task_map(inTask), &address, copy);
		if (KERN_SUCCESS != err) {
			break;
		}
		copy = NULL;

		inst->_ranges.v64->address = address;
		inst->_ranges.v64->length  = size;

		if (!inst->initWithPhysicalMask(inTask, options, size, page_size, 0)) {
			err = kIOReturnError;
		}
	} while (false);

	if (KERN_SUCCESS == err) {
		return inst;
	}

	if (copy) {
		vm_map_copy_discard(copy);
	}
	OSSafeReleaseNULL(inst);
	return NULL;
}


bool
IOBufferMemoryDescriptor::initWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t alignment,
	mach_vm_address_t physicalMask)
{
	task_t                mapTask = NULL;
	vm_map_t              vmmap = NULL;
	mach_vm_address_t     highestMask = 0;
	IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
	IODMAMapSpecification mapSpec;
	bool                  mapped = false;
	bool                  withCopy = false;
	bool                  needZero;

	if (!capacity) {
		return false;
	}

	_options          = options;
	_capacity         = capacity;
	_internalFlags    = 0;
	_internalReserved = 0;
	_buffer           = NULL;

	if (!_ranges.v64) {
		_ranges.v64 = IONew(IOAddressRange, 1);
		if (!_ranges.v64) {
			return false;
		}
		_ranges.v64->address = 0;
		_ranges.v64->length  = 0;
	} else {
		if (!_ranges.v64->address) {
			return false;
		}
		if (!(kIOMemoryPageable & options)) {
			return false;
		}
		if (!inTask) {
			return false;
		}
		_buffer = (void *) _ranges.v64->address;
		withCopy = true;
	}
	//  make sure super::free doesn't dealloc _ranges before super::init
	_flags = kIOMemoryAsReference;

	// Grab IOMD bits from the Buffer MD options
	iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);

	if (!(kIOMemoryMapperNone & options)) {
		IOMapper::checkForSystemMapper();
		mapped = (NULL != IOMapper::gSystem);
	}
	needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

	if (physicalMask && (alignment <= 1)) {
		alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
		highestMask = (physicalMask | alignment);
		alignment++;
		if (alignment < page_size) {
			alignment = page_size;
		}
	}
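	/*
	 * Illustrative worked example of the derivation above: a physicalMask of
	 * 0x00000000FFFFF000 gives alignment = 0xFFF + 1 = 0x1000 (one 4K page)
	 * and highestMask = 0x00000000FFFFFFFF, i.e. the buffer must be page
	 * aligned and physically addressable below 4GB.
	 */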

	if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) {
		alignment = page_size;
	}

	if (alignment >= page_size) {
		capacity = round_page(capacity);
	}

	if (alignment > page_size) {
		options |= kIOMemoryPhysicallyContiguous;
	}

	_alignment = alignment;

	if ((capacity + alignment) < _capacity) {
		return false;
	}

	if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) {
		return false;
	}

	bzero(&mapSpec, sizeof(mapSpec));
	mapSpec.alignment      = _alignment;
	mapSpec.numAddressBits = 64;
	if (highestMask && mapped) {
		if (highestMask <= 0xFFFFFFFF) {
			mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
		} else {
			mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
		}
		highestMask = 0;
	}
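	/*
	 * Illustrative example: a highestMask of 0x00000000FFFFFFFF yields
	 * numAddressBits = 32 above, while 0x00000003FFFFFFFF yields
	 * numAddressBits = 34 (64 minus the leading zero count of the upper
	 * 32 bits).
	 */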

	// set memory entry cache mode, pageable, purgeable
	iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
	if (options & kIOMemoryPageable) {
		iomdOptions |= kIOMemoryBufferPageable;
		if (options & kIOMemoryPurgeable) {
			iomdOptions |= kIOMemoryBufferPurgeable;
		}
	} else {
		vmmap = kernel_map;

		// Buffers shouldn't auto-prepare; they should be prepared explicitly.
		// That was never enforced, however, so auto-prepare remains the behavior here.
		iomdOptions |= kIOMemoryAutoPrepare;

		/* Allocate a wired-down buffer inside kernel space. */

		bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

		if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) {
			contig |= (!mapped);
			contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
			// treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
			contig |= true;
#endif
		}

		if (contig || highestMask || (alignment > page_size)) {
			_internalFlags |= kInternalFlagPhysical;
			if (highestMask) {
				_internalFlags |= kInternalFlagPageSized;
				capacity = round_page(capacity);
			}
			_buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
				capacity, highestMask, alignment, contig);
		} else if (needZero
		    && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) {
			_internalFlags |= kInternalFlagPageAllocated;
			needZero        = false;
			_buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
			if (_buffer) {
				IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
				OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
			}
		} else if (alignment > 1) {
			_buffer = IOMallocAligned(capacity, alignment);
		} else {
			_buffer = IOMalloc(capacity);
		}
		if (!_buffer) {
			return false;
		}
		if (needZero) {
			bzero(_buffer, capacity);
		}
	}

	if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
		vm_size_t       size = round_page(capacity);

		// initWithOptions will create memory entry
		if (!withCopy) {
			iomdOptions |= kIOMemoryPersistent;
		}

		if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
			OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
			if (!withCopy) {
				mapTask = inTask;
			}
			if (NULL == inTask) {
				inTask = kernel_task;
			}
		} else if (options & kIOMapCacheMask) {
			// Prefetch each page to put entries into the pmap
			volatile UInt8 *    startAddr = (UInt8 *)_buffer;
			volatile UInt8 *    endAddr   = (UInt8 *)_buffer + capacity;

			while (startAddr < endAddr) {
				UInt8 dummyVar = *startAddr;
				(void) dummyVar;
				startAddr += page_size;
			}
		}
	}

	_ranges.v64->address = (mach_vm_address_t) _buffer;
	_ranges.v64->length  = _capacity;

	if (!super::initWithOptions(_ranges.v64, 1, 0,
	    inTask, iomdOptions, /* System mapper */ NULL)) {
		return false;
	}

	_internalFlags |= kInternalFlagInit;
#if IOTRACKING
	if (!(options & kIOMemoryPageable)) {
		trackingAccumSize(capacity);
	}
#endif /* IOTRACKING */

	// give any system mapper the allocation params
	if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
	    &mapSpec, sizeof(mapSpec))) {
		return false;
	}

	if (mapTask) {
		if (!reserved) {
			reserved = IONew( ExpansionData, 1 );
			if (!reserved) {
				return false;
			}
		}
		reserved->map = createMappingInTask(mapTask, 0,
		    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
		if (!reserved->map) {
			_buffer = NULL;
			return false;
		}
		release();  // map took a retain on this
		reserved->map->retain();
		removeMapping(reserved->map);
		mach_vm_address_t buffer = reserved->map->getAddress();
		_buffer = (void *) buffer;
		if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) {
			_ranges.v64->address = buffer;
		}
	}

	setLength(_capacity);

	return true;
}

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::inTaskWithOptions(
	task_t       inTask,
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
		me->release();
		me = NULL;
	}
	return me;
}

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
	task_t            inTask,
	IOOptionBits      options,
	mach_vm_size_t    capacity,
	mach_vm_address_t physicalMask)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) {
		me->release();
		me = NULL;
	}
	return me;
}
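
#if 0
/*
 * Usage sketch (illustrative only; the helper below is hypothetical): a driver
 * that needs a wired, physically contiguous buffer reachable by a 32-bit DMA
 * engine could allocate it with inTaskWithPhysicalMask() roughly like this.
 */
static IOBufferMemoryDescriptor *
ExampleAllocate32BitDMABuffer(vm_size_t length)
{
	return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
		kernel_task,
		kIODirectionInOut | kIOMemoryPhysicallyContiguous,
		length,
		0x00000000FFFFFFFFULL /* keep the allocation below 4GB */);
}
#endif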

#ifndef __LP64__
bool
IOBufferMemoryDescriptor::initWithOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0);
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withOptions(
	IOOptionBits options,
	vm_size_t    capacity,
	vm_offset_t  alignment)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
		me->release();
		me = NULL;
	}
	return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
    IODirection inDirection,
    bool        inContiguous)
{
	return IOBufferMemoryDescriptor::withOptions(
		inDirection | kIOMemoryUnshared
		| (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		inCapacity, inContiguous ? inCapacity : 1 );
}
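
#if 0
/*
 * Usage sketch (illustrative only; the helper is hypothetical): withCapacity()
 * returns a buffer whose length starts out equal to its capacity, so callers
 * typically fill or zero it before use.
 */
static void
ExampleWithCapacity(void)
{
	IOBufferMemoryDescriptor * bmd =
	    IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOut, false);
	if (bmd) {
		bzero(bmd->getBytesNoCopy(), bmd->getCapacity());
		bmd->release();
	}
}
#endif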

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool
IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
    vm_size_t    inLength,
    IODirection  inDirection,
    bool         inContiguous)
{
	if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
	    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
	    inLength, inLength, (mach_vm_address_t)0)) {
		return false;
	}

	// start out with no data
	setLength(0);

	if (!appendBytes(inBytes, inLength)) {
		return false;
	}

	return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
    vm_size_t    inLength,
    IODirection  inDirection,
    bool         inContiguous)
{
	IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

	if (me && !me->initWithPhysicalMask(
		    kernel_task, inDirection | kIOMemoryUnshared
		    | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
		    inLength, inLength, 0 )) {
		me->release();
		me = NULL;
	}

	if (me) {
		// start out with no data
		me->setLength(0);

		if (!me->appendBytes(inBytes, inLength)) {
			me->release();
			me = NULL;
		}
	}
	return me;
}
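
#if 0
/*
 * Usage sketch (illustrative only; the helper is hypothetical): withBytes()
 * copies the caller's data into the new buffer, so the source storage may be
 * released immediately afterwards.
 */
static IOBufferMemoryDescriptor *
ExampleWithBytes(void)
{
	static const uint8_t header[4] = { 0xDE, 0xAD, 0xBE, 0xEF };

	return IOBufferMemoryDescriptor::withBytes(header, sizeof(header),
	           kIODirectionOut, false);
}
#endif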

/*
 * free:
 *
 * Free resources
 */
void
IOBufferMemoryDescriptor::free()
{
	// Cache all of the relevant information on the stack for use
	// after we call super::free()!
	IOOptionBits     flags         = _flags;
	IOOptionBits     internalFlags = _internalFlags;
	IOOptionBits     options   = _options;
	vm_size_t        size      = _capacity;
	void *           buffer    = _buffer;
	IOMemoryMap *    map       = NULL;
	IOAddressRange * range     = _ranges.v64;
	vm_offset_t      alignment = _alignment;

	if (alignment >= page_size) {
		size = round_page(size);
	}

	if (reserved) {
		map = reserved->map;
		IODelete( reserved, ExpansionData, 1 );
		if (map) {
			map->release();
		}
	}

	if ((options & kIOMemoryPageable)
	    || (kInternalFlagPageSized & internalFlags)) {
		size = round_page(size);
	}

#if IOTRACKING
	if (!(options & kIOMemoryPageable)
	    && buffer
	    && (kInternalFlagInit & _internalFlags)) {
		trackingAccumSize(-size);
	}
#endif /* IOTRACKING */

	/* super::free may unwire - deallocate buffer afterwards */
	super::free();

	if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
		OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
	} else if (buffer) {
		if (kInternalFlagPhysical & internalFlags) {
			IOKernelFreePhysical((mach_vm_address_t) buffer, size);
		} else if (kInternalFlagPageAllocated & internalFlags) {
			uintptr_t page;
			page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
			if (page) {
				kmem_free(kernel_map, page, page_size);
			}
#if IOALLOCDEBUG
			OSAddAtomic(-size, &debug_iomalloc_size);
#endif
			IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
		} else if (alignment > 1) {
			IOFreeAligned(buffer, size);
		} else {
			IOFree(buffer, size);
		}
	}
	if (range && (kIOMemoryAsReference & flags)) {
		IODelete(range, IOAddressRange, 1);
	}
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t
IOBufferMemoryDescriptor::getCapacity() const
{
	return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void
IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
	assert(length <= _capacity);
	if (length > _capacity) {
		return;
	}

	_length = length;
	_ranges.v64->length = length;
}
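
#if 0
/*
 * Usage sketch (illustrative only; the helper is hypothetical): a descriptor
 * with spare capacity can be reused for a shorter transfer by adjusting its
 * length instead of allocating a new buffer.
 */
static void
ExampleSetLength(IOBufferMemoryDescriptor * bmd)
{
	assert(bmd->getCapacity() >= 512);
	bmd->setLength(512);
}
#endif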

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void
IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
	_flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}
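
#if 0
/*
 * Usage sketch (illustrative only; the helper is hypothetical): the same
 * buffer can be turned around for the opposite transfer direction without
 * being reallocated.
 */
static void
ExampleSetDirection(IOBufferMemoryDescriptor * bmd)
{
	bmd->setDirection(kIODirectionIn);
}
#endif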

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
	vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
	IOByteCount offset;

	assert(_length <= _capacity);

	offset = _length;
	_length += actualBytesToCopy;
	_ranges.v64->length += actualBytesToCopy;

	if (_task == kernel_task) {
		bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
		    actualBytesToCopy);
	} else {
		writeBytes(offset, bytes, actualBytesToCopy);
	}

	return true;
}
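
#if 0
/*
 * Usage sketch (illustrative only; the helper is hypothetical): build a buffer
 * up incrementally; appendBytes() advances the length and never copies past
 * the descriptor's capacity.
 */
static bool
ExampleAppend(IOBufferMemoryDescriptor * bmd,
    const void * payload, vm_size_t payloadLen)
{
	bmd->setLength(0);      /* start with an empty buffer */
	return bmd->appendBytes(payload, payloadLen);
}
#endif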

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy()
{
	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		return _buffer;
	} else {
		return (void *)_ranges.v64->address;
	}
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
	IOVirtualAddress address;

	if ((start + withLength) < start) {
		return NULL;
	}

	if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) {
		address = (IOVirtualAddress) _buffer;
	} else {
		address = _ranges.v64->address;
	}

	if (start < _length && (start + withLength) <= _length) {
		return (void *)(address + start);
	}
	return NULL;
}
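
#if 0
/*
 * Usage sketch (illustrative only; the helper is hypothetical): the ranged
 * getBytesNoCopy() bounds-checks against the current length and returns NULL
 * for out-of-range or overflowing requests.
 */
static const void *
ExamplePeek(IOBufferMemoryDescriptor * bmd, vm_size_t offset, vm_size_t len)
{
	return bmd->getBytesNoCopy(offset, len);
}
#endif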

#ifndef __LP64__
void *
IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	void * bytes = getBytesNoCopy(offset, 0);

	if (bytes && lengthOfSegment) {
		*lengthOfSegment = _length - offset;
	}

	return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);