/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
	IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical      = 0x00000001,
    kInternalFlagPageSized     = 0x00000002,
    kInternalFlagPageAllocated = 0x00000004
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
				IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

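// Page supplier for the buffer descriptor's sub-page allocator
// (gIOBMDPageAllocator): returns a zeroed kernel page, or 0 on failure.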
static uintptr_t IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr  = 0;
    int           options = 0; // KMA_LOMEM;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
				page_size, 0, options);

    if (KERN_SUCCESS != kr) vmaddr = 0;
    else 		    bzero((void *) vmaddr, page_size);

    return ((uintptr_t) vmaddr);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment,
			       task_t	    inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
				task_t		  inTask,
				IOOptionBits      options,
				mach_vm_size_t    capacity,
				mach_vm_address_t alignment,
				mach_vm_address_t physicalMask)
{
    kern_return_t 	  kr;
    task_t		  mapTask = NULL;
    vm_map_t 		  vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits	  iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity)
        return false;

    _options   	      = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer	      = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
	return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    //  make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
	IOMapper::checkForSystemMapper();
	mapped = (0 != IOMapper::gSystem);
    }
    needZero = mapped;

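    // Derive allocation constraints from the physical mask: the run of low
    // zero bits gives the required alignment (at least one page), and the
    // mask ORed with those bits is the highest acceptable physical address.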
    if (physicalMask && (alignment <= 1))
    {
	alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
	highestMask = (physicalMask | alignment);
	alignment++;
	if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
	alignment = page_size;

    if (alignment >= page_size)
	capacity = round_page(capacity);

    if (alignment > page_size)
	options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
	return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
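    // When a system mapper will map the buffer, express the physical mask as
    // an address-width limit in the DMA map spec and clear highestMask so the
    // backing allocation below is not also physically restricted.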
    if (highestMask && mapped)
    {
	if (highestMask <= 0xFFFFFFFF)
	    mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
	else
	    mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
	highestMask = 0;
    }

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
	case kIOMapInhibitCache:
	    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
	    break;

	case kIOMapWriteThruCache:
	    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
	    break;

	case kIOMapWriteCombineCache:
	    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
	    break;

	case kIOMapCopybackCache:
	    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
	    break;

	case kIOMapCopybackInnerCache:
	    SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
	    break;

	case kIOMapDefaultCache:
	default:
	    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
	    break;
    }

    if (options & kIOMemoryPageable)
    {
	iomdOptions |= kIOMemoryBufferPageable;

	// must create the entry before any pages are allocated

	// set flags for entry + object create
	memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

	if (options & kIOMemoryPurgeable)
	    memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
	memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
	vmmap = kernel_map;

	// Buffers shouldn't auto-prepare; they should be prepared explicitly.
	// But that was never enforced, so what are you going to do?
	iomdOptions |= kIOMemoryAutoPrepare;

	/* Allocate a wired-down buffer inside kernel space. */

	bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

	if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
	{
	    contig |= (!mapped);
	    contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
	    // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
	    contig |= true;
#endif
	}

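	// Pick the backing allocator: physically restricted allocation for
	// contiguous, masked, or larger-than-page-aligned requests; the
	// sub-page allocator for small mapper-mapped buffers; otherwise
	// IOMallocAligned / IOMalloc.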
	if (contig || highestMask || (alignment > page_size))
	{
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
            				capacity, highestMask, alignment, contig);
	}
	else if (needZero
		  && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
	{
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
	    if (_buffer)
	    {
		IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
		debug_iomalloc_size += capacity;
#endif
	    }
	}
	else if (alignment > 1)
	{
            _buffer = IOMallocAligned(capacity, alignment);
	}
	else
	{
            _buffer = IOMalloc(capacity);
	}
	if (!_buffer)
	{
            return false;
	}
	if (needZero) bzero(_buffer, capacity);
    }

    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
	ipc_port_t	sharedMem;
	vm_size_t	size = round_page(capacity);

	kr = mach_make_memory_entry(vmmap,
				    &size, (vm_offset_t)_buffer,
				    memEntryCacheMode, &sharedMem,
				    NULL );

	if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
	    ipc_port_release_send( sharedMem );
	    kr = kIOReturnVMError;
	}
	if( KERN_SUCCESS != kr)
	    return( false );

	_memEntry = (void *) sharedMem;

	if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
	    debug_iomallocpageable_size += size;
#endif
	    mapTask = inTask;
	    if (NULL == inTask)
		inTask = kernel_task;
	}
	else if (options & kIOMapCacheMask)
	{
	    // Prefetch each page to put entries into the pmap
	    volatile UInt8 *	startAddr = (UInt8 *)_buffer;
	    volatile UInt8 *	endAddr   = (UInt8 *)_buffer + capacity;

	    while (startAddr < endAddr)
	    {
		UInt8 dummyVar = *startAddr;
		(void) dummyVar;
		startAddr += page_size;
	    }
	}
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
				inTask, iomdOptions, /* System mapper */ 0))
	return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
    						&mapSpec, sizeof(mapSpec)))
	return false;

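    // For pageable buffers the backing mapping is created below in the
    // requesting task; that mapping, not a kernel allocation, supplies the
    // buffer address.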
    if (mapTask)
    {
	if (!reserved) {
	    reserved = IONew( ExpansionData, 1 );
	    if( !reserved)
		return( false );
	}
	reserved->map = createMappingInTask(mapTask, 0,
			    kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
	if (!reserved->map)
	{
	    _buffer = 0;
	    return( false );
	}
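	// createMappingInTask() retained this descriptor and tracked the map in
	// its mapping list; drop that retain, take our own reference, and detach
	// the map so its lifetime is managed via reserved->map (released in ::free).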
	release();	    // map took a retain on this
	reserved->map->retain();
	removeMapping(reserved->map);
	mach_vm_address_t buffer = reserved->map->getAddress();
	_buffer = (void *) buffer;
	if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
	    _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
					    task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
	me->release();
	me = 0;
    }
    return me;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
					    task_t	      inTask,
                                            IOOptionBits      options,
                                            mach_vm_size_t    capacity,
                                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
	me->release();
	me = 0;
    }
    return me;
}
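
/*
 * Illustrative usage sketch (not part of the original source): a wired,
 * physically contiguous buffer restricted to 32-bit physical addresses.
 * The size and mask below are arbitrary example values.
 *
 *   IOBufferMemoryDescriptor * buf =
 *       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
 *           kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *           64 * 1024, 0x00000000FFFFFFFFULL);
 *   if (buf) {
 *       void * vaddr = buf->getBytesNoCopy();   // kernel virtual address
 *       // ... hand the buffer's physical segments to the device ...
 *       buf->release();
 *   }
 */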

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
	me->release();
	me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
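
/*
 * Illustrative usage sketch (not part of the original source): build up a
 * buffer incrementally with appendBytes().  'header' and 'payload' are
 * hypothetical caller data.
 *
 *   IOBufferMemoryDescriptor * md =
 *       IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOut, false);
 *   if (md) {
 *       md->setLength(0);                          // start empty
 *       md->appendBytes(&header, sizeof(header));
 *       md->appendBytes(payload, payloadLen);
 *       // ... use md, then ...
 *       md->release();
 *   }
 */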

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
			      | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
			      inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
               kernel_task, inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength, 0 ))
    {
	me->release();
	me = 0;
    }

    if (me)
    {
	// start out with no data
	me->setLength(0);

	if (!me->appendBytes(inBytes, inLength))
	{
	    me->release();
	    me = 0;
	}
    }
    return me;
}

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOMemoryMap *    map       = 0;
    IOAddressRange * range     = _ranges.v64;
    vm_offset_t      alignment = _alignment;

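    // The backing allocation was page-rounded when alignment >= page_size,
    // so round the cached capacity the same way before freeing.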
    if (alignment >= page_size)
	size = round_page(size);

    if (reserved)
    {
	map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
	if (map)
	    map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
	debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
	if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
	}
	else if (kInternalFlagPageAllocated & internalFlags)
	{
	    uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
	    if (page)
	    {
		kmem_free(kernel_map, page, page_size);
	    }
#if IOALLOCDEBUG
	    debug_iomalloc_size -= size;
#endif
	    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
	}
        else if (alignment > 1)
	{
            IOFreeAligned(buffer, size);
	}
        else
	{
            IOFree(buffer, size);
	}
    }
    if (range && (kIOMemoryAsReference & flags))
	IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
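    // Appends are clamped to the remaining capacity; bytes beyond _capacity
    // are silently dropped and the call still returns true.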
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
	bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
	      actualBytesToCopy);
    else
	writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
	return _buffer;
    else
	return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
	address = (IOVirtualAddress) _buffer;
    else
	address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
							IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
	*lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);