/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagRealloc = 0x00000001,
};

volatile ppnum_t gIOHighestAllocatedPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
				IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment,
			       task_t	    inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
				task_t		  inTask,
				IOOptionBits      options,
				mach_vm_size_t    capacity,
				mach_vm_address_t alignment,
				mach_vm_address_t physicalMask)
{
    kern_return_t 	kr;
    task_t		mapTask = NULL;
    vm_map_t 		vmmap = NULL;
    addr64_t            lastIOAddr;
    mach_vm_address_t   highestMask = 0;
    bool		usePhys;
    IOOptionBits	iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;

    if (!capacity)
        return false;

    _options   	      = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer	      = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
	return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);

    if (physicalMask && (alignment <= 1))
    {
	alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
	highestMask = (physicalMask | alignment);
	alignment++;
    }
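    // Illustrative worked example of the derivation above: a physicalMask of
    // 0xFFFFF000 (low 12 bits clear) yields alignment == 0x1000 (4 KB) and
    // highestMask == 0xFFFFFFFF, i.e. a page-aligned allocation below 4 GB.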

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask)) && (alignment < page_size))
	alignment = page_size;

    if (alignment >= page_size)
	capacity = round_page(capacity);

    if (alignment > page_size)
	options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
	return false;

    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
	physicalMask = 0xFFFFFFFF;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
	case kIOMapInhibitCache:
	    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
	    break;

	case kIOMapWriteThruCache:
	    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
	    break;

	case kIOMapWriteCombineCache:
	    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
	    break;

	case kIOMapCopybackCache:
	    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
	    break;

	case kIOMapDefaultCache:
	default:
	    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
	    break;
    }

    if (options & kIOMemoryPageable)
    {
	iomdOptions |= kIOMemoryBufferPageable;

	// must create the entry before any pages are allocated

	// set flags for entry + object create
	memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

	if (options & kIOMemoryPurgeable)
	    memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
	memEntryCacheMode |= MAP_MEM_NAMED_REUSE;

	if (IOMapper::gSystem)
	    // assuming mapped space is 2G
	    lastIOAddr = (1UL << 31) - PAGE_SIZE;
	else
	    lastIOAddr = ptoa_64(gIOHighestAllocatedPage);

	usePhys = (highestMask && (lastIOAddr != (lastIOAddr & highestMask))
		    && (alignment <= page_size));

	if (!usePhys && (options & kIOMemoryPhysicallyContiguous))
	{
	    _buffer = (void *) IOKernelAllocateContiguous(capacity, highestMask, alignment);
	    usePhys = (NULL == _buffer);
	}
	if (usePhys)
	{
	    mach_vm_address_t address;
	    iomdOptions &= ~kIOMemoryTypeVirtual64;
	    iomdOptions |= kIOMemoryTypePhysical64;

	    address = IOMallocPhysical(capacity, highestMask);
	    _buffer = (void *) address;
	    if (!_buffer)
		return false;

	    mapTask = inTask;
	    inTask = 0;
	}
	else
	{
	    vmmap = kernel_map;

	    // Buffers shouldn't auto-prepare; they should be prepared explicitly.
	    // That was never enforced, however, so keep the historical behavior.
	    iomdOptions |= kIOMemoryAutoPrepare;

	    /* Allocate a wired-down buffer inside kernel space. */
	    if (options & kIOMemoryPhysicallyContiguous)
	    {
		// contiguous allocation was already attempted above
	    }
	    else if (alignment > 1)
	    {
		_buffer = IOMallocAligned(capacity, alignment);
	    }
	    else
	    {
		_buffer = IOMalloc(capacity);
	    }
	    if (!_buffer)
		return false;
	}
    }

    if( (kIOMemoryTypePhysical64 != (kIOMemoryTypeMask & iomdOptions))
	&& (options & (kIOMemoryPageable | kIOMapCacheMask))) {
	ipc_port_t	sharedMem;
	vm_size_t	size = round_page(capacity);

	kr = mach_make_memory_entry(vmmap,
				    &size, (vm_offset_t)_buffer,
				    memEntryCacheMode, &sharedMem,
				    NULL );

	if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
	    ipc_port_release_send( sharedMem );
	    kr = kIOReturnVMError;
	}
	if( KERN_SUCCESS != kr)
	    return( false );

	_memEntry = (void *) sharedMem;

	if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
	    debug_iomallocpageable_size += size;
#endif
	    mapTask = inTask;
	    if (NULL == inTask)
		inTask = kernel_task;
	}
	else if (options & kIOMapCacheMask)
	{
	    // Prefetch each page to put entries into the pmap
	    volatile UInt8 *	startAddr = (UInt8 *)_buffer;
	    volatile UInt8 *	endAddr   = (UInt8 *)_buffer + capacity;

	    while (startAddr < endAddr)
	    {
		*startAddr;
		startAddr += page_size;
	    }
	}
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
				inTask, iomdOptions, /* System mapper */ 0))
	return false;

    if (highestMask && !IOMapper::gSystem)
    {
	IOMDDMACharacteristics mdSummary;

	bzero(&mdSummary, sizeof(mdSummary));
	IOReturn rtn = dmaCommandOperation(
		kIOMDGetCharacteristics,
		&mdSummary, sizeof(mdSummary));
	if (rtn)
	    return false;

	if (mdSummary.fHighestPage)
	{
	    ppnum_t highest;
	    while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
	    {
		if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
					(UInt32 *) &gIOHighestAllocatedPage))
		    break;
	    }
	    lastIOAddr = ptoa_64(mdSummary.fHighestPage);
	}
	else
	    lastIOAddr = ptoa_64(gIOLastPage);

	if (lastIOAddr != (lastIOAddr & highestMask))
	{
	    if (kIOMemoryTypePhysical64 != (_flags & kIOMemoryTypeMask))
	    {
		// flag a retry
		_internalFlags |= kInternalFlagRealloc;
	    }
	    return false;
	}
    }

    if (mapTask)
    {
	if (!reserved) {
	    reserved = IONew( ExpansionData, 1 );
	    if( !reserved)
		return( false );
	}
	reserved->map = createMappingInTask(mapTask, 0,
			    kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
	if (!reserved->map)
	{
	    _buffer = 0;
	    return( false );
	}
	release();	    // map took a retain on this
	reserved->map->retain();
	removeMapping(reserved->map);
	mach_vm_address_t buffer = reserved->map->getAddress();
	_buffer = (void *) buffer;
	if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
	    _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
					    task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
	bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
	me->release();
	me = 0;
	if (retry)
	{
	    me = new IOBufferMemoryDescriptor;
	    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0))
	    {
		me->release();
		me = 0;
	    }
	}
    }
    return me;
}
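
/*
 * Usage sketch (illustrative only): allocate a wired, physically contiguous
 * page for DMA from a kernel driver, then access and release it.
 *
 *    IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithOptions(
 *            kernel_task,
 *            kIODirectionOutIn | kIOMemoryPhysicallyContiguous,
 *            page_size, page_size);
 *    if (buf)
 *    {
 *        buf->prepare();                          // wire for I/O
 *        void * cpuPtr = buf->getBytesNoCopy();   // kernel virtual address
 *        // ... program the hardware, perform the transfer ...
 *        buf->complete();
 *        buf->release();
 *    }
 */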

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
					    task_t	      inTask,
                                            IOOptionBits      options,
                                            mach_vm_size_t    capacity,
                                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
	bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
	me->release();
	me = 0;
	if (retry)
	{
	    me = new IOBufferMemoryDescriptor;
	    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
	    {
		me->release();
		me = 0;
	    }
	}
    }
    return me;
}
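
/*
 * Usage sketch (illustrative only): constrain the buffer to addresses a
 * 32-bit DMA engine can reach by passing a physical address mask.
 *
 *    IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *            kernel_task, kIODirectionOutIn | kIOMemoryPhysicallyContiguous,
 *            65536, 0x00000000FFFFFFFFULL);
 */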

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
	bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
	me->release();
	me = 0;
	if (retry)
	{
	    me = new IOBufferMemoryDescriptor;
	    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0))
	    {
		me->release();
		me = 0;
	    }
	}
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
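
/*
 * Usage sketch (illustrative only):
 *
 *    IOBufferMemoryDescriptor * buf =
 *        IOBufferMemoryDescriptor::withCapacity(1024, kIODirectionOutIn, false);
 *    if (buf)
 *    {
 *        bzero(buf->getBytesNoCopy(), buf->getCapacity());
 *        // ... fill and use the buffer ...
 *        buf->release();
 *    }
 */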

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
			      | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
			      inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
               kernel_task, inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength, 0 ))
    {
	bool retry = (0 != (kInternalFlagRealloc & me->_internalFlags));
	me->release();
	me = 0;
	if (retry)
	{
	    me = new IOBufferMemoryDescriptor;
	    if (me && !me->initWithPhysicalMask(
	           kernel_task, inDirection | kIOMemoryUnshared
	            | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
	           inLength, inLength, 0 ))
	    {
		me->release();
		me = 0;
	    }
	}
    }

    if (me)
    {
	// start out with no data
	me->setLength(0);

	if (!me->appendBytes(inBytes, inLength))
	{
	    me->release();
	    me = 0;
	}
    }
    return me;
}
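
/*
 * Usage sketch (illustrative only): wrap a copy of an existing kernel buffer.
 * kCommandBytes is a placeholder for caller-supplied data.
 *
 *    static const UInt8 kCommandBytes[] = { 0x12, 0x00, 0x00, 0x24, 0x00, 0x00 };
 *    IOBufferMemoryDescriptor * cmd = IOBufferMemoryDescriptor::withBytes(
 *            kCommandBytes, sizeof(kCommandBytes), kIODirectionOut, false);
 *    // On success cmd's length and capacity both equal sizeof(kCommandBytes).
 */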

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags     = _flags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOMemoryMap *    map       = 0;
    IOAddressRange * range     = _ranges.v64;
    mach_vm_address_t source   = range ? range->address : 0;
    vm_offset_t      alignment = _alignment;

    if (alignment >= page_size)
	size = round_page(size);

    if (reserved)
    {
	map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
	if (map)
	    map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
	debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
	if (kIOMemoryTypePhysical64 == (flags & kIOMemoryTypeMask))
	    IOFreePhysical(source, size);
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (range && (kIOMemoryAsReference & flags))
	IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}
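
/*
 * Illustrative use: one descriptor can be reused for transfers of varying
 * size instead of allocating a new one per transfer.
 *
 *    buf->setLength(512);    // next transfer moves only 512 of the buffer's
 *                            // getCapacity() bytes; no reallocation occurs
 */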

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}
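
/*
 * Illustrative use:
 *
 *    buf->setDirection(kIODirectionIn);    // reuse the same buffer for a read
 */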

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
	bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
	      actualBytesToCopy);
    else
	writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
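
/*
 * Illustrative use (header/payload are placeholders for caller data):
 *
 *    buf->setLength(0);                              // start empty
 *    buf->appendBytes(&header, sizeof(header));      // length grows by sizeof(header)
 *    buf->appendBytes(payload, payloadLen);          // copies are clipped at capacity
 */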

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
	return _buffer;
    else
	return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
	address = (IOVirtualAddress) _buffer;
    else
	address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
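
/*
 * Illustrative use: obtain a pointer to a sub-range of the buffer.
 *
 *    void * p = buf->getBytesNoCopy(16, 4);    // non-NULL only if bytes 16..19
 *                                              // lie within the current length
 *    if (p) { ... }
 */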

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
							IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
	*lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);