/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical  = 0x00000001,
    kInternalFlagPageSized = 0x00000002
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment,
                               task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                               task_t            inTask,
                               IOOptionBits      options,
                               mach_vm_size_t    capacity,
                               mach_vm_address_t alignment,
                               mach_vm_address_t physicalMask)
{
    kern_return_t       kr;
    task_t              mapTask = NULL;
    vm_map_t            vmmap = NULL;
    mach_vm_address_t   highestMask = 0;
    IOOptionBits        iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;

    if (!capacity)
        return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }
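
    /*
     * Worked example for the mask-derived values above: a physicalMask of
     * 0x00000000FFFFF000 has 12 trailing zero bits, so the XOR/AND yields
     * 0xFFF; highestMask becomes 0x00000000FFFFFFFF (addresses below 4 GB)
     * and alignment becomes 0x1000, i.e. one 4 KB page.
     */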

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
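        // Physically contiguous, mask-restricted, or larger-than-page-aligned
        // requests go through the physically restricted allocator below;
        // everything else falls back to IOMallocAligned() or plain IOMalloc().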

        if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
                                        (0 != (options & kIOMemoryPhysicallyContiguous)));
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }

        if (!_buffer)
        {
            return false;
        }
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t  sharedMem;
        vm_size_t   size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page(capacity))) {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return (false);

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 *    startAddr = (UInt8 *)_buffer;
            volatile UInt8 *    endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}
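
/*
 * Illustrative sketch (hypothetical driver code, not taken from this file):
 * creating a pageable buffer in a client task so the kernel and that task
 * can share it.  The helper name, task handle, size and direction are
 * assumptions.
 */
#if 0
static IOBufferMemoryDescriptor *
ExampleSharedBuffer(task_t clientTask)
{
    // Pageable memory created for, and mapped into, the client task.
    return IOBufferMemoryDescriptor::inTaskWithOptions(
                clientTask,
                kIODirectionInOut | kIOMemoryPageable,
                page_size,          // capacity: one page
                page_size);         // alignment
}
#endif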

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                                            task_t            inTask,
                                            IOOptionBits      options,
                                            mach_vm_size_t    capacity,
                                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}
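
/*
 * Illustrative sketch (hypothetical, not taken from this file): a driver
 * that needs DMA memory reachable by a 32-bit device might pass a
 * below-4GB physical mask.  The helper name and sizes are assumptions.
 */
#if 0
static IOBufferMemoryDescriptor *
ExampleDMABufferBelow4GB(void)
{
    return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                kernel_task,
                kIODirectionInOut | kIOMemoryPhysicallyContiguous,
                64 * 1024,                      // capacity: 64 KB
                0x00000000FFFFF000ULL);         // page-aligned, below 4 GB
}
#endif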

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1));
}
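
/*
 * Illustrative sketch (hypothetical): a wired, physically contiguous scratch
 * buffer created through withCapacity().  The helper name and size are
 * assumptions.
 */
#if 0
static IOBufferMemoryDescriptor *
ExampleScratchBuffer(void)
{
    // Contiguous request: the alignment passed through is the capacity
    // itself (see withCapacity() above).
    return IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionInOut, true);
}
#endif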

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
               kernel_task, inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength, 0))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}
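
/*
 * Illustrative sketch (hypothetical): wrapping a small, stack-resident
 * command block in a descriptor; withBytes() copies the data, so the source
 * may go out of scope afterwards.  The helper name and command contents are
 * assumptions.
 */
#if 0
static IOBufferMemoryDescriptor *
ExampleCopyCommand(void)
{
    UInt8 command[16] = { 0 };          // device-specific bytes would go here
    return IOBufferMemoryDescriptor::withBytes(command, sizeof(command),
                                               kIODirectionOut, false);
}
#endif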

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page(size);
#endif
    }
    else if (buffer)
    {
        if (internalFlags & kInternalFlagPhysical)
        {
            if (kInternalFlagPageSized & internalFlags)
                size = round_page(size);
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}
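
/*
 * Illustrative sketch (hypothetical): reusing one descriptor for transfers
 * of varying size rather than allocating a new buffer per transfer.  The
 * helper name and parameters are assumptions.
 */
#if 0
static void
ExampleReuse(IOBufferMemoryDescriptor * bmd, vm_size_t transferSize)
{
    // transferSize must not exceed bmd->getCapacity().
    bmd->setLength(transferSize);
}
#endif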

/*
 * setDirection:
 *
 * Change the descriptor's transfer direction.  This eliminates the need to
 * destroy and create new buffers when different transfer directions are
 * needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
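
/*
 * Illustrative sketch (hypothetical): building a buffer up incrementally.
 * appendBytes() advances the length as data is added, capped at the
 * descriptor's capacity.  The helper name and parameters are assumptions.
 */
#if 0
static bool
ExampleBuildPacket(IOBufferMemoryDescriptor * bmd,
                   const void * header, vm_size_t headerLen,
                   const void * payload, vm_size_t payloadLen)
{
    bmd->setLength(0);                  // start empty
    return bmd->appendBytes(header, headerLen)
        && bmd->appendBytes(payload, payloadLen);
}
#endif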

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}
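
/*
 * Illustrative sketch (hypothetical): zero-filling a kernel-allocated buffer
 * through its kernel virtual address before handing it to hardware.  The
 * helper name is an assumption.
 */
#if 0
static void
ExampleZeroFill(IOBufferMemoryDescriptor * bmd)
{
    void * p = bmd->getBytesNoCopy();
    if (p)
        bzero(p, bmd->getLength());
}
#endif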


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);
617