/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

volatile ppnum_t gIOHighestAllocatedPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);
bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment,
                               task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
                                task_t            inTask,
                                IOOptionBits      options,
                                mach_vm_size_t    capacity,
                                mach_vm_address_t alignment,
                                mach_vm_address_t physicalMask)
{
    kern_return_t       kr;
    task_t              mapTask = NULL;
    vm_map_t            vmmap = NULL;
    addr64_t            lastIOAddr;
    IOAddressRange      range;
    IOOptionBits        iomdOptions = kIOMemoryTypeVirtual64;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;
    range.address = 0;
    range.length  = 0;
    _ranges.v64   = &range;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask)) && (alignment < page_size))
        alignment = page_size;

    if (physicalMask && (alignment <= 1))
        alignment = ((physicalMask ^ PAGE_MASK) & PAGE_MASK) + 1;
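    // Illustrative arithmetic (assuming a 4K page, so PAGE_MASK == 0xFFF):
    // a physicalMask of 0xFFFFFF00 has low bits 0xF00, and the expression
    // above yields ((0xF00 ^ 0xFFF) & 0xFFF) + 1 == 0x100, i.e. 256-byte
    // alignment, the finest granularity the mask can express within a page.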

    _alignment = alignment;

    if (((inTask != kernel_task) && !(options & kIOMemoryPageable)) ||
        (physicalMask && (options & kIOMapCacheMask)))
        return false;

    if ((options & kIOMemoryPhysicallyContiguous) && !physicalMask)
        physicalMask = 0xFFFFFFFF;

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;

        if (IOMapper::gSystem)
            // assuming mapped space is 2G
            lastIOAddr = (1UL << 31) - PAGE_SIZE;
        else
            lastIOAddr = ptoa_64(gIOHighestAllocatedPage);

        if (physicalMask && (lastIOAddr != (lastIOAddr & physicalMask)))
        {
            mach_vm_address_t address;
            iomdOptions &= ~kIOMemoryTypeVirtual64;
            iomdOptions |= kIOMemoryTypePhysical64;

            address = IOMallocPhysical(capacity, physicalMask);
            _buffer = (void *) address;
            if (!_buffer)
                return false;

            mapTask = inTask;
            inTask = 0;
        }
        else
        {
            vmmap = kernel_map;

            // Buffers shouldn't auto-prepare; they should be prepared
            // explicitly. That was never enforced, however, so preserve the
            // legacy behavior and auto-prepare here.
            iomdOptions |= kIOMemoryAutoPrepare;

            /* Allocate a wired-down buffer inside kernel space. */
            if (options & kIOMemoryPhysicallyContiguous)
                _buffer = (void *) IOKernelAllocateContiguous(capacity, alignment);
            else if (alignment > 1)
                _buffer = IOMallocAligned(capacity, alignment);
            else
                _buffer = IOMalloc(capacity);
            if (!_buffer)
                return false;
        }
    }

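    // For buffers that are pageable, or wired with a non-default cache mode,
    // a named memory entry is created below so the object can later be mapped
    // with the requested sharing and caching attributes.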
    if( (kIOMemoryTypePhysical64 != (kIOMemoryTypeMask & iomdOptions))
        && (options & (kIOMemoryPageable | kIOMapCacheMask))) {
        ipc_port_t      sharedMem;
        vm_size_t       size = round_page_32(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL );

        if( (KERN_SUCCESS == kr) && (size != round_page_32(capacity))) {
            ipc_port_release_send( sharedMem );
            kr = kIOReturnVMError;
        }
        if( KERN_SUCCESS != kr)
            return( false );

        _memEntry = (void *) sharedMem;

        if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 *    startAddr = (UInt8 *)_buffer;
            volatile UInt8 *    endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    range.address = (mach_vm_address_t) _buffer;
    range.length  = capacity;

    if (!super::initWithOptions(&range, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (physicalMask && !IOMapper::gSystem)
    {
        IOMDDMACharacteristics mdSummary;

        bzero(&mdSummary, sizeof(mdSummary));
        IOReturn rtn = dmaCommandOperation(
                kIOMDGetCharacteristics,
                &mdSummary, sizeof(mdSummary));
        if (rtn)
            return false;

        if (mdSummary.fHighestPage)
        {
            ppnum_t highest;
            while (mdSummary.fHighestPage > (highest = gIOHighestAllocatedPage))
            {
                if (OSCompareAndSwap(highest, mdSummary.fHighestPage,
                                     (UInt32 *) &gIOHighestAllocatedPage))
                    break;
            }
            lastIOAddr = ptoa_64(mdSummary.fHighestPage);
        }
        else
            lastIOAddr = ptoa_64(gIOLastPage);

        if (lastIOAddr != (lastIOAddr & physicalMask))
        {
            if (kIOMemoryTypePhysical64 != (_flags & kIOMemoryTypeMask))
            {
                // flag a retry
                _physSegCount = 1;
            }
            return false;
        }
    }

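    // mapTask is set when the buffer still needs an explicit mapping into the
    // requesting task (pageable buffers, or wired buffers placed by a physical
    // mask); create that mapping now and publish its address as the buffer.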
    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew( ExpansionData, 1 );
            if( !reserved)
                return( false );
        }
        reserved->map = map(mapTask, 0, kIOMapAnywhere, 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return( false );
        }
        release();          // map took a retain on this
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithOptions(options, capacity, alignment, inTask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
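
/*
 * Illustrative use only (not part of the original source): a driver might
 * allocate a wired, physically contiguous DMA buffer in the kernel task as
 * sketched here.
 *
 *   IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithOptions(
 *       kernel_task, kIODirectionInOut | kIOMemoryPhysicallyContiguous,
 *       4096, 4096);
 *   if (buf) {
 *       buf->prepare();                        // wire the memory for I/O
 *       void * vaddr = buf->getBytesNoCopy();  // kernel virtual address
 *       // ... program the hardware, perform the transfer ...
 *       buf->complete();
 *       buf->release();
 *   }
 */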

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                                            task_t            inTask,
                                            IOOptionBits      options,
                                            mach_vm_size_t    capacity,
                                            mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
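
/*
 * Illustrative use only (the mask is an example value): constraining the
 * buffer's physical placement, e.g. to page-aligned 32-bit addresses.
 *
 *   IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *       kernel_task, kIODirectionOut | kIOMemoryPhysicallyContiguous,
 *       65536, 0x00000000FFFFF000ULL);
 */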

bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    return(IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, options, capacity, alignment));
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
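
/*
 * Illustrative use only: a 16 KB contiguous scratch buffer whose length is
 * then trimmed to the size of an individual transfer.
 *
 *   IOBufferMemoryDescriptor * buf =
 *       IOBufferMemoryDescriptor::withCapacity(16384, kIODirectionOutIn, true);
 *   if (buf)
 *       buf->setLength(512);
 */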

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
    {
        bool retry = me->_physSegCount;
        me->release();
        me = 0;
        if (retry)
        {
            me = new IOBufferMemoryDescriptor;
            if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous))
            {
                me->release();
                me = 0;
            }
        }
    }
    return me;
}
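
/*
 * Illustrative use only ('cmd' is a hypothetical local): wrapping a small
 * command structure in a descriptor preloaded with a copy of its bytes.
 *
 *   MyCommand cmd = { ... };
 *   IOBufferMemoryDescriptor * buf =
 *       IOBufferMemoryDescriptor::withBytes(&cmd, sizeof(cmd), kIODirectionOut);
 */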

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags     = _flags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOVirtualAddress source    = _ranges.v64->address;
    IOMemoryMap *    map       = 0;
    vm_offset_t      alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
        if (map)
            map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        debug_iomallocpageable_size -= round_page_32(size);
#endif
    }
    else if (buffer)
    {
        if (kIOMemoryTypePhysical64 == (flags & kIOMemoryTypeMask))
            IOFreePhysical((mach_vm_address_t) source, size);
        else if (options & kIOMemoryPhysicallyContiguous)
            IOKernelFreeContiguous((mach_vm_address_t) buffer, size);
        else if (alignment > 1)
            IOFreeAligned(buffer, size);
        else
            IOFree(buffer, size);
    }
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _ranges.v64->length = length;
}
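
/*
 * Illustrative use only: one descriptor can be reused for transfers of
 * different sizes rather than allocating a new one per request.
 *
 *   buf->setLength(bytesThisTransfer);   // must be <= buf->getCapacity()
 */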

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
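
/*
 * Illustrative use only ('hdr' and 'payload' are hypothetical locals):
 * building up the buffer contents incrementally.
 *
 *   buf->setLength(0);
 *   buf->appendBytes(&hdr, sizeof(hdr));
 *   buf->appendBytes(payload, payloadLen);
 */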

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
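
/*
 * Illustrative use only: accessing a field at a known offset without copying.
 * The call returns 0 if the requested range is not within the current length.
 *
 *   UInt32 * field = (UInt32 *) buf->getBytesNoCopy(offset, sizeof(UInt32));
 *   if (field) {
 *       // use *field
 *   }
 */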

/* DEPRECATED */ void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                    IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);