/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange * /* ranges        */ ,
                                  UInt32           /* withCount     */ ,
                                  IODirection      /* withDirection */ ,
                                  task_t           /* withTask      */ ,
                                  bool             /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment,
                               task_t       inTask)
{
    vm_map_t map = 0;
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this.
        // Buffers should never auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so keep the behaviour for compatibility.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache mode
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            kr = mach_make_memory_entry( map,
                        &size, _ranges.v[0].address,
                        memEntryCacheMode, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
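
/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * pageable memory backed in a client task might call inTaskWithOptions()
 * roughly like this.  The task handle and sizes below are hypothetical.
 *
 *     IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithOptions(
 *                                          clientTask,                  // task_t of the user client
 *                                          kIODirectionOutIn | kIOMemoryPageable,
 *                                          64 * 1024,                   // capacity
 *                                          page_size);                  // alignment
 *     if (buf) {
 *         // ... use buf, e.g. map it or hand it to a user client ...
 *         buf->release();
 *     }
 */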

bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}
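
/*
 * Illustrative sketch (not part of the original file): withOptions() is the
 * kernel-task convenience form of inTaskWithOptions().  A page-aligned, wired
 * kernel buffer could be obtained roughly as follows; the size is hypothetical.
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn,
 *                                               8192,         // capacity
 *                                               page_size);   // alignment
 *     if (buf) {
 *         void * p = buf->getBytesNoCopy();
 *         // ... fill or read p ...
 *         buf->release();
 *     }
 */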


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
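
/*
 * Illustrative sketch (not part of the original file): allocating a small
 * physically contiguous buffer for a DMA transfer.  The 4096-byte size is
 * hypothetical.
 *
 *     IOBufferMemoryDescriptor * dmaBuf =
 *         IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, true);
 *     if (dmaBuf) {
 *         IOPhysicalAddress phys = dmaBuf->getPhysicalAddress();
 *         // ... program the hardware with phys, perform the transfer ...
 *         dmaBuf->release();
 *     }
 */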

/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)) {
        me->release();
        me = 0;
    }
    return me;
}
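
/*
 * Illustrative sketch (not part of the original file): wrapping an existing
 * command block in a descriptor so it can be handed to lower layers.  The
 * structure and direction are hypothetical.
 *
 *     struct MyCommand cmd;
 *     // ... fill in cmd ...
 *     IOBufferMemoryDescriptor * cmdDesc =
 *         IOBufferMemoryDescriptor::withBytes(&cmd, sizeof(cmd), kIODirectionOut, false);
 *     if (cmdDesc) {
 *         // the bytes were copied, so cmd may be reused immediately
 *         cmdDesc->release();
 *     }
 */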

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     map       = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes).  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
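
/*
 * Illustrative sketch (not part of the original file): reusing one buffer
 * descriptor for transfers of varying size instead of allocating a new one
 * each time.  The sizes are hypothetical.
 *
 *     IOBufferMemoryDescriptor * buf =
 *         IOBufferMemoryDescriptor::withCapacity(65536, kIODirectionOut, false);
 *     if (buf) {
 *         buf->setLength(1024);      // first transfer moves 1 KB
 *         // ... issue the transfer ...
 *         buf->setLength(16384);     // next transfer reuses the same buffer for 16 KB
 *         // ... issue the transfer ...
 *         buf->release();
 *     }
 */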

/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
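
/*
 * Illustrative sketch (not part of the original file): building up a message
 * in a descriptor piece by piece.  The header/payload names are hypothetical.
 *
 *     IOBufferMemoryDescriptor * msg =
 *         IOBufferMemoryDescriptor::withCapacity(256, kIODirectionOut, false);
 *     if (msg) {
 *         msg->setLength(0);                        // start empty
 *         msg->appendBytes(&header, sizeof(header));
 *         msg->appendBytes(payload, payloadLen);    // length is tracked automatically
 *         msg->release();
 *     }
 */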

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
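
/*
 * Illustrative sketch (not part of the original file): peeking at a sub-range
 * of the buffer in place.  The ranged form returns 0 if the requested window
 * does not lie entirely within the current length.
 *
 *     void * field = buf->getBytesNoCopy(16, 4);   // bytes 16..19, if length >= 20
 *     if (field) {
 *         // ... read the 4-byte field without copying the buffer ...
 *     }
 */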

OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);