/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOLib.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)

IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
	IOMemoryDescriptor ** descriptors,
	UInt32                withCount,
	IODirection           withDirection,
	bool                  asReference )
{
	//
	// Create a new IOMultiMemoryDescriptor.  The "buffer" is made up of
	// several memory descriptors that are chained end-to-end to form a
	// single logical memory descriptor.
	//
	// Passing the descriptors as a reference avoids an extra allocation.
	//

	IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;

	if (me && me->initWithDescriptors(
		    /* descriptors   */ descriptors,
		    /* withCount     */ withCount,
		    /* withDirection */ withDirection,
		    /* asReference   */ asReference ) == false) {
		me->release();
		me = NULL;
	}

	return me;
}
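
/*
 * Usage sketch (illustrative only, not part of this file): concatenating two
 * caller-owned sub-descriptors into one logical buffer.  The names "mdA" and
 * "mdB" are hypothetical.  With asReference == false the pointer array is
 * copied, so a stack-allocated array is safe here; the sub-descriptors are
 * retained either way.
 */
#if 0
static IOMultiMemoryDescriptor *
ExampleConcatenate(IOMemoryDescriptor * mdA, IOMemoryDescriptor * mdB)
{
	IOMemoryDescriptor * parts[2] = { mdA, mdB };

	// Both sub-descriptors must already share the direction passed here.
	return IOMultiMemoryDescriptor::withDescriptors(parts, 2,
	           kIODirectionOutIn, /* asReference */ false);
}
#endif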

bool
IOMultiMemoryDescriptor::initWithDescriptors(
	IOMemoryDescriptor ** descriptors,
	UInt32                withCount,
	IODirection           withDirection,
	bool                  asReference )
{
	unsigned index;
	IOOptionBits copyFlags;
	//
	// Initialize an IOMultiMemoryDescriptor.  The "buffer" is made up of
	// several memory descriptors that are chained end-to-end to form a
	// single logical memory descriptor.
	//
	// Passing the descriptors as a reference avoids an extra allocation.
	//

	assert(descriptors);

	// Release existing descriptors, if any
	if (_descriptors) {
		for (unsigned index = 0; index < _descriptorsCount; index++) {
			_descriptors[index]->release();
		}

		if (_descriptorsIsAllocated) {
			IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
		}
	} else {
		// Ask our superclass' opinion.
		if (super::init() == false) {
			return false;
		}
	}

	// Initialize our minimal state.

	_descriptors            = NULL;
	_descriptorsCount       = withCount;
	_descriptorsIsAllocated = asReference ? false : true;
	_flags                  = withDirection;
#ifndef __LP64__
	_direction              = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
	_length                 = 0;
	_mappings               = NULL;
	_tag                    = 0;

	if (asReference) {
		_descriptors = descriptors;
	} else {
		_descriptors = IONew(IOMemoryDescriptor *, withCount);
		if (_descriptors == NULL) {
			return false;
		}

		bcopy( /* from  */ descriptors,
		    /* to    */ _descriptors,
		    /* bytes */ withCount * sizeof(IOMemoryDescriptor *));
	}

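	// Retain each sub-descriptor, accumulate the total length, adopt the
	// first non-zero tag, and check that every sub-descriptor's direction
	// matches the direction requested for the chain as a whole.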
	for (index = 0; index < withCount; index++) {
		descriptors[index]->retain();
		_length += descriptors[index]->getLength();
		if (_tag == 0) {
			_tag = descriptors[index]->getTag();
		}
		assert(descriptors[index]->getDirection() ==
		    (withDirection & kIOMemoryDirectionMask));
	}

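	// A pageable buffer cannot be mixed with non-pageable ones: every
	// sub-descriptor must agree on kIOMemoryBufferPageable, and the common
	// setting is propagated into this descriptor's flags.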
	enum { kCopyFlags = kIOMemoryBufferPageable };
	copyFlags = 0;
	for (index = 0; index < withCount; index++) {
		if (!index) {
			copyFlags = (kCopyFlags & descriptors[index]->_flags);
		} else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) {
			break;
		}
	}
	if (index < withCount) {
		return false;
	}
	_flags |= copyFlags;

	return true;
}

void
IOMultiMemoryDescriptor::free()
{
	//
	// Free all of this object's outstanding resources.
	//

	if (_descriptors) {
		for (unsigned index = 0; index < _descriptorsCount; index++) {
			_descriptors[index]->release();
		}

		if (_descriptorsIsAllocated) {
			IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
		}
	}

	super::free();
}

IOReturn
IOMultiMemoryDescriptor::prepare(IODirection forDirection)
{
	//
	// Prepare the memory for an I/O transfer.
	//
	// This involves paging in the memory and wiring it down for the duration
	// of the transfer.  The complete() method finishes the processing of the
	// memory after the I/O transfer finishes.
	//

	unsigned index;
	IOReturn status = kIOReturnInternalError;
	IOReturn statusUndo;

	if (forDirection == kIODirectionNone) {
		forDirection = getDirection();
	}

	for (index = 0; index < _descriptorsCount; index++) {
		status = _descriptors[index]->prepare(forDirection);
		if (status != kIOReturnSuccess) {
			break;
		}
	}

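	// Unwind on partial failure: complete() the sub-descriptors that were
	// successfully prepared so no memory is left wired.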
	if (status != kIOReturnSuccess) {
		for (unsigned indexUndo = 0; indexUndo < index; indexUndo++) {
			statusUndo = _descriptors[indexUndo]->complete(forDirection);
			assert(statusUndo == kIOReturnSuccess);
		}
	}

	return status;
}

IOReturn
IOMultiMemoryDescriptor::complete(IODirection forDirection)
{
	//
	// Complete processing of the memory after an I/O transfer finishes.
	//
	// This method shouldn't be called unless a prepare() was previously issued;
	// the prepare() and complete() must occur in pairs, before and after an I/O
	// transfer.
	//

	IOReturn status;
	IOReturn statusFinal = kIOReturnSuccess;

	if (forDirection == kIODirectionNone) {
		forDirection = getDirection();
	}

	for (unsigned index = 0; index < _descriptorsCount; index++) {
		status = _descriptors[index]->complete(forDirection);
		if (status != kIOReturnSuccess) {
			statusFinal = status;
		}
		assert(status == kIOReturnSuccess);
	}

	return statusFinal;
}
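
/*
 * Pairing sketch (illustrative only, not part of this file): prepare() wires
 * the chained memory before an I/O transfer and complete() unwires it
 * afterwards, always in matched pairs.  "md" and DoTransfer() are
 * hypothetical stand-ins for a driver's descriptor and transfer routine.
 */
#if 0
static IOReturn
ExampleTransfer(IOMultiMemoryDescriptor * md)
{
	IOReturn status = md->prepare();
	if (status != kIOReturnSuccess) {
		return status;
	}

	status = DoTransfer(md);            // hypothetical I/O on the wired memory

	(void) md->complete();              // must balance the successful prepare()
	return status;
}
#endif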

addr64_t
IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
    IOByteCount * length,
    IOOptionBits  options)
{
	//
	// This method returns the physical address of the byte at the given offset
	// into the memory, and optionally the length of the physically contiguous
	// segment from that offset.
	//

	assert(offset <= _length);

	for (unsigned index = 0; index < _descriptorsCount; index++) {
		if (offset < _descriptors[index]->getLength()) {
			return _descriptors[index]->getPhysicalSegment(offset, length, options);
		}
		offset -= _descriptors[index]->getLength();
	}

	if (length) {
		*length = 0;
	}

	return 0;
}
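
/*
 * Walk sketch (illustrative only, not part of this file): getPhysicalSegment()
 * resolves one physically contiguous run at a time, so a caller covers the
 * whole (prepared) buffer by advancing the offset by each returned length
 * until it reaches getLength().
 */
#if 0
static void
ExampleWalkSegments(IOMultiMemoryDescriptor * md)
{
	IOByteCount offset = 0;

	while (offset < md->getLength()) {
		IOByteCount length  = 0;
		addr64_t    address = md->getPhysicalSegment(offset, &length, 0);

		if (!address || !length) {
			break;                  // no further mapped segments
		}
		IOLog("segment at 0x%llx, %llu bytes\n",
		    (unsigned long long) address, (unsigned long long) length);
		offset += length;
	}
}
#endif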

#include "IOKitKernelInternal.h"

IOReturn
IOMultiMemoryDescriptor::doMap(vm_map_t           __addressMap,
    IOVirtualAddress *  __address,
    IOOptionBits       options,
    IOByteCount        __offset,
    IOByteCount        __length)
{
	IOMemoryMap *     mapping = (IOMemoryMap *) *__address;
	vm_map_t          map     = mapping->fAddressMap;
	mach_vm_size_t    offset  = mapping->fOffset;
	mach_vm_size_t    length  = mapping->fLength;
	mach_vm_address_t address = mapping->fAddress;

	kern_return_t     err;
	IOOptionBits      subOptions;
	mach_vm_size_t    mapOffset;
	mach_vm_size_t    bytesRemaining, chunk;
	mach_vm_address_t nextAddress;
	IOMemoryDescriptorMapAllocRef ref;
	vm_prot_t                     prot;

	do {
		prot = VM_PROT_READ;
		if (!(kIOMapReadOnly & options)) {
			prot |= VM_PROT_WRITE;
		}

		if (kIOMapOverwrite & options) {
			if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
				map = IOPageableMapForAddress(address);
			}
			err = KERN_SUCCESS;
		} else {
			ref.map     = map;
			ref.tag     = IOMemoryTag(map);
			ref.options = options;
			ref.size    = length;
			ref.prot    = prot;
			if (options & kIOMapAnywhere) {
				// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
				ref.mapped = 0;
			} else {
				ref.mapped = mapping->fAddress;
			}

			if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
				err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
			} else {
				err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
			}

			if (KERN_SUCCESS != err) {
				break;
			}

			address = ref.mapped;
			mapping->fAddress = address;
		}

		mapOffset = offset;
		bytesRemaining = length;
		nextAddress = address;
		assert(mapOffset <= _length);
		subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;

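		// Map each sub-descriptor's slice into the range reserved above.
		// kIOMapOverwrite makes createMappingInTask() place the pages at
		// the fixed nextAddress rather than allocating a new range.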
		for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++) {
			chunk = _descriptors[index]->getLength();
			if (mapOffset >= chunk) {
				mapOffset -= chunk;
				continue;
			}
			chunk -= mapOffset;
			if (chunk > bytesRemaining) {
				chunk = bytesRemaining;
			}
			IOMemoryMap * subMap;
			subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk);
			if (!subMap) {
				break;
			}
			subMap->release(); // kIOMapOverwrite means it will not deallocate

			bytesRemaining -= chunk;
			nextAddress += chunk;
			mapOffset = 0;
		}
		if (bytesRemaining) {
			err = kIOReturnUnderrun;
		}
	} while (false);

	return err;
}

IOReturn
IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn     err;
	IOOptionBits totalState, state;

	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
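	// Aggregate the per-descriptor states into one answer: Empty dominates
	// Volatile, which dominates NonVolatile, so the weakest retention
	// guarantee among the sub-descriptors wins.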
	for (unsigned index = 0; index < _descriptorsCount; index++) {
		err = _descriptors[index]->setPurgeable(newState, &state);
		if (kIOReturnSuccess != err) {
			break;
		}

		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}
	if (oldState) {
		*oldState = totalState;
	}

	return err;
}

IOReturn
IOMultiMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
	IOReturn     err;

	if (iokit_iomd_setownership_enabled == FALSE) {
		return kIOReturnUnsupported;
	}

	err = kIOReturnSuccess;
	for (unsigned index = 0; index < _descriptorsCount; index++) {
		err = _descriptors[index]->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		if (kIOReturnSuccess != err) {
			break;
		}
	}

	return err;
}

IOReturn
IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
    IOByteCount * pDirtyPageCount)
{
	IOReturn    err;
	IOByteCount totalResidentPageCount, totalDirtyPageCount;
	IOByteCount residentPageCount, dirtyPageCount;

	err = kIOReturnSuccess;
	totalResidentPageCount = totalDirtyPageCount = 0;
	for (unsigned index = 0; index < _descriptorsCount; index++) {
		err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
		if (kIOReturnSuccess != err) {
			break;
		}
		totalResidentPageCount += residentPageCount;
		totalDirtyPageCount    += dirtyPageCount;
	}

	if (pResidentPageCount) {
		*pResidentPageCount = totalResidentPageCount;
	}
	if (pDirtyPageCount) {
		*pDirtyPageCount = totalDirtyPageCount;
	}

	return err;
}

uint64_t
IOMultiMemoryDescriptor::getPreparationID( void )
{
	if (!super::getKernelReserved()) {
		return kIOPreparationIDUnsupported;
	}

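	// A shared preparation ID can be minted only if every sub-descriptor
	// is prepared and supports preparation IDs; otherwise report the first
	// blocking condition found.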
	for (unsigned index = 0; index < _descriptorsCount; index++) {
		uint64_t preparationID = _descriptors[index]->getPreparationID();

		if (preparationID == kIOPreparationIDUnsupported) {
			return kIOPreparationIDUnsupported;
		}

		if (preparationID == kIOPreparationIDUnprepared) {
			return kIOPreparationIDUnprepared;
		}
	}

	super::setPreparationID();

	return super::getPreparationID();
}