xref: /xnu-11215/iokit/Kernel/IODMACommand.cpp (revision 8d741a5d)
1 /*
2  * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #define IOKIT_ENABLE_SHARED_PTR
30 
31 #include <IOKit/assert.h>
32 
33 #include <libkern/OSTypes.h>
34 #include <libkern/OSByteOrder.h>
35 #include <libkern/OSDebug.h>
36 
37 #include <IOKit/IOReturn.h>
38 #include <IOKit/IOLib.h>
39 #include <IOKit/IODMACommand.h>
40 #include <IOKit/IOMapper.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43 
44 #include <vm/vm_kern_xnu.h>
45 #include <vm/vm_pageout_xnu.h>
46 #include <vm/vm_iokit.h>
47 
48 #include "IOKitKernelInternal.h"
49 
50 #define MAPTYPE(type)           ((UInt) (type) & kTypeMask)
51 #define IS_NONCOHERENT(type)    (MAPTYPE(type) == kNonCoherent)
52 
53 enum{
54 	kWalkSyncIn       = 0x01,// bounce -> md
55 	kWalkSyncOut      = 0x02,// bounce <- md
56 	kWalkSyncAlways   = 0x04,
57 	kWalkPreflight    = 0x08,
58 	kWalkDoubleBuffer = 0x10,
59 	kWalkPrepare      = 0x20,
60 	kWalkComplete     = 0x40,
61 	kWalkClient       = 0x80
62 };
63 
64 
65 #define fInternalState reserved
66 #define fState         reserved->fState
67 #define fMDSummary     reserved->fMDSummary
68 
69 
70 #if 1
71 // no direction => OutIn
72 #define SHOULD_COPY_DIR(op, direction)                                      \
73 	((kIODirectionNone == (direction))                                  \
74 	    || (kWalkSyncAlways & (op))                                     \
75 	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
76 	                                            & (direction)))
77 
78 #else
79 #define SHOULD_COPY_DIR(state, direction) (true)
80 #endif
81 
82 #if 0
83 #define DEBG(fmt, args...)      { IOLog(fmt, ## args); kprintf(fmt, ## args); }
84 #else
85 #define DEBG(fmt, args...)      {}
86 #endif
87 
88 #if 0
89 #define LOGTAG          0x87654321
90 #endif
91 
92 /**************************** class IODMACommand ***************************/
93 
94 #undef super
95 #define super IOCommand
96 OSDefineMetaClassAndStructorsWithZone(IODMACommand, IOCommand, ZC_NONE);
97 
98 OSMetaClassDefineReservedUsedX86(IODMACommand, 0);
99 OSMetaClassDefineReservedUsedX86(IODMACommand, 1);
100 OSMetaClassDefineReservedUsedX86(IODMACommand, 2);
101 OSMetaClassDefineReservedUsedX86(IODMACommand, 3);
102 OSMetaClassDefineReservedUsedX86(IODMACommand, 4);
103 OSMetaClassDefineReservedUsedX86(IODMACommand, 5);
104 OSMetaClassDefineReservedUsedX86(IODMACommand, 6);
105 OSMetaClassDefineReservedUnused(IODMACommand, 7);
106 OSMetaClassDefineReservedUnused(IODMACommand, 8);
107 OSMetaClassDefineReservedUnused(IODMACommand, 9);
108 OSMetaClassDefineReservedUnused(IODMACommand, 10);
109 OSMetaClassDefineReservedUnused(IODMACommand, 11);
110 OSMetaClassDefineReservedUnused(IODMACommand, 12);
111 OSMetaClassDefineReservedUnused(IODMACommand, 13);
112 OSMetaClassDefineReservedUnused(IODMACommand, 14);
113 OSMetaClassDefineReservedUnused(IODMACommand, 15);
114 
115 
116 OSSharedPtr<IODMACommand>
117 IODMACommand::withRefCon(void * refCon)
118 {
119 	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();
120 
121 	if (me && !me->initWithRefCon(refCon)) {
122 		return nullptr;
123 	}
124 
125 	return me;
126 }
127 
128 OSSharedPtr<IODMACommand>
129 IODMACommand::withSpecification(SegmentFunction  outSegFunc,
130     const SegmentOptions * segmentOptions,
131     uint32_t               mappingOptions,
132     IOMapper             * mapper,
133     void                 * refCon)
134 {
135 	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();
136 
137 	if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
138 	    mapper, refCon)) {
139 		return nullptr;
140 	}
141 
142 	return me;
143 }
144 
145 OSSharedPtr<IODMACommand>
146 IODMACommand::withSpecification(SegmentFunction outSegFunc,
147     UInt8           numAddressBits,
148     UInt64          maxSegmentSize,
149     MappingOptions  mappingOptions,
150     UInt64          maxTransferSize,
151     UInt32          alignment,
152     IOMapper       *mapper,
153     void           *refCon)
154 {
155 	OSSharedPtr<IODMACommand> me = OSMakeShared<IODMACommand>();
156 
157 	if (me && !me->initWithSpecification(outSegFunc,
158 	    numAddressBits, maxSegmentSize,
159 	    mappingOptions, maxTransferSize,
160 	    alignment, mapper, refCon)) {
161 		return nullptr;
162 	}
163 
164 	return me;
165 }
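
// Illustrative usage (editor's sketch, not part of this file): how a client
// might fill in a SegmentOptions for the factory above. The limits shown
// (32-bit addressing, 64K segments, 1MB transfers) are hypothetical values.
#if 0
static OSSharedPtr<IODMACommand>
ExampleCreateCommand(void)
{
	IODMACommand::SegmentOptions options =
	{
		.fStructSize                = sizeof(options),
		.fNumAddressBits            = 32,           // device drives 32 address lines
		.fMaxSegmentSize            = 64 * 1024,    // per scatter/gather element
		.fMaxTransferSize           = 1024 * 1024,  // per prepared I/O
		.fAlignment                 = 4,            // segment addresses 4-byte aligned
		.fAlignmentLength           = 1,            // no length alignment
		.fAlignmentInternalSegments = 4
	};

	return IODMACommand::withSpecification(IODMACommand::OutputHost64, &options,
	           IODMACommand::kMapped, IOMapper::gSystem, NULL);
}
#endif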
166 
167 OSSharedPtr<IODMACommand>
168 IODMACommand::cloneCommand(void *refCon)
169 {
170 	SegmentOptions segmentOptions =
171 	{
172 		.fStructSize                = sizeof(segmentOptions),
173 		.fNumAddressBits            = (uint8_t)fNumAddressBits,
174 		.fMaxSegmentSize            = fMaxSegmentSize,
175 		.fMaxTransferSize           = fMaxTransferSize,
176 		.fAlignment                 = fAlignMask + 1,
177 		.fAlignmentLength           = fAlignMaskLength + 1,
178 		.fAlignmentInternalSegments = fAlignMaskInternalSegments + 1
179 	};
180 
181 	return IODMACommand::withSpecification(fOutSeg, &segmentOptions,
182 	           fMappingOptions, fMapper.get(), refCon);
183 }
184 
185 #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
186 
187 bool
188 IODMACommand::initWithRefCon(void * refCon)
189 {
190 	if (!super::init()) {
191 		return false;
192 	}
193 
194 	if (!reserved) {
195 		reserved = IOMallocType(IODMACommandInternal);
196 	}
197 	fRefCon = refCon;
198 
199 	return true;
200 }
201 
202 bool
203 IODMACommand::initWithSpecification(SegmentFunction        outSegFunc,
204     const SegmentOptions * segmentOptions,
205     uint32_t               mappingOptions,
206     IOMapper             * mapper,
207     void                 * refCon)
208 {
209 	if (!initWithRefCon(refCon)) {
210 		return false;
211 	}
212 
213 	if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
214 	    mappingOptions, mapper)) {
215 		return false;
216 	}
217 
218 	return true;
219 }
220 
221 bool
222 IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
223     UInt8           numAddressBits,
224     UInt64          maxSegmentSize,
225     MappingOptions  mappingOptions,
226     UInt64          maxTransferSize,
227     UInt32          alignment,
228     IOMapper       *mapper,
229     void           *refCon)
230 {
231 	SegmentOptions segmentOptions =
232 	{
233 		.fStructSize                = sizeof(segmentOptions),
234 		.fNumAddressBits            = numAddressBits,
235 		.fMaxSegmentSize            = maxSegmentSize,
236 		.fMaxTransferSize           = maxTransferSize,
237 		.fAlignment                 = alignment,
238 		.fAlignmentLength           = 1,
239 		.fAlignmentInternalSegments = alignment
240 	};
241 
242 	return initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon);
243 }
244 
245 IOReturn
246 IODMACommand::setSpecification(SegmentFunction        outSegFunc,
247     const SegmentOptions * segmentOptions,
248     uint32_t               mappingOptions,
249     IOMapper             * mapper)
250 {
251 	IOService * device = NULL;
252 	UInt8       numAddressBits;
253 	UInt64      maxSegmentSize;
254 	UInt64      maxTransferSize;
255 	UInt32      alignment;
256 
257 	bool        is32Bit;
258 
259 	if (!outSegFunc || !segmentOptions) {
260 		return kIOReturnBadArgument;
261 	}
262 
263 	is32Bit = ((OutputHost32 == outSegFunc)
264 	    || (OutputBig32 == outSegFunc)
265 	    || (OutputLittle32 == outSegFunc));
266 
267 	numAddressBits = segmentOptions->fNumAddressBits;
268 	maxSegmentSize = segmentOptions->fMaxSegmentSize;
269 	maxTransferSize = segmentOptions->fMaxTransferSize;
270 	alignment = segmentOptions->fAlignment;
271 	if (is32Bit) {
272 		if (!numAddressBits) {
273 			numAddressBits = 32;
274 		} else if (numAddressBits > 32) {
275 			return kIOReturnBadArgument;  // Wrong output function for bits
276 		}
277 	}
278 
279 	if (numAddressBits && (numAddressBits < PAGE_SHIFT)) {
280 		return kIOReturnBadArgument;
281 	}
282 
283 	if (!maxSegmentSize) {
284 		maxSegmentSize--;               // 0 means no limit: wraps to UINT64_MAX
285 	}
286 	if (!maxTransferSize) {
287 		maxTransferSize--;              // 0 means no limit: wraps to UINT64_MAX
288 	}
289 	if (mapper && !OSDynamicCast(IOMapper, mapper)) {
290 		device = mapper;
291 		mapper = NULL;
292 	}
293 	if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) {
294 		IOMapper::checkForSystemMapper();
295 		mapper = IOMapper::gSystem;
296 	}
297 
298 	fNumSegments     = 0;
299 	fOutSeg          = outSegFunc;
300 	fNumAddressBits  = numAddressBits;
301 	fMaxSegmentSize  = maxSegmentSize;
302 	fMappingOptions  = mappingOptions;
303 	fMaxTransferSize = maxTransferSize;
304 	if (!alignment) {
305 		alignment = 1;
306 	}
307 	fAlignMask       = alignment - 1;
308 
309 	alignment = segmentOptions->fAlignmentLength;
310 	if (!alignment) {
311 		alignment = 1;
312 	}
313 	fAlignMaskLength = alignment - 1;
314 
315 	alignment = segmentOptions->fAlignmentInternalSegments;
316 	if (!alignment) {
317 		alignment = (fAlignMask + 1);
318 	}
319 	fAlignMaskInternalSegments = alignment - 1;
320 
321 	switch (MAPTYPE(mappingOptions)) {
322 	case kMapped:       break;
323 	case kUnmapped:     break;
324 	case kNonCoherent:  break;
325 
326 	case kBypassed:
327 		if (!mapper) {
328 			break;
329 		}
330 		return kIOReturnBadArgument;
331 
332 	default:
333 		return kIOReturnBadArgument;
334 	}
335 
336 	if (mapper != fMapper) {
337 		fMapper.reset(mapper, OSRetain);
338 	}
339 
340 	fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
341 	if (0 != (kIODMAMapOptionDextOwner & mappingOptions)) {
342 		fInternalState->fDextLock = IOLockAlloc();
343 	}
344 	fInternalState->fDevice = device;
345 
346 
347 	return kIOReturnSuccess;
348 }
349 
350 void
351 IODMACommand::free()
352 {
353 	if (reserved) {
354 		if (fInternalState->fDextLock) {
355 			if (fActive) {
356 				CompleteDMA(kIODMACommandCompleteDMANoOptions);
357 			}
358 			IOLockFree(fInternalState->fDextLock);
359 		}
360 		IOFreeType(reserved, IODMACommandInternal);
361 	}
362 
363 	fMapper.reset();
364 
365 	// Correct use of this class when setting an IOMemoryDescriptor
366 	// in fMemory via setMemoryDescriptor(desc) is, for the caller, to
367 	// have a matching call to clearMemoryDescriptor() before releasing
368 	// the object. The matching call also has the effect of releasing
369 	// the ref taken on the IOMemoryDescriptor in setMemoryDescriptor().
370 	//
371 	// A number of "misbehaving" drivers have been found during testing,
372 	// whereby a matching call to clearMemoryDescriptor() is missing:
373 	//
374 	// rdar://59947343
375 	// rdar://59946968
376 	//
377 	// Both approaches taken in said drivers are wrong, but went
378 	// basically unnoticed while fMemory was a regular pointer. With fMemory
379 	// becoming an OSSharedPtr, the IODMACommand destructor expects to find
380 	// either fMemory reset (through the call to clearMemoryDescriptor()) or
381 	// a reference held for the release.
382 	//
383 	// For this reason, this workaround of detaching fMemory is put in
384 	// place here, choosing the leak over the panic for misbehaving
385 	// drivers. Once all instances are fixed, this workaround will be
386 	// removed.
387 	//
388 	// Note: all well behaving drivers that have matching calls for
389 	// setMemoryDescriptor() and clearMemoryDescriptor() are unaffected
390 	// since fMemory will be null at this point.
391 	fMemory.detach();
392 
393 	super::free();
394 }
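
// Editor's sketch of the balanced lifecycle the comment above requires: every
// setMemoryDescriptor() is matched by a clearMemoryDescriptor() before the
// last reference to the command is dropped. `cmd` and `md` are hypothetical.
#if 0
static IOReturn
ExampleBalancedUse(IODMACommand * cmd, IOMemoryDescriptor * md)
{
	IOReturn ret = cmd->setMemoryDescriptor(md); // retains md; prepares by default
	if (kIOReturnSuccess == ret) {
		// ... generate segments and run the I/O here ...
		cmd->clearMemoryDescriptor();            // completes if active, releases md
	}
	return ret;
}
#endif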
395 
396 IOReturn
397 IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
398 {
399 	IOReturn err = kIOReturnSuccess;
400 
401 	if (mem == fMemory) {
402 		if (!autoPrepare) {
403 			while (fActive) {
404 				complete();
405 			}
406 		}
407 		return kIOReturnSuccess;
408 	}
409 
410 	if (fMemory) {
411 		// As we are almost certainly being called from a work loop thread,
412 		// if fActive is true it is probably not a good time to potentially
413 		// block.  Just test for it and return an error.
414 		if (fActive) {
415 			return kIOReturnBusy;
416 		}
417 		clearMemoryDescriptor();
418 	}
419 
420 	if (mem) {
421 		bzero(&fMDSummary, sizeof(fMDSummary));
422 		err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
423 		    &fMDSummary, sizeof(fMDSummary));
424 		if (err) {
425 			return err;
426 		}
427 
428 		ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;
429 
430 		if ((kMapped == MAPTYPE(fMappingOptions))
431 		    && fMapper) {
432 			fInternalState->fCheckAddressing = false;
433 		} else {
434 			fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
435 		}
436 
437 		fInternalState->fNewMD = true;
438 		fMemory.reset(const_cast<IOMemoryDescriptor *>(mem), OSRetain);
439 		fInternalState->fSetActiveNoMapper = (!fMapper);
440 		if (fInternalState->fSetActiveNoMapper) {
441 			mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
442 		}
443 		if (autoPrepare) {
444 			err = prepare();
445 			if (err) {
446 				clearMemoryDescriptor();
447 			}
448 		}
449 	}
450 
451 	return err;
452 }
453 
454 IOReturn
455 IODMACommand::clearMemoryDescriptor(bool autoComplete)
456 {
457 	if (fActive && !autoComplete) {
458 		return kIOReturnNotReady;
459 	}
460 
461 	if (fMemory) {
462 		while (fActive) {
463 			complete();
464 		}
465 		if (fInternalState->fSetActiveNoMapper) {
466 			fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
467 		}
468 		fMemory.reset();
469 	}
470 
471 	return kIOReturnSuccess;
472 }
473 
474 const IOMemoryDescriptor *
475 IODMACommand::getMemoryDescriptor() const
476 {
477 	return fMemory.get();
478 }
479 
480 IOMemoryDescriptor *
481 IODMACommand::getIOMemoryDescriptor() const
482 {
483 	OSSharedPtr<IOMemoryDescriptor> mem;
484 
485 	mem = reserved->fCopyMD;
486 	if (!mem) {
487 		mem = fMemory;
488 	}
489 
490 	return mem.get();
491 }
492 
493 IOReturn
494 IODMACommand::segmentOp(
495 	void         *reference,
496 	IODMACommand *target,
497 	Segment64     segment,
498 	void         *segments,
499 	UInt32        segmentIndex)
500 {
501 	IOOptionBits op = (IOOptionBits)(uintptr_t) reference;
502 	addr64_t     maxPhys, address;
503 	uint64_t     length;
504 	uint32_t     numPages;
505 	uint32_t     mask;
506 
507 	IODMACommandInternal * state = target->reserved;
508 
509 	if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) {
510 		maxPhys = (1ULL << target->fNumAddressBits);
511 	} else {
512 		maxPhys = 0;
513 	}
514 	maxPhys--;
515 
516 	address = segment.fIOVMAddr;
517 	length = segment.fLength;
518 
519 	assert(length);
520 
521 	if (!state->fMisaligned) {
522 		mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
523 		state->fMisaligned |= (0 != (mask & address));
524 		if (state->fMisaligned) {
525 			DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
526 		}
527 	}
528 	if (!state->fMisaligned) {
529 		mask = target->fAlignMaskLength;
530 		state->fMisaligned |= (0 != (mask & length));
531 		if (state->fMisaligned) {
532 			DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
533 		}
534 	}
535 
536 	if (state->fMisaligned && (kWalkPreflight & op)) {
537 		return kIOReturnNotAligned;
538 	}
539 
540 	if (!state->fDoubleBuffer) {
541 		if ((address + length - 1) <= maxPhys) {
542 			length = 0;
543 		} else if (address <= maxPhys) {
544 			DEBG("tail %qx, %qx", address, length);
545 			length = (address + length - maxPhys - 1);
546 			address = maxPhys + 1;
547 			DEBG("-> %qx, %qx\n", address, length);
548 		}
549 	}
550 
551 	if (!length) {
552 		return kIOReturnSuccess;
553 	}
554 
555 	uint64_t numPages64 = atop_64(round_page_64((address & PAGE_MASK) + length));
556 	if (numPages64 > UINT_MAX) {
557 		return kIOReturnVMError;
558 	}
559 	numPages = (typeof(numPages))numPages64;
560 
561 	if (kWalkPreflight & op) {
562 		state->fCopyPageCount += numPages;
563 	} else {
564 		vm_page_t lastPage;
565 		lastPage = NULL;
566 		if (kWalkPrepare & op) {
567 			lastPage = state->fCopyNext;
568 			for (IOItemCount idx = 0; idx < numPages; idx++) {
569 				vm_page_set_offset(lastPage, atop_64(address) + idx);
570 				lastPage = vm_page_get_next(lastPage);
571 			}
572 		}
573 
574 		if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
575 			lastPage = state->fCopyNext;
576 			for (IOItemCount idx = 0; idx < numPages; idx++) {
577 				if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
578 					addr64_t cpuAddr = address;
579 					addr64_t remapAddr;
580 					uint64_t chunk;
581 
582 					if ((kMapped == MAPTYPE(target->fMappingOptions))
583 					    && target->fMapper) {
584 						cpuAddr = target->fMapper->mapToPhysicalAddress(address);
585 					}
586 
587 					remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
588 					if (!state->fDoubleBuffer) {
589 						remapAddr += (address & PAGE_MASK);
590 					}
591 					chunk = PAGE_SIZE - (address & PAGE_MASK);
592 					if (chunk > length) {
593 						chunk = length;
594 					}
595 					if (chunk > (UINT_MAX - PAGE_SIZE + 1)) {
596 						chunk = (UINT_MAX - PAGE_SIZE + 1);
597 					}
598 
599 					DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
600 					    (kWalkSyncIn & op) ? "->" : "<-",
601 					    address, chunk, op);
602 
603 					if (kWalkSyncIn & op) { // cppvNoModSnk
604 						copypv(remapAddr, cpuAddr, (unsigned int) chunk,
605 						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
606 					} else {
607 						copypv(cpuAddr, remapAddr, (unsigned int) chunk,
608 						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
609 					}
610 					address += chunk;
611 					length -= chunk;
612 				}
613 				lastPage = vm_page_get_next(lastPage);
614 			}
615 		}
616 		state->fCopyNext = lastPage;
617 	}
618 
619 	return kIOReturnSuccess;
620 }
621 
622 OSSharedPtr<IOBufferMemoryDescriptor>
623 IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
624 {
625 	mach_vm_address_t mask = 0xFFFFF000;    // page-aligned, 32-bit physical addresses (cf. state->fSourceAlignMask)
626 	return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
627 	           direction, length, mask);
628 }
629 
630 IOReturn
631 IODMACommand::walkAll(uint32_t op)
632 {
633 	IODMACommandInternal * state = fInternalState;
634 
635 	IOReturn     ret = kIOReturnSuccess;
636 	UInt32       numSegments;
637 	UInt64       offset;
638 
639 	if (kWalkPreflight & op) {
640 		state->fMisaligned     = false;
641 		state->fDoubleBuffer   = false;
642 		state->fPrepared       = false;
643 		state->fCopyNext       = NULL;
644 		state->fCopyPageAlloc  = NULL;
645 		state->fCopyPageCount  = 0;
646 		state->fNextRemapPage  = NULL;
647 		state->fCopyMD         = NULL;
648 
649 		if (!(kWalkDoubleBuffer & op)) {
650 			offset = 0;
651 			numSegments = 0 - 1;    // i.e. UINT32_MAX: no cap on segments for this walk
652 			ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
653 		}
654 
655 		op &= ~kWalkPreflight;
656 
657 		state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
658 		state->fForceDoubleBuffer = false;
659 		if (state->fDoubleBuffer) {
660 			state->fCopyPageCount = (typeof(state->fCopyPageCount))(atop_64(round_page(state->fPreparedLength)));
661 		}
662 
663 		if (state->fCopyPageCount) {
664 			vm_page_t mapBase = NULL;
665 
666 			DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);
667 
668 			if (!fMapper && !state->fDoubleBuffer) {
669 				kern_return_t kr;
670 
671 				if (fMapper) {
672 					panic("fMapper copying");
673 				}
674 
675 				kr = vm_page_alloc_list(state->fCopyPageCount,
676 				    (kma_flags_t)(KMA_LOMEM | KMA_NOPAGEWAIT), &mapBase);
677 				if (KERN_SUCCESS != kr) {
678 					DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
679 					mapBase = NULL;
680 				}
681 			}
682 
683 			if (mapBase) {
684 				state->fCopyPageAlloc = mapBase;
685 				state->fCopyNext = state->fCopyPageAlloc;
686 				offset = 0;
687 				numSegments = 0 - 1;
688 				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
689 				state->fPrepared = true;
690 				op &= ~(kWalkSyncIn | kWalkSyncOut);
691 			} else {
692 				DEBG("alloc IOBMD\n");
693 				state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);
694 
695 				if (state->fCopyMD) {
696 					ret = kIOReturnSuccess;
697 					state->fPrepared = true;
698 				} else {
699 					DEBG("IODMACommand !alloc IOBMD");
700 					return kIOReturnNoResources;
701 				}
702 			}
703 		}
704 	}
705 
706 	if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) {
707 		if (state->fCopyPageCount) {
708 			DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);
709 
710 			if (state->fCopyPageAlloc) {
711 				state->fCopyNext = state->fCopyPageAlloc;
712 				offset = 0;
713 				numSegments = 0 - 1;
714 				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
715 			} else if (state->fCopyMD) {
716 				DEBG("sync IOBMD\n");
717 
718 				if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) {
719 					OSSharedPtr<IOMemoryDescriptor> poMD = fMemory;
720 
721 					IOByteCount bytes;
722 
723 					if (kWalkSyncIn & op) {
724 						bytes = poMD->writeBytes(state->fPreparedOffset,
725 						    state->fCopyMD->getBytesNoCopy(),
726 						    state->fPreparedLength);
727 					} else {
728 						bytes = poMD->readBytes(state->fPreparedOffset,
729 						    state->fCopyMD->getBytesNoCopy(),
730 						    state->fPreparedLength);
731 					}
732 					DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
733 					ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
734 				} else {
735 					ret = kIOReturnSuccess;
736 				}
737 			}
738 		}
739 	}
740 
741 	if (kWalkComplete & op) {
742 		if (state->fCopyPageAlloc) {
743 			vm_page_free_list(state->fCopyPageAlloc, FALSE);
744 			state->fCopyPageAlloc = NULL;
745 			state->fCopyPageCount = 0;
746 		}
747 		if (state->fCopyMD) {
748 			state->fCopyMD.reset();
749 		}
750 
751 		state->fPrepared = false;
752 	}
753 	return ret;
754 }
755 
756 UInt8
757 IODMACommand::getNumAddressBits(void)
758 {
759 	return (UInt8) fNumAddressBits;
760 }
761 
762 UInt32
763 IODMACommand::getAlignment(void)
764 {
765 	return fAlignMask + 1;
766 }
767 
768 uint32_t
769 IODMACommand::getAlignmentLength(void)
770 {
771 	return fAlignMaskLength + 1;
772 }
773 
774 uint32_t
775 IODMACommand::getAlignmentInternalSegments(void)
776 {
777 	return fAlignMaskInternalSegments + 1;
778 }
779 
780 IOReturn
781 IODMACommand::prepareWithSpecification(SegmentFunction        outSegFunc,
782     const SegmentOptions * segmentOptions,
783     uint32_t               mappingOptions,
784     IOMapper             * mapper,
785     UInt64                 offset,
786     UInt64                 length,
787     bool                   flushCache,
788     bool                   synchronize)
789 {
790 	IOReturn ret;
791 
792 	if (fActive) {
793 		return kIOReturnNotPermitted;
794 	}
795 
796 	ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
797 	if (kIOReturnSuccess != ret) {
798 		return ret;
799 	}
800 
801 	ret = prepare(offset, length, flushCache, synchronize);
802 
803 	return ret;
804 }
805 
806 IOReturn
807 IODMACommand::prepareWithSpecification(SegmentFunction  outSegFunc,
808     UInt8            numAddressBits,
809     UInt64           maxSegmentSize,
810     MappingOptions   mappingOptions,
811     UInt64           maxTransferSize,
812     UInt32           alignment,
813     IOMapper         *mapper,
814     UInt64           offset,
815     UInt64           length,
816     bool             flushCache,
817     bool             synchronize)
818 {
819 	SegmentOptions segmentOptions =
820 	{
821 		.fStructSize                = sizeof(segmentOptions),
822 		.fNumAddressBits            = numAddressBits,
823 		.fMaxSegmentSize            = maxSegmentSize,
824 		.fMaxTransferSize           = maxTransferSize,
825 		.fAlignment                 = alignment,
826 		.fAlignmentLength           = 1,
827 		.fAlignmentInternalSegments = alignment
828 	};
829 
830 	return prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
831 	           offset, length, flushCache, synchronize);
832 }
833 
834 
835 IOReturn
836 IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
837 {
838 	IODMACommandInternal *  state = fInternalState;
839 	IOReturn                  ret = kIOReturnSuccess;
840 	uint32_t       mappingOptions = fMappingOptions;
841 
842 	// check specification has been set
843 	if (!fOutSeg) {
844 		return kIOReturnNotReady;
845 	}
846 
847 	if (!length) {
848 		length = fMDSummary.fLength;
849 	}
850 
851 	if (length > fMaxTransferSize) {
852 		return kIOReturnNoSpace;
853 	}
854 
855 	if (fActive++) {
856 		if ((state->fPreparedOffset != offset)
857 		    || (state->fPreparedLength != length)) {
858 			ret = kIOReturnNotReady;
859 		}
860 	} else {
861 		if (fAlignMaskLength & length) {
862 			return kIOReturnNotAligned;
863 		}
864 
865 		if (atop_64(state->fPreparedLength) > UINT_MAX) {
866 			return kIOReturnVMError;
867 		}
868 		state->fPreparedOffset = offset;
869 		state->fPreparedLength = length;
870 
871 		state->fMisaligned     = false;
872 		state->fDoubleBuffer   = false;
873 		state->fPrepared       = false;
874 		state->fCopyNext       = NULL;
875 		state->fCopyPageAlloc  = NULL;
876 		state->fCopyPageCount  = 0;
877 		state->fNextRemapPage  = NULL;
878 		state->fCopyMD         = NULL;
879 		state->fLocalMapperAlloc       = 0;
880 		state->fLocalMapperAllocValid  = false;
881 		state->fLocalMapperAllocLength = 0;
882 
883 		state->fSourceAlignMask = fAlignMask;
884 		if (fMapper) {
885 			state->fSourceAlignMask &= page_mask;
886 		}
887 
888 		state->fCursor = state->fIterateOnly
889 		    || (!state->fCheckAddressing
890 		    && (!state->fSourceAlignMask
891 		    || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
892 
893 		if (!state->fCursor) {
894 			IOOptionBits op = kWalkPrepare | kWalkPreflight;
895 			if (synchronize) {
896 				op |= kWalkSyncOut;
897 			}
898 			ret = walkAll(op);
899 		}
900 
901 		if (IS_NONCOHERENT(mappingOptions) && flushCache) {
902 			if (state->fCopyMD) {
903 				state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
904 			} else {
905 				fMemory->performOperation(kIOMemoryIncoherentIOStore, offset, length);
906 			}
907 		}
908 
909 		if (fMapper) {
910 			IOMDDMAMapArgs mapArgs;
911 			bzero(&mapArgs, sizeof(mapArgs));
912 			mapArgs.fMapper = fMapper.get();
913 			mapArgs.fCommand = this;
914 			mapArgs.fMapSpec.device         = state->fDevice;
915 			mapArgs.fMapSpec.alignment      = fAlignMask + 1;
916 			mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? ((UInt8) fNumAddressBits) : 64;
917 			mapArgs.fLength = state->fPreparedLength;
918 			OSSharedPtr<IOMemoryDescriptor> md = state->fCopyMD;
919 			if (md) {
920 				mapArgs.fOffset = 0;
921 			} else {
922 				md = fMemory;
923 				mapArgs.fOffset = state->fPreparedOffset;
924 			}
925 
926 			ret = md->dmaCommandOperation(kIOMDDMAMap, &mapArgs, sizeof(mapArgs));
927 
928 			if ((kIOReturnSuccess == ret)
929 			    && mapArgs.fAllocLength
930 			    && (mapArgs.fAllocLength != mapArgs.fLength)) {
931 				do {
932 					// multisegment case
933 					IOMDDMAWalkSegmentState  walkState;
934 					IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
935 					IOOptionBits             mdOp;
936 					uint64_t                 index;
937 					IOPhysicalLength         segLen;
938 					uint32_t                         segCount;
939 					uint64_t                         phys, align;
940 					uint64_t                         mapperPageMask;
941 					uint64_t                         mapperPageShift;
942 					uint64_t                         insertOffset;
943 					uint32_t                         mapOptions;
944 					uint64_t                         length;
945 
946 					assert(mapArgs.fAllocLength > mapArgs.fLength);
947 
948 					mapperPageMask    = fMapper->getPageSize();
949 					assert(mapperPageMask);
950 					mapperPageMask   -= 1;
951 					mapperPageShift   = (64 - __builtin_clzll(mapperPageMask));
952 					walkArgs->fMapped = false;
953 					length            = state->fPreparedLength;
954 					mdOp              = kIOMDFirstSegment;
955 					segCount          = 0;
956 					for (index = 0; index < length; segCount++) {
957 						walkArgs->fOffset = state->fPreparedOffset + index;
958 
959 						ret    = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
960 						mdOp   = kIOMDWalkSegments;
961 						assert(kIOReturnSuccess == ret);
962 						if (ret != kIOReturnSuccess) {
963 							panic("dmaCommandOperation");
964 						}
965 						segLen = walkArgs->fLength;
966 						index += segLen;
967 					}
968 					if (ret != kIOReturnSuccess) {
969 						break;
970 					}
971 
972 #if defined(LOGTAG)
973 					if (LOGTAG == fMemory->getTag()) {
974 						IOLog("DMA[%p] alloc 0x%qx, 0x%qx\n", this, mapArgs.fAlloc, mapArgs.fAllocLength);
975 					}
976 #endif /* defined(LOGTAG) */
977 
978 					state->fMapSegments = IONewZeroData(IODMACommandMapSegment, segCount);
979 					if (!state->fMapSegments) {
980 						ret = kIOReturnNoMemory;
981 						break;
982 					}
983 					state->fMapSegmentsCount = segCount;
984 
985 					switch (kIODirectionOutIn & fMDSummary.fDirection) {
986 					case kIODirectionOut:
987 						mapOptions = kIODMAMapReadAccess;
988 						break;
989 					case kIODirectionIn:
990 						mapOptions = kIODMAMapWriteAccess;
991 						break;
992 					default:
993 						mapOptions = kIODMAMapReadAccess | kIODMAMapWriteAccess;
994 						break;
995 					}
996 
997 					mdOp = kIOMDFirstSegment;
998 					segCount = 0;
999 					for (insertOffset = 0, index = 0; index < length; segCount++) {
1000 						walkArgs->fOffset = state->fPreparedOffset + index;
1001 						ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
1002 						mdOp = kIOMDWalkSegments;
1003 						if (ret != kIOReturnSuccess) {
1004 							panic("dmaCommandOperation 0x%x", ret);
1005 						}
1006 						phys = walkArgs->fIOVMAddr;
1007 						segLen = walkArgs->fLength;
1008 
1009 #if defined(LOGTAG)
1010 						if (LOGTAG == fMemory->getTag()) {
1011 							IOLog("DMA[%p] phys[%d] 0x%qx, 0x%qx\n", this, segCount, (uint64_t) phys, (uint64_t) segLen);
1012 						}
1013 #endif /* defined(LOGTAG) */
1014 
1015 						align = (phys & mapperPageMask);
1016 
1017 #if defined(LOGTAG)
1018 						if (LOGTAG == fMemory->getTag()) {
1019 							IOLog("DMA[%p] runs[%d] dmaoff 0x%qx, mapoff 0x%qx, align 0x%qx\n", this, segCount, index, insertOffset, align);
1020 						}
1021 #endif /* defined(LOGTAG) */
1022 
1023 						assert(segCount < state->fMapSegmentsCount);
1024 						state->fMapSegments[segCount].fDMAOffset = state->fPreparedOffset + index;
1025 						state->fMapSegments[segCount].fMapOffset = insertOffset;
1026 						state->fMapSegments[segCount].fPageOffset = align;
1027 						index  += segLen;
1028 
1029 						// segment page align
1030 						segLen  = ((phys + segLen + mapperPageMask) & ~mapperPageMask);
1031 						phys   -= align;
1032 						segLen -= phys;
1033 						insertOffset += segLen;
1034 					}
1035 					state->fLocalMapperAllocBase = (mapArgs.fAlloc & ~mapperPageMask);
1036 #if defined(LOGTAG)
1037 					if (LOGTAG == fMemory->getTag()) {
1038 						IOLog("IODMACommand fMapSegmentsCount %d\n", state->fMapSegmentsCount);
1039 					}
1040 #endif /* defined(LOGTAG) */
1041 				} while (false);
1042 			}
1043 			if (kIOReturnSuccess == ret) {
1044 				state->fLocalMapperAlloc       = mapArgs.fAlloc;
1045 				state->fLocalMapperAllocValid  = true;
1046 				state->fLocalMapperAllocLength = mapArgs.fAllocLength;
1047 			}
1048 		}
1049 		if (kIOReturnSuccess == ret) {
1050 			state->fPrepared = true;
1051 		}
1052 	}
1053 	return ret;
1054 }
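
// Editor's sketch: prepare()/complete() bracket each DMA on the current
// memory descriptor; with no arguments, prepare() covers the whole descriptor
// and uses the default flush/synchronize behavior. Names are hypothetical.
#if 0
static IOReturn
ExampleOneIO(IODMACommand * cmd)
{
	IOReturn ret = cmd->prepare();       // whole descriptor, default options
	if (kIOReturnSuccess == ret) {
		// ... genIOVMSegments() + hardware I/O here ...
		ret = cmd->complete();
	}
	return ret;
}
#endif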
1055 
1056 IOReturn
1057 IODMACommand::complete(bool invalidateCache, bool synchronize)
1058 {
1059 	IODMACommandInternal * state = fInternalState;
1060 	IOReturn               ret   = kIOReturnSuccess;
1061 	OSSharedPtr<IOMemoryDescriptor> copyMD;
1062 
1063 	if (fActive < 1) {
1064 		return kIOReturnNotReady;
1065 	}
1066 
1067 	if (!--fActive) {
1068 		copyMD = state->fCopyMD;
1069 
1070 		if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) {
1071 			if (copyMD) {
1072 				copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
1073 			} else {
1074 				OSSharedPtr<IOMemoryDescriptor> md = fMemory;
1075 				md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
1076 			}
1077 		}
1078 
1079 		if (!state->fCursor) {
1080 			IOOptionBits op = kWalkComplete;
1081 			if (synchronize) {
1082 				op |= kWalkSyncIn;
1083 			}
1084 			ret = walkAll(op);
1085 		}
1086 
1087 		if (state->fLocalMapperAllocValid) {
1088 			IOMDDMAMapArgs mapArgs;
1089 			bzero(&mapArgs, sizeof(mapArgs));
1090 			mapArgs.fMapper = fMapper.get();
1091 			mapArgs.fCommand = this;
1092 			mapArgs.fAlloc = state->fLocalMapperAlloc;
1093 			mapArgs.fAllocLength = state->fLocalMapperAllocLength;
1094 			OSSharedPtr<IOMemoryDescriptor> md = copyMD;
1095 			if (md) {
1096 				mapArgs.fOffset = 0;
1097 			} else {
1098 				md = fMemory;
1099 				mapArgs.fOffset = state->fPreparedOffset;
1100 			}
1101 
1102 			ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));
1103 
1104 			state->fLocalMapperAlloc       = 0;
1105 			state->fLocalMapperAllocValid  = false;
1106 			state->fLocalMapperAllocLength = 0;
1107 			if (state->fMapSegments) {
1108 				IODeleteData(state->fMapSegments, IODMACommandMapSegment, state->fMapSegmentsCount);
1109 				state->fMapSegments      = NULL;
1110 				state->fMapSegmentsCount = 0;
1111 			}
1112 		}
1113 
1114 		state->fPrepared = false;
1115 	}
1116 
1117 	return ret;
1118 }
1119 
1120 IOReturn
1121 IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
1122 {
1123 	IODMACommandInternal * state = fInternalState;
1124 	if (fActive < 1) {
1125 		return kIOReturnNotReady;
1126 	}
1127 
1128 	if (offset) {
1129 		*offset = state->fPreparedOffset;
1130 	}
1131 	if (length) {
1132 		*length = state->fPreparedLength;
1133 	}
1134 
1135 	return kIOReturnSuccess;
1136 }
1137 
1138 IOReturn
1139 IODMACommand::synchronize(IOOptionBits options)
1140 {
1141 	IODMACommandInternal * state = fInternalState;
1142 	IOReturn               ret   = kIOReturnSuccess;
1143 	IOOptionBits           op;
1144 
1145 	if (kIODirectionOutIn == (kIODirectionOutIn & options)) {
1146 		return kIOReturnBadArgument;
1147 	}
1148 
1149 	if (fActive < 1) {
1150 		return kIOReturnNotReady;
1151 	}
1152 
1153 	op = 0;
1154 	if (kForceDoubleBuffer & options) {
1155 		if (state->fDoubleBuffer) {
1156 			return kIOReturnSuccess;
1157 		}
1158 		ret = complete(false /* invalidateCache */, true /* synchronize */);
1159 		state->fCursor = false;
1160 		state->fForceDoubleBuffer = true;
1161 		ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);
1162 
1163 		return ret;
1164 	} else if (state->fCursor) {
1165 		return kIOReturnSuccess;
1166 	}
1167 
1168 	if (kIODirectionIn & options) {
1169 		op |= kWalkSyncIn | kWalkSyncAlways;
1170 	} else if (kIODirectionOut & options) {
1171 		op |= kWalkSyncOut | kWalkSyncAlways;
1172 	}
1173 
1174 	ret = walkAll(op);
1175 
1176 	return ret;
1177 }
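
// Editor's sketch: synchronize() above is how a client flushes bounce-buffer
// copies around an I/O; the direction selects which way data is copied.
// Hypothetical call sites:
#if 0
static void
ExampleSync(IODMACommand * cmd)
{
	cmd->synchronize(kIODirectionOut);  // CPU wrote buffer -> make visible to device
	// ... device performs the transfer ...
	cmd->synchronize(kIODirectionIn);   // device wrote buffer -> make visible to CPU
}
#endif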
1178 
1179 struct IODMACommandTransferContext {
1180 	void *   buffer;
1181 	UInt64   bufferOffset;
1182 	UInt64   remaining;
1183 	UInt32   op;
1184 };
1185 enum{
1186 	kIODMACommandTransferOpReadBytes  = 1,
1187 	kIODMACommandTransferOpWriteBytes = 2
1188 };
1189 
1190 IOReturn
1191 IODMACommand::transferSegment(void   *reference,
1192     IODMACommand *target,
1193     Segment64     segment,
1194     void         *segments,
1195     UInt32        segmentIndex)
1196 {
1197 	IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
1198 	UInt64   length  = min(segment.fLength, context->remaining);
1199 	addr64_t ioAddr  = segment.fIOVMAddr;
1200 	addr64_t cpuAddr = ioAddr;
1201 
1202 	context->remaining -= length;
1203 
1204 	while (length) {
1205 		UInt64 copyLen = length;
1206 		if ((kMapped == MAPTYPE(target->fMappingOptions))
1207 		    && target->fMapper) {
1208 			cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
1209 			copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
1210 			ioAddr += copyLen;
1211 		}
1212 		if (copyLen > (UINT_MAX - PAGE_SIZE + 1)) {
1213 			copyLen = (UINT_MAX - PAGE_SIZE + 1);
1214 		}
1215 
1216 		switch (context->op) {
1217 		case kIODMACommandTransferOpReadBytes:
1218 			copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, (unsigned int) copyLen,
1219 			    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1220 			break;
1221 		case kIODMACommandTransferOpWriteBytes:
1222 			copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, (unsigned int) copyLen,
1223 			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1224 			break;
1225 		}
1226 		length                -= copyLen;
1227 		context->bufferOffset += copyLen;
1228 	}
1229 
1230 	return context->remaining ? kIOReturnSuccess : kIOReturnOverrun;    // overrun stops the walk once the request is satisfied
1231 }
1232 
1233 UInt64
1234 IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
1235 {
1236 	IODMACommandInternal *      state = fInternalState;
1237 	IODMACommandTransferContext context;
1238 	Segment64                   segments[1];
1239 	UInt32                      numSegments = 0 - 1;
1240 
1241 	if (fActive < 1) {
1242 		return 0;
1243 	}
1244 
1245 	if (offset >= state->fPreparedLength) {
1246 		return 0;
1247 	}
1248 	length = min(length, state->fPreparedLength - offset);
1249 
1250 	context.buffer       = buffer;
1251 	context.bufferOffset = 0;
1252 	context.remaining    = length;
1253 	context.op           = transferOp;
1254 	(void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
1255 
1256 	return length - context.remaining;
1257 }
1258 
1259 UInt64
1260 IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
1261 {
1262 	return transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length);
1263 }
1264 
1265 UInt64
1266 IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
1267 {
1268 	return transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length);
1269 }
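
// Editor's sketch: readBytes()/writeBytes() above copy between a kernel
// buffer and the prepared range, returning the byte count actually moved; a
// short count means the request ran past the prepared length. Names are
// hypothetical.
#if 0
static bool
ExampleCopyOut(IODMACommand * cmd, void * buffer, UInt64 length)
{
	UInt64 done = cmd->readBytes(0 /* offset */, buffer, length);
	return done == length;
}
#endif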
1270 
1271 IOReturn
1272 IODMACommand::genIOVMSegments(UInt64 *offsetP,
1273     void   *segmentsP,
1274     UInt32 *numSegmentsP)
1275 {
1276 	return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
1277 	           offsetP, segmentsP, numSegmentsP);
1278 }
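
// Editor's sketch of the usual client loop for the method above, assuming the
// command was created with OutputHost64 so the output list is a Segment64
// array. The array size (32) and loop structure are hypothetical.
#if 0
static IOReturn
ExampleGenerateAll(IODMACommand * cmd)
{
	UInt64   offset = 0;
	UInt64   length = 0;
	IOReturn ret = cmd->getPreparedOffsetAndLength(NULL, &length);

	while ((kIOReturnSuccess == ret) && (offset < length)) {
		IODMACommand::Segment64 segs[32];
		UInt32 numSegs = 32;

		ret = cmd->genIOVMSegments(&offset, segs, &numSegs);
		if ((kIOReturnSuccess != ret) || !numSegs) {
			break;
		}
		// ... program segs[0 .. numSegs-1] into the device's S/G list ...
	}
	return ret;
}
#endif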
1279 
1280 IOReturn
1281 IODMACommand::genIOVMSegments(uint32_t op,
1282     InternalSegmentFunction outSegFunc,
1283     void   *reference,
1284     UInt64 *offsetP,
1285     void   *segmentsP,
1286     UInt32 *numSegmentsP)
1287 {
1288 	IODMACommandInternal * internalState = fInternalState;
1289 	IOOptionBits           mdOp = kIOMDWalkSegments;
1290 	IOReturn               ret  = kIOReturnSuccess;
1291 
1292 	if (!(kWalkComplete & op) && !fActive) {
1293 		return kIOReturnNotReady;
1294 	}
1295 
1296 	if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) {
1297 		return kIOReturnBadArgument;
1298 	}
1299 
1300 	IOMDDMAWalkSegmentArgs *state =
1301 	    (IOMDDMAWalkSegmentArgs *)(void *) fState;
1302 
1303 	UInt64 offset    = *offsetP + internalState->fPreparedOffset;
1304 	UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;
1305 
1306 	if (offset >= memLength) {
1307 		return kIOReturnOverrun;
1308 	}
1309 
1310 	if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
1311 		state->fOffset                                   = 0;
1312 		internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
1313 		internalState->fNextRemapPage                    = NULL;
1314 		internalState->fNewMD                            = false;
1315 		mdOp                                             = kIOMDFirstSegment;
1316 		if (fMapper) {
1317 			if (internalState->fLocalMapperAllocValid) {
1318 				state->fMapped = true;
1319 				state->fMappedBase = internalState->fLocalMapperAlloc;
1320 			} else {
1321 				state->fMapped = false;
1322 			}
1323 		}
1324 	}
1325 
1326 	UInt32    segIndex = 0;
1327 	UInt32    numSegments = *numSegmentsP;
1328 	Segment64 curSeg = { 0, 0 };
1329 	bool      curSegValid = false;
1330 	addr64_t  maxPhys;
1331 
1332 	if (fNumAddressBits && (fNumAddressBits < 64)) {
1333 		maxPhys = (1ULL << fNumAddressBits);
1334 	} else {
1335 		maxPhys = 0;
1336 	}
1337 	maxPhys--;
1338 
1339 	while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) {
1340 		// state = next seg
1341 		if (!internalState->fIOVMAddrValid) {
1342 			IOReturn rtn;
1343 
1344 			state->fOffset = offset;
1345 			state->fLength = memLength - offset;
1346 
1347 			bool done = false;
1348 			bool check = false;
1349 
1350 			if (internalState->fLocalMapperAllocValid) {
1351 				if (!internalState->fMapSegmentsCount) {
1352 					state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
1353 					rtn = kIOReturnSuccess;
1354 					done = true;
1355 					check = true;
1356 				} else {
1357 					uint64_t address;
1358 					uint64_t length;
1359 					uint64_t runOffset;
1360 					uint64_t ind;
1361 					uint64_t off2Ind = internalState->fOffset2Index;
1362 
1363 					// Validate the previous offset
1364 					if (offset
1365 					    && (offset == internalState->fNextOffset || off2Ind <= offset)) {
1366 						ind = internalState->fIndex;
1367 					} else {
1368 						ind = off2Ind = 0; // Start from beginning
1369 					}
1370 #if defined(LOGTAG)
1371 					if (LOGTAG == fMemory->getTag()) {
1372 						IOLog("DMA[%p] offsets 0x%qx, 0x%qx, 0x%qx ind %qd\n", this, offset, internalState->fPreparedOffset, internalState->fNextOffset, ind);
1373 					}
1374 #endif /* defined(LOGTAG) */
1375 
1376 					// Scan through iopl info blocks looking for block containing offset
1377 					while (ind < internalState->fMapSegmentsCount && offset >= internalState->fMapSegments[ind].fDMAOffset) {
1378 						ind++;
1379 					}
1380 					if (ind < internalState->fMapSegmentsCount) {
1381 						length = internalState->fMapSegments[ind].fDMAOffset;
1382 					} else {
1383 						length = memLength;
1384 					}
1385 					length -= offset;       // Remainder within iopl
1386 
1387 					// Go back to actual range as search goes past it
1388 					ind--;
1389 					off2Ind = internalState->fMapSegments[ind].fDMAOffset;
1390 
1391 					// Subtract offset till this iopl in total list
1392 					runOffset = offset - off2Ind;
1393 
1394 					// Compute an offset relative to the mapped base
1395 
1396 					runOffset += internalState->fMapSegments[ind].fPageOffset;
1397 					address = internalState->fLocalMapperAllocBase + internalState->fMapSegments[ind].fMapOffset + runOffset;
1398 #if defined(LOGTAG)
1399 					if (LOGTAG == fMemory->getTag()) {
1400 						IOLog("DMA[%p] addrlen 0x%qx, 0x%qx\n", this, address, length);
1401 					}
1402 #endif /* defined(LOGTAG) */
1403 
1404 					state->fIOVMAddr = address;
1405 					state->fLength   = length;
1406 
1407 					internalState->fIndex        = ind;
1408 					internalState->fOffset2Index = off2Ind;
1409 					internalState->fNextOffset   = state->fOffset + length;
1410 
1411 					rtn = kIOReturnSuccess;
1412 					done = true;
1413 					check = true;
1414 				}
1415 			}
1416 
1417 			if (!done) {
1418 				IOMemoryDescriptor * memory =
1419 				    internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get();
1420 				rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
1421 				mdOp = kIOMDWalkSegments;
1422 			}
1423 #if 0
1424 			if (check
1425 			    && !ml_at_interrupt_context()
1426 			    && (rtn == kIOReturnSuccess)
1427 			    && fMapper
1428 			    && strcmp("AppleNVMeMMU", fMapper->getName())) {
1429 				uint64_t checkOffset;
1430 				IOPhysicalLength segLen;
1431 				IOMemoryDescriptor * memory =
1432 				    internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get();
1433 				for (checkOffset = 0; checkOffset < state->fLength;) {
1434 					addr64_t phys = memory->getPhysicalSegment(offset + checkOffset, &segLen, kIOMemoryMapperNone);
1435 					addr64_t mapperPhys;
1436 
1437 					mapperPhys = fMapper->mapToPhysicalAddress(state->fIOVMAddr + checkOffset);
1438 					mapperPhys |= (phys & (fMapper->getPageSize() - 1));
1439 					if (mapperPhys != phys) {
1440 						panic("DMA[%p] mismatch at offset %llx + %llx, dma %llx mapperPhys %llx != %llx, len %llx",
1441 						    this, offset, checkOffset,
1442 						    state->fIOVMAddr + checkOffset, mapperPhys, phys, state->fLength);
1443 					}
1444 					checkOffset += page_size - (phys & page_mask);
1445 				}
1446 			}
1447 #endif
1448 			if (rtn == kIOReturnSuccess) {
1449 				internalState->fIOVMAddrValid = true;
1450 				assert(state->fLength);
1451 				if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
1452 					UInt64 length = state->fLength;
1453 					offset          += length;
1454 					curSeg.fLength  += length;
1455 					internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
1456 				}
1457 			} else if (rtn == kIOReturnOverrun) {
1458 				internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
1459 			} else {
1460 				return rtn;
1461 			}
1462 		}
1463 
1464 		// seg = state, offset = end of seg
1465 		if (!curSegValid) {
1466 			UInt64 length                 = state->fLength;
1467 			offset                       += length;
1468 			curSeg.fIOVMAddr              = state->fIOVMAddr;
1469 			curSeg.fLength                = length;
1470 			curSegValid                   = true;
1471 			internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
1472 		}
1473 
1474 		if (!internalState->fIOVMAddrValid) {
1475 			// maxPhys
1476 			if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) {
1477 				if (internalState->fCursor) {
1478 					curSegValid = curSeg.fIOVMAddr = 0;
1479 					ret = kIOReturnMessageTooLarge;
1480 					break;
1481 				} else if (curSeg.fIOVMAddr <= maxPhys) {
1482 					UInt64 remain, newLength;
1483 
1484 					newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
1485 					DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
1486 					remain           = curSeg.fLength - newLength;
1487 					state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
1488 					internalState->fIOVMAddrValid = true;
1489 					curSeg.fLength   = newLength;
1490 					state->fLength   = remain;
1491 					offset          -= remain;
1492 				} else {
1493 					UInt64    addr = curSeg.fIOVMAddr;
1494 					ppnum_t   addrPage = (ppnum_t) atop_64(addr);
1495 					vm_page_t remap = NULL;
1496 					UInt64    remain, newLength;
1497 
1498 					DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);
1499 
1500 					remap = internalState->fNextRemapPage;
1501 					if (remap && (addrPage == vm_page_get_offset(remap))) {
1502 					} else {
1503 						for (remap = internalState->fCopyPageAlloc;
1504 						    remap && (addrPage != vm_page_get_offset(remap));
1505 						    remap = vm_page_get_next(remap)) {
1506 						}
1507 					}
1508 
1509 					if (!remap) {
1510 						panic("no remap page found");
1511 					}
1512 
1513 					curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
1514 					    + (addr & PAGE_MASK);
1515 					curSegValid = true;
1516 					internalState->fNextRemapPage = vm_page_get_next(remap);
1517 
1518 					newLength            = PAGE_SIZE - (addr & PAGE_MASK);
1519 					if (newLength < curSeg.fLength) {
1520 						remain           = curSeg.fLength - newLength;
1521 						state->fIOVMAddr = addr + newLength;
1522 						internalState->fIOVMAddrValid = true;
1523 						curSeg.fLength   = newLength;
1524 						state->fLength   = remain;
1525 						offset          -= remain;
1526 					}
1527 					DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
1528 				}
1529 			}
1530 
1531 			// reduce size of output segment
1532 			uint64_t reduce, leftover = 0;
1533 
1534 			// fMaxSegmentSize
1535 			if (curSeg.fLength > fMaxSegmentSize) {
1536 				leftover      += curSeg.fLength - fMaxSegmentSize;
1537 				curSeg.fLength = fMaxSegmentSize;
1538 				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
1539 				internalState->fIOVMAddrValid = true;
1540 			}
1541 
1542 			// alignment current length
1543 
1544 			reduce = (curSeg.fLength & fAlignMaskLength);
1545 			if (reduce && (curSeg.fLength > reduce)) {
1546 				leftover       += reduce;
1547 				curSeg.fLength -= reduce;
1548 				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
1549 				internalState->fIOVMAddrValid = true;
1550 			}
1551 
1552 			// alignment next address
1553 
1554 			reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
1555 			if (reduce && (curSeg.fLength > reduce)) {
1556 				leftover       += reduce;
1557 				curSeg.fLength -= reduce;
1558 				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
1559 				internalState->fIOVMAddrValid = true;
1560 			}
1561 
1562 			if (leftover) {
1563 				DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
1564 				    leftover, offset,
1565 				    curSeg.fIOVMAddr, curSeg.fLength);
1566 				state->fLength   = leftover;
1567 				offset          -= leftover;
1568 			}
1569 
1570 			//
1571 
1572 			if (internalState->fCursor) {
1573 				bool misaligned;
1574 				uint32_t mask;
1575 
1576 				mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
1577 				misaligned = (0 != (mask & curSeg.fIOVMAddr));
1578 				if (!misaligned) {
1579 					mask = fAlignMaskLength;
1580 					misaligned |= (0 != (mask &  curSeg.fLength));
1581 				}
1582 				if (misaligned) {
1583 					DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
1586 					curSegValid = curSeg.fIOVMAddr = 0;
1587 					ret = kIOReturnNotAligned;
1588 					break;
1589 				}
1590 			}
1591 
1592 			if (offset >= memLength) {
1593 				curSeg.fLength   -= (offset - memLength);
1594 				offset = memLength;
1595 				internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end
1596 				break;
1597 			}
1598 		}
1599 
1600 		if (internalState->fIOVMAddrValid) {
1601 			if (segIndex + 1 == numSegments) {
1602 				break;
1603 			}
1604 #if defined(LOGTAG)
1605 			if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) {
1606 				IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength);
1607 			}
1608 #endif /* defined(LOGTAG) */
1609 			ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
1610 			curSegValid = curSeg.fIOVMAddr = 0;
1611 			if (kIOReturnSuccess != ret) {
1612 				break;
1613 			}
1614 		}
1615 	}
1616 
1617 	if (curSegValid) {
1618 #if defined(LOGTAG)
1619 		if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) {
1620 			IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength);
1621 		}
1622 #endif /* defined(LOGTAG) */
1623 		ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
1624 	}
1625 
1626 	if (kIOReturnSuccess == ret) {
1627 		state->fOffset = offset;
1628 		*offsetP       = offset - internalState->fPreparedOffset;
1629 		*numSegmentsP  = segIndex;
1630 	}
1631 	return ret;
1632 }
1633 
1634 IOReturn
1635 IODMACommand::clientOutputSegment(
1636 	void *reference, IODMACommand *target,
1637 	Segment64 segment, void *vSegList, UInt32 outSegIndex)
1638 {
1639 	SegmentFunction segmentFunction = (SegmentFunction) reference;
1640 	IOReturn ret = kIOReturnSuccess;
1641 
1642 	if (target->fNumAddressBits && (target->fNumAddressBits < 64)
1643 	    && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
1644 	    && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) {
1645 		DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1646 		ret = kIOReturnMessageTooLarge;
1647 	}
1648 
1649 	if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) {
1650 		DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1651 		ret = kIOReturnMessageTooLarge;
1652 	}
1653 
1654 	return ret;
1655 }
1656 
1657 IOReturn
1658 IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
1659     UInt64   *offsetP,
1660     void     *segmentsP,
1661     UInt32   *numSegmentsP)
1662 {
1663 	return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
1664 	           offsetP, segmentsP, numSegmentsP);
1665 }
1666 
1667 bool
1668 IODMACommand::OutputHost32(IODMACommand *,
1669     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1670 {
1671 	Segment32 *base = (Segment32 *) vSegList;
1672 	base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
1673 	base[outSegIndex].fLength   = (UInt32) segment.fLength;
1674 	return true;
1675 }
1676 
1677 bool
1678 IODMACommand::OutputBig32(IODMACommand *,
1679     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1680 {
1681 	const UInt offAddr = outSegIndex * sizeof(Segment32);
1682 	const UInt offLen  = offAddr + sizeof(UInt32);
1683 	OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1684 	OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
1685 	return true;
1686 }
1687 
1688 bool
1689 IODMACommand::OutputLittle32(IODMACommand *,
1690     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1691 {
1692 	const UInt offAddr = outSegIndex * sizeof(Segment32);
1693 	const UInt offLen  = offAddr + sizeof(UInt32);
1694 	OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1695 	OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
1696 	return true;
1697 }
1698 
1699 bool
1700 IODMACommand::OutputHost64(IODMACommand *,
1701     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1702 {
1703 	Segment64 *base = (Segment64 *) vSegList;
1704 	base[outSegIndex] = segment;
1705 	return true;
1706 }
1707 
1708 bool
1709 IODMACommand::OutputBig64(IODMACommand *,
1710     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1711 {
1712 	const UInt offAddr = outSegIndex * sizeof(Segment64);
1713 	const UInt offLen  = offAddr + sizeof(UInt64);
1714 	OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1715 	OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
1716 	return true;
1717 }
1718 
1719 bool
1720 IODMACommand::OutputLittle64(IODMACommand *,
1721     Segment64 segment, void *vSegList, UInt32 outSegIndex)
1722 {
1723 	const UInt offAddr = outSegIndex * sizeof(Segment64);
1724 	const UInt offLen  = offAddr + sizeof(UInt64);
1725 	OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1726 	OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
1727 	return true;
1728 }
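
// Editor's sketch of a custom SegmentFunction, following the same contract as
// the Output* helpers above: copy one segment into the caller's list and
// return true, or return false to make the generator fail with
// kIOReturnMessageTooLarge (see clientOutputSegment). The hardware descriptor
// layout is hypothetical.
#if 0
struct MyHWDescriptor {
	uint64_t addr;
	uint32_t len;
	uint32_t flags;
};

static bool
MyOutputSegment(IODMACommand * /* target */,
    IODMACommand::Segment64 segment, void * vSegList, UInt32 outSegIndex)
{
	MyHWDescriptor * descs = (MyHWDescriptor *) vSegList;

	if (segment.fLength > UINT32_MAX) {
		return false;    // hardware length field is only 32 bits wide
	}
	descs[outSegIndex].addr  = segment.fIOVMAddr;
	descs[outSegIndex].len   = (uint32_t) segment.fLength;
	descs[outSegIndex].flags = 0;
	return true;
}
#endif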
1729