1 /*
2 * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #define IOKIT_ENABLE_SHARED_PTR
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42 #include <IOKit/IOBufferMemoryDescriptor.h>
43
44 #include <IOKit/IOKitDebug.h>
45 #include <IOKit/IOTimeStamp.h>
46 #include <libkern/OSDebug.h>
47 #include <libkern/OSKextLibPrivate.h>
48
49 #include "IOKitKernelInternal.h"
50
51 #include <libkern/c++/OSAllocation.h>
52 #include <libkern/c++/OSContainers.h>
53 #include <libkern/c++/OSDictionary.h>
54 #include <libkern/c++/OSArray.h>
55 #include <libkern/c++/OSSymbol.h>
56 #include <libkern/c++/OSNumber.h>
57 #include <os/overflow.h>
58 #include <os/cpp_util.h>
59 #include <os/base_private.h>
60
61 #include <sys/uio.h>
62
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout_xnu.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <mach/memory_entry.h>
72 #include <mach/mach_host.h>
73 #include <vm/vm_fault_xnu.h>
74 #include <vm/vm_protos.h>
75 #include <vm/vm_memory_entry.h>
76 #include <vm/vm_kern_xnu.h>
77 #include <vm/vm_iokit.h>
78 #include <vm/vm_map_xnu.h>
79
80 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
81 extern void ipc_port_release_send(ipc_port_t port);
82
83 __END_DECLS
84
85 #define kIOMapperWaitSystem ((IOMapper *) 1)
86
87 static IOMapper * gIOSystemMapper = NULL;
88
89 ppnum_t gIOLastPage;
90
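// Size of the guard regions placed before and after a mapping created with the
// kIOMapGuardedLarge option (see IOMemoryDescriptorMapGuardSize() and
// IOMemoryDescriptorMapAlloc() below).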
91 enum {
92 kIOMapGuardSizeLarge = 65536
93 };
94
95 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
96
97 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
98
99 #define super IOMemoryDescriptor
100
101 OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
102 IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
103
104 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
105
106 static IORecursiveLock * gIOMemoryLock;
107
108 #define LOCK IORecursiveLockLock( gIOMemoryLock)
109 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
110 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
111 #define WAKEUP \
112 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
113
114 #if 0
115 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
116 #else
117 #define DEBG(fmt, args...) {}
118 #endif
119
120 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
121
122 // Some data structures and accessor macros used by the initWithOptions()
123 // function.
124
125 enum ioPLBlockFlags {
126 kIOPLOnDevice = 0x00000001,
127 kIOPLExternUPL = 0x00000002,
128 };
129
130 struct IOMDPersistentInitData {
131 const IOGeneralMemoryDescriptor * fMD;
132 IOMemoryReference * fMemRef;
133 };
134
135 struct ioPLBlock {
136 upl_t fIOPL;
137 vm_address_t fPageInfo; // Pointer to page list or index into it
138 uint64_t fIOMDOffset; // The offset of this iopl in descriptor
139 ppnum_t fMappedPage; // Page number of first page in this iopl
140 unsigned int fPageOffset; // Offset within first page of iopl
141 unsigned int fFlags; // Flags
142 };
143
144 enum { kMaxWireTags = 6 };
145
146 struct ioGMDData {
147 IOMapper * fMapper;
148 uint64_t fDMAMapAlignment;
149 uint64_t fMappedBase;
150 uint64_t fMappedLength;
151 uint64_t fPreparationID;
152 #if IOTRACKING
153 IOTracking fWireTracking;
154 #endif /* IOTRACKING */
155 unsigned int fPageCnt;
156 uint8_t fDMAMapNumAddressBits;
157 unsigned char fCompletionError:1;
158 unsigned char fMappedBaseValid:1;
159 unsigned char _resv:4;
160 unsigned char fDMAAccess:2;
161
162 /* variable length arrays */
163 upl_page_info_t fPageList[1]
164 #if __LP64__
165 // align fPageList as for ioPLBlock
166 __attribute__((aligned(sizeof(upl_t))))
167 #endif
168 ;
169 //ioPLBlock fBlocks[1];
170 };
171
172 #pragma GCC visibility push(hidden)
173
174 class _IOMemoryDescriptorMixedData : public OSObject
175 {
176 OSDeclareDefaultStructors(_IOMemoryDescriptorMixedData);
177
178 public:
179 static OSPtr<_IOMemoryDescriptorMixedData> withCapacity(size_t capacity);
180 bool initWithCapacity(size_t capacity);
181 virtual void free() APPLE_KEXT_OVERRIDE;
182
183 bool appendBytes(const void * bytes, size_t length);
184 bool setLength(size_t length);
185
186 const void * getBytes() const;
187 size_t getLength() const;
188
189 private:
190 void freeMemory();
191
192 void * _data = nullptr;
193 size_t _length = 0;
194 size_t _capacity = 0;
195 };
196
197 #pragma GCC visibility pop
198
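// Accessors for the variable-length ioGMDData allocation: the upl_page_info_t
// page list (fPageList[0..fPageCnt-1]) is immediately followed in memory by the
// array of ioPLBlock records, so getIOPLList() points just past the page list
// and getNumIOPL() computes the block count from the container's total length.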
199 #define getDataP(osd) ((ioGMDData *) (osd)->getBytes())
200 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
201 #define getNumIOPL(osd, d) \
202 ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
203 #define getPageList(d) (&(d->fPageList[0]))
204 #define computeDataSize(p, u) \
205 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
206
207 enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
208
209 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
210
211 extern "C" {
212 kern_return_t
213 device_data_action(
214 uintptr_t device_handle,
215 ipc_port_t device_pager,
216 vm_prot_t protection,
217 vm_object_offset_t offset,
218 vm_size_t size)
219 {
220 kern_return_t kr;
221 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
222 OSSharedPtr<IOMemoryDescriptor> memDesc;
223
224 LOCK;
225 if (ref->dp.memory) {
226 memDesc.reset(ref->dp.memory, OSRetain);
227 kr = memDesc->handleFault(device_pager, offset, size);
228 memDesc.reset();
229 } else {
230 kr = KERN_ABORTED;
231 }
232 UNLOCK;
233
234 return kr;
235 }
236
237 kern_return_t
238 device_close(
239 uintptr_t device_handle)
240 {
241 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
242
243 IOFreeType( ref, IOMemoryDescriptorReserved );
244
245 return kIOReturnSuccess;
246 }
247 }; // end extern "C"
248
249 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
250
251 // Note this inline function uses C++ reference arguments to return values.
252 // This means that pointers are not passed, and NULLs don't have to be
253 // checked for, since a NULL reference is illegal.
254 static inline void
255 getAddrLenForInd(
256 mach_vm_address_t &addr,
257 mach_vm_size_t &len, // Output variables
258 UInt32 type,
259 IOGeneralMemoryDescriptor::Ranges r,
260 UInt32 ind,
261 task_t task __unused)
262 {
263 assert(kIOMemoryTypeUIO == type
264 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
265 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
266 if (kIOMemoryTypeUIO == type) {
267 user_size_t us;
268 user_addr_t ad;
269 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
270 }
271 #ifndef __LP64__
272 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
273 IOAddressRange cur = r.v64[ind];
274 addr = cur.address;
275 len = cur.length;
276 }
277 #endif /* !__LP64__ */
278 else {
279 IOVirtualRange cur = r.v[ind];
280 addr = cur.address;
281 len = cur.length;
282 }
283 #if CONFIG_PROB_GZALLOC
284 if (task == kernel_task) {
285 addr = pgz_decode(addr, len);
286 }
287 #endif /* CONFIG_PROB_GZALLOC */
288 }
289
290 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
291
292 static IOReturn
293 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
294 {
295 IOReturn err = kIOReturnSuccess;
296
297 *control = VM_PURGABLE_SET_STATE;
298
299 enum { kIOMemoryPurgeableControlMask = 15 };
300
301 switch (kIOMemoryPurgeableControlMask & newState) {
302 case kIOMemoryPurgeableKeepCurrent:
303 *control = VM_PURGABLE_GET_STATE;
304 break;
305
306 case kIOMemoryPurgeableNonVolatile:
307 *state = VM_PURGABLE_NONVOLATILE;
308 break;
309 case kIOMemoryPurgeableVolatile:
310 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
311 break;
312 case kIOMemoryPurgeableEmpty:
313 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
314 break;
315 default:
316 err = kIOReturnBadArgument;
317 break;
318 }
319
320 if (*control == VM_PURGABLE_SET_STATE) {
321 // let VM know this call is from the kernel and is allowed to alter
322 // the volatility of the memory entry even if it was created with
323 // MAP_MEM_PURGABLE_KERNEL_ONLY
324 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
325 }
326
327 return err;
328 }
329
330 static IOReturn
331 purgeableStateBits(int * state)
332 {
333 IOReturn err = kIOReturnSuccess;
334
335 switch (VM_PURGABLE_STATE_MASK & *state) {
336 case VM_PURGABLE_NONVOLATILE:
337 *state = kIOMemoryPurgeableNonVolatile;
338 break;
339 case VM_PURGABLE_VOLATILE:
340 *state = kIOMemoryPurgeableVolatile;
341 break;
342 case VM_PURGABLE_EMPTY:
343 *state = kIOMemoryPurgeableEmpty;
344 break;
345 default:
346 *state = kIOMemoryPurgeableNonVolatile;
347 err = kIOReturnNotReady;
348 break;
349 }
350 return err;
351 }
352
353 typedef struct {
354 unsigned int wimg;
355 unsigned int object_type;
356 } iokit_memtype_entry;
357
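// Table mapping each IOKit cache mode (kIO*Cache option) to the corresponding
// VM WIMG attribute and memory-entry object type; the helpers below translate
// between the three representations.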
358 static const iokit_memtype_entry iomd_mem_types[] = {
359 [kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
360 [kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
361 [kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
362 [kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
363 [kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
364 [kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
365 [kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
366 [kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
367 [kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
368 [kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
369 };
370
371 static vm_prot_t
372 vmProtForCacheMode(IOOptionBits cacheMode)
373 {
374 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
375 if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
376 cacheMode = kIODefaultCache;
377 }
378 vm_prot_t prot = 0;
379 SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
380 return prot;
381 }
382
383 static unsigned int
384 pagerFlagsForCacheMode(IOOptionBits cacheMode)
385 {
386 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
387 if (cacheMode >= (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0]))) {
388 cacheMode = kIODefaultCache;
389 }
390 if (cacheMode == kIODefaultCache) {
391 return -1U;
392 }
393 return iomd_mem_types[cacheMode].wimg;
394 }
395
396 static IOOptionBits
397 cacheModeForPagerFlags(unsigned int pagerFlags)
398 {
399 pagerFlags &= VM_WIMG_MASK;
400 IOOptionBits cacheMode = kIODefaultCache;
401 for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
402 if (iomd_mem_types[i].wimg == pagerFlags) {
403 cacheMode = i;
404 break;
405 }
406 }
407 return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
408 }
409
410 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
411 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
412
413 struct IOMemoryEntry {
414 ipc_port_t entry;
415 int64_t offset;
416 uint64_t size;
417 uint64_t start;
418 };
419
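// An IOMemoryReference is a reference-counted collection of VM named entries
// (IOMemoryEntry) covering a descriptor's ranges. It is built by
// memoryReferenceCreate() and consumed by memoryReferenceMap() and friends.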
420 struct IOMemoryReference {
421 volatile SInt32 refCount;
422 vm_prot_t prot;
423 uint32_t capacity;
424 uint32_t count;
425 struct IOMemoryReference * mapRef;
426 IOMemoryEntry entries[0];
427 };
428
429 enum{
430 kIOMemoryReferenceReuse = 0x00000001,
431 kIOMemoryReferenceWrite = 0x00000002,
432 kIOMemoryReferenceCOW = 0x00000004,
433 };
434
435 SInt32 gIOMemoryReferenceCount;
436
437 IOMemoryReference *
438 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
439 {
440 IOMemoryReference * ref;
441 size_t oldCapacity;
442
443 if (realloc) {
444 oldCapacity = realloc->capacity;
445 } else {
446 oldCapacity = 0;
447 }
448
449 // Use the kalloc API instead of manually handling the reallocation
450 ref = krealloc_type(IOMemoryReference, IOMemoryEntry,
451 oldCapacity, capacity, realloc, Z_WAITOK_ZERO);
452 if (ref) {
453 if (oldCapacity == 0) {
454 ref->refCount = 1;
455 OSIncrementAtomic(&gIOMemoryReferenceCount);
456 }
457 ref->capacity = capacity;
458 }
459 return ref;
460 }
461
462 void
463 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
464 {
465 IOMemoryEntry * entries;
466
467 if (ref->mapRef) {
468 memoryReferenceFree(ref->mapRef);
469 ref->mapRef = NULL;
470 }
471
472 entries = ref->entries + ref->count;
473 while (entries > &ref->entries[0]) {
474 entries--;
475 ipc_port_release_send(entries->entry);
476 }
477 kfree_type(IOMemoryReference, IOMemoryEntry, ref->capacity, ref);
478
479 OSDecrementAtomic(&gIOMemoryReferenceCount);
480 }
481
482 void
483 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
484 {
485 if (1 == OSDecrementAtomic(&ref->refCount)) {
486 memoryReferenceFree(ref);
487 }
488 }
489
490
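// Create the IOMemoryReference for this descriptor: coalesce adjacent virtual
// ranges, make a VM named entry for each run (or a device-pager backed entry
// for physical descriptors), and record each entry's offset and size for later
// mapping.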
491 IOReturn
492 IOGeneralMemoryDescriptor::memoryReferenceCreate(
493 IOOptionBits options,
494 IOMemoryReference ** reference)
495 {
496 enum { kCapacity = 4, kCapacityInc = 4 };
497
498 kern_return_t err;
499 IOMemoryReference * ref;
500 IOMemoryEntry * entries;
501 IOMemoryEntry * cloneEntries = NULL;
502 vm_map_t map;
503 ipc_port_t entry, cloneEntry;
504 vm_prot_t prot;
505 memory_object_size_t actualSize;
506 uint32_t rangeIdx;
507 uint32_t count;
508 mach_vm_address_t entryAddr, endAddr, entrySize;
509 mach_vm_size_t srcAddr, srcLen;
510 mach_vm_size_t nextAddr, nextLen;
511 mach_vm_size_t offset, remain;
512 vm_map_offset_t overmap_start = 0, overmap_end = 0;
513 int misaligned_start = 0, misaligned_end = 0;
514 IOByteCount physLen;
515 IOOptionBits type = (_flags & kIOMemoryTypeMask);
516 IOOptionBits cacheMode;
517 unsigned int pagerFlags;
518 vm_tag_t tag;
519 vm_named_entry_kernel_flags_t vmne_kflags;
520
521 ref = memoryReferenceAlloc(kCapacity, NULL);
522 if (!ref) {
523 return kIOReturnNoMemory;
524 }
525
526 tag = (vm_tag_t) getVMTag(kernel_map);
527 vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
528 entries = &ref->entries[0];
529 count = 0;
530 err = KERN_SUCCESS;
531
532 offset = 0;
533 rangeIdx = 0;
534 remain = _length;
535 if (_task) {
536 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
537
538 // account for IOBMD setLength(), use its capacity as length
539 IOBufferMemoryDescriptor * bmd;
540 if ((bmd = OSDynamicCast(IOBufferMemoryDescriptor, this))) {
541 nextLen = bmd->getCapacity();
542 remain = nextLen;
543 }
544 } else {
545 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
546 nextLen = physLen;
547
548 // default cache mode for physical
549 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
550 IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
551 _flags |= (mode << kIOMemoryBufferCacheShift);
552 }
553 }
554
555 // cache mode & vm_prot
556 prot = VM_PROT_READ;
557 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
558 prot |= vmProtForCacheMode(cacheMode);
559 // VM system requires write access to change cache mode
560 if (kIODefaultCache != cacheMode) {
561 prot |= VM_PROT_WRITE;
562 }
563 if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
564 prot |= VM_PROT_WRITE;
565 }
566 if (kIOMemoryReferenceWrite & options) {
567 prot |= VM_PROT_WRITE;
568 }
569 if (kIOMemoryReferenceCOW & options) {
570 prot |= MAP_MEM_VM_COPY;
571 }
572
573 if (kIOMemoryUseReserve & _flags) {
574 prot |= MAP_MEM_GRAB_SECLUDED;
575 }
576
577 if ((kIOMemoryReferenceReuse & options) && _memRef) {
578 cloneEntries = &_memRef->entries[0];
579 prot |= MAP_MEM_NAMED_REUSE;
580 }
581
582 if (_task) {
583 // virtual ranges
584
585 if (kIOMemoryBufferPageable & _flags) {
586 int ledger_tag, ledger_no_footprint;
587
588 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
589 prot |= MAP_MEM_NAMED_CREATE;
590
591 // default accounting settings:
592 // + "none" ledger tag
593 // + include in footprint
594 // can be changed later with ::setOwnership()
595 ledger_tag = VM_LEDGER_TAG_NONE;
596 ledger_no_footprint = 0;
597
598 if (kIOMemoryBufferPurgeable & _flags) {
599 prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
600 if (VM_KERN_MEMORY_SKYWALK == tag) {
601 // Skywalk purgeable memory accounting:
602 // + "network" ledger tag
603 // + not included in footprint
604 ledger_tag = VM_LEDGER_TAG_NETWORK;
605 ledger_no_footprint = 1;
606 } else {
607 // regular purgeable memory accounting:
608 // + no ledger tag
609 // + included in footprint
610 ledger_tag = VM_LEDGER_TAG_NONE;
611 ledger_no_footprint = 0;
612 }
613 }
614 vmne_kflags.vmnekf_ledger_tag = ledger_tag;
615 vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
616 if (kIOMemoryUseReserve & _flags) {
617 prot |= MAP_MEM_GRAB_SECLUDED;
618 }
619
620 prot |= VM_PROT_WRITE;
621 map = NULL;
622 } else {
623 prot |= MAP_MEM_USE_DATA_ADDR;
624 map = get_task_map(_task);
625 }
626 DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
627
628 while (remain) {
629 srcAddr = nextAddr;
630 srcLen = nextLen;
631 nextAddr = 0;
632 nextLen = 0;
633 // coalesce addr range
634 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
635 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
636 if ((srcAddr + srcLen) != nextAddr) {
637 break;
638 }
639 srcLen += nextLen;
640 }
641
642 if (MAP_MEM_USE_DATA_ADDR & prot) {
643 entryAddr = srcAddr;
644 endAddr = srcAddr + srcLen;
645 } else {
646 entryAddr = trunc_page_64(srcAddr);
647 endAddr = round_page_64(srcAddr + srcLen);
648 }
649 if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
650 DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
651 }
652
653 do{
654 entrySize = (endAddr - entryAddr);
655 if (!entrySize) {
656 break;
657 }
658 actualSize = entrySize;
659
660 cloneEntry = MACH_PORT_NULL;
661 if (MAP_MEM_NAMED_REUSE & prot) {
662 if (cloneEntries < &_memRef->entries[_memRef->count]) {
663 cloneEntry = cloneEntries->entry;
664 } else {
665 prot &= ~MAP_MEM_NAMED_REUSE;
666 }
667 }
668
669 err = mach_make_memory_entry_internal(map,
670 &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
671
672 if (KERN_SUCCESS != err) {
673 DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
674 break;
675 }
676 if (MAP_MEM_USE_DATA_ADDR & prot) {
677 if (actualSize > entrySize) {
678 actualSize = entrySize;
679 }
680 } else if (actualSize > entrySize) {
681 panic("mach_make_memory_entry_64 actualSize");
682 }
683
684 memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
685
686 if (count && overmap_start) {
687 /*
688 * Track misaligned start for all
689 * except the first entry.
690 */
691 misaligned_start++;
692 }
693
694 if (overmap_end) {
695 /*
696 * Ignore misaligned end for the
697 * last entry.
698 */
699 if ((entryAddr + actualSize) != endAddr) {
700 misaligned_end++;
701 }
702 }
703
704 if (count) {
705 /* Middle entries */
706 if (misaligned_start || misaligned_end) {
707 DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
708 ipc_port_release_send(entry);
709 err = KERN_NOT_SUPPORTED;
710 break;
711 }
712 }
713
714 if (count >= ref->capacity) {
715 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
716 entries = &ref->entries[count];
717 }
718 entries->entry = entry;
719 entries->size = actualSize;
720 entries->offset = offset + (entryAddr - srcAddr);
721 entries->start = entryAddr;
722 entryAddr += actualSize;
723 if (MAP_MEM_NAMED_REUSE & prot) {
724 if ((cloneEntries->entry == entries->entry)
725 && (cloneEntries->size == entries->size)
726 && (cloneEntries->offset == entries->offset)) {
727 cloneEntries++;
728 } else {
729 prot &= ~MAP_MEM_NAMED_REUSE;
730 }
731 }
732 entries++;
733 count++;
734 }while (true);
735 offset += srcLen;
736 remain -= srcLen;
737 }
738 } else {
739 // _task == 0, physical or kIOMemoryTypeUPL
740 memory_object_t pager;
741 vm_size_t size = ptoa_64(_pages);
742
743 if (!getKernelReserved()) {
744 panic("getKernelReserved");
745 }
746
747 reserved->dp.pagerContig = (1 == _rangesCount);
748 reserved->dp.memory = this;
749
750 pagerFlags = pagerFlagsForCacheMode(cacheMode);
751 if (-1U == pagerFlags) {
752 panic("phys is kIODefaultCache");
753 }
754 if (reserved->dp.pagerContig) {
755 pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
756 }
757
758 pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
759 size, pagerFlags);
760 assert(pager);
761 if (!pager) {
762 DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
763 err = kIOReturnVMError;
764 } else {
765 srcAddr = nextAddr;
766 entryAddr = trunc_page_64(srcAddr);
767 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
768 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
769 assert(KERN_SUCCESS == err);
770 if (KERN_SUCCESS != err) {
771 device_pager_deallocate(pager);
772 } else {
773 reserved->dp.devicePager = pager;
774 entries->entry = entry;
775 entries->size = size;
776 entries->offset = offset + (entryAddr - srcAddr);
777 entries++;
778 count++;
779 }
780 }
781 }
782
783 ref->count = count;
784 ref->prot = prot;
785
786 if (_task && (KERN_SUCCESS == err)
787 && (kIOMemoryMapCopyOnWrite & _flags)
788 && !(kIOMemoryReferenceCOW & options)) {
789 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
790 if (KERN_SUCCESS != err) {
791 DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
792 }
793 }
794
795 if (KERN_SUCCESS == err) {
796 if (MAP_MEM_NAMED_REUSE & prot) {
797 memoryReferenceFree(ref);
798 OSIncrementAtomic(&_memRef->refCount);
799 ref = _memRef;
800 }
801 } else {
802 DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
803 memoryReferenceFree(ref);
804 ref = NULL;
805 }
806
807 *reference = ref;
808
809 return err;
810 }
811
812 static mach_vm_size_t
813 IOMemoryDescriptorMapGuardSize(vm_map_t map, IOOptionBits options)
814 {
815 switch (kIOMapGuardedMask & options) {
816 default:
817 case kIOMapGuardedSmall:
818 return vm_map_page_size(map);
819 case kIOMapGuardedLarge:
820 assert(0 == (kIOMapGuardSizeLarge & vm_map_page_mask(map)));
821 return kIOMapGuardSizeLarge;
822 }
823 ;
824 }
825
826 static kern_return_t
827 IOMemoryDescriptorMapDealloc(IOOptionBits options, vm_map_t map,
828 vm_map_offset_t addr, mach_vm_size_t size)
829 {
830 kern_return_t kr;
831 vm_map_offset_t actualAddr;
832 mach_vm_size_t actualSize;
833
834 actualAddr = vm_map_trunc_page(addr, vm_map_page_mask(map));
835 actualSize = vm_map_round_page(addr + size, vm_map_page_mask(map)) - actualAddr;
836
837 if (kIOMapGuardedMask & options) {
838 mach_vm_size_t guardSize = IOMemoryDescriptorMapGuardSize(map, options);
839 actualAddr -= guardSize;
840 actualSize += 2 * guardSize;
841 }
842 kr = mach_vm_deallocate(map, actualAddr, actualSize);
843
844 return kr;
845 }
846
847 kern_return_t
848 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
849 {
850 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
851 IOReturn err;
852 vm_map_offset_t addr;
853 mach_vm_size_t size;
854 mach_vm_size_t guardSize;
855 vm_map_kernel_flags_t vmk_flags;
856
857 addr = ref->mapped;
858 size = ref->size;
859 guardSize = 0;
860
861 if (kIOMapGuardedMask & ref->options) {
862 if (!(kIOMapAnywhere & ref->options)) {
863 return kIOReturnBadArgument;
864 }
865 guardSize = IOMemoryDescriptorMapGuardSize(map, ref->options);
866 size += 2 * guardSize;
867 }
868 if (kIOMapAnywhere & ref->options) {
869 vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
870 } else {
871 vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
872 }
873 vmk_flags.vm_tag = ref->tag;
874
875 /*
876 * Memory mapped into the kernel_map via IOMDs uses the data range.
877 * Memory being mapped should not contain kernel pointers.
878 */
879 if (map == kernel_map) {
880 vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
881 }
882
883 err = mach_vm_map_kernel(map, &addr, size,
884 #if __ARM_MIXED_PAGE_SIZE__
885 // TODO4K this should not be necessary...
886 (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
887 #else /* __ARM_MIXED_PAGE_SIZE__ */
888 (vm_map_offset_t) 0,
889 #endif /* __ARM_MIXED_PAGE_SIZE__ */
890 vmk_flags,
891 IPC_PORT_NULL,
892 (memory_object_offset_t) 0,
893 false, /* copy */
894 ref->prot,
895 ref->prot,
896 VM_INHERIT_NONE);
897 if (KERN_SUCCESS == err) {
898 ref->mapped = (mach_vm_address_t) addr;
899 ref->map = map;
900 if (kIOMapGuardedMask & ref->options) {
901 vm_map_offset_t lastpage = vm_map_trunc_page(addr + size - guardSize, vm_map_page_mask(map));
902
903 err = mach_vm_protect(map, addr, guardSize, false /*set max*/, VM_PROT_NONE);
904 assert(KERN_SUCCESS == err);
905 err = mach_vm_protect(map, lastpage, guardSize, false /*set max*/, VM_PROT_NONE);
906 assert(KERN_SUCCESS == err);
907 ref->mapped += guardSize;
908 }
909 }
910
911 return err;
912 }
913
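// Map some or all of the referenced memory entries into 'map'. VM space is
// allocated unless kIOMapOverwrite is set; each entry is then entered in turn,
// optionally prefaulting already-wired pages when kIOMapPrefault is requested.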
914 IOReturn
915 IOGeneralMemoryDescriptor::memoryReferenceMap(
916 IOMemoryReference * ref,
917 vm_map_t map,
918 mach_vm_size_t inoffset,
919 mach_vm_size_t size,
920 IOOptionBits options,
921 mach_vm_address_t * inaddr)
922 {
923 IOReturn err;
924 int64_t offset = inoffset;
925 uint32_t rangeIdx, entryIdx;
926 vm_map_offset_t addr, mapAddr;
927 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
928
929 mach_vm_address_t nextAddr;
930 mach_vm_size_t nextLen;
931 IOByteCount physLen;
932 IOMemoryEntry * entry;
933 vm_prot_t prot, memEntryCacheMode;
934 IOOptionBits type;
935 IOOptionBits cacheMode;
936 vm_tag_t tag;
937 // for the kIOMapPrefault option.
938 upl_page_info_t * pageList = NULL;
939 UInt currentPageIndex = 0;
940 bool didAlloc;
941
942 DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
943
944 if (ref->mapRef) {
945 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
946 return err;
947 }
948
949 if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
950 err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
951 return err;
952 }
953
954 type = _flags & kIOMemoryTypeMask;
955
956 prot = VM_PROT_READ;
957 if (!(kIOMapReadOnly & options)) {
958 prot |= VM_PROT_WRITE;
959 }
960 prot &= ref->prot;
961
962 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
963 if (kIODefaultCache != cacheMode) {
964 // VM system requires write access to update named entry cache mode
965 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
966 }
967
968 tag = (typeof(tag))getVMTag(map);
969
970 if (_task) {
971 // Find first range for offset
972 if (!_rangesCount) {
973 return kIOReturnBadArgument;
974 }
975 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
976 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx, _task);
977 if (remain < nextLen) {
978 break;
979 }
980 remain -= nextLen;
981 }
982 } else {
983 rangeIdx = 0;
984 remain = 0;
985 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
986 nextLen = size;
987 }
988
989 assert(remain < nextLen);
990 if (remain >= nextLen) {
991 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
992 return kIOReturnBadArgument;
993 }
994
995 nextAddr += remain;
996 nextLen -= remain;
997 #if __ARM_MIXED_PAGE_SIZE__
998 pageOffset = (vm_map_page_mask(map) & nextAddr);
999 #else /* __ARM_MIXED_PAGE_SIZE__ */
1000 pageOffset = (page_mask & nextAddr);
1001 #endif /* __ARM_MIXED_PAGE_SIZE__ */
1002 addr = 0;
1003 didAlloc = false;
1004
1005 if (!(options & kIOMapAnywhere)) {
1006 addr = *inaddr;
1007 if (pageOffset != (vm_map_page_mask(map) & addr)) {
1008 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
1009 }
1010 addr -= pageOffset;
1011 }
1012
1013 // find first entry for offset
1014 for (entryIdx = 0;
1015 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
1016 entryIdx++) {
1017 }
1018 entryIdx--;
1019 entry = &ref->entries[entryIdx];
1020
1021 // allocate VM
1022 #if __ARM_MIXED_PAGE_SIZE__
1023 size = round_page_mask_64(size + pageOffset, vm_map_page_mask(map));
1024 #else
1025 size = round_page_64(size + pageOffset);
1026 #endif
1027 if (kIOMapOverwrite & options) {
1028 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1029 map = IOPageableMapForAddress(addr);
1030 }
1031 err = KERN_SUCCESS;
1032 } else {
1033 IOMemoryDescriptorMapAllocRef ref;
1034 ref.map = map;
1035 ref.tag = tag;
1036 ref.options = options;
1037 ref.size = size;
1038 ref.prot = prot;
1039 if (options & kIOMapAnywhere) {
1040 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1041 ref.mapped = 0;
1042 } else {
1043 ref.mapped = addr;
1044 }
1045 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1046 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1047 } else {
1048 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1049 }
1050 if (KERN_SUCCESS == err) {
1051 addr = ref.mapped;
1052 map = ref.map;
1053 didAlloc = true;
1054 }
1055 }
1056
1057 /*
1058 * If the memory is associated with a device pager but doesn't have a UPL,
1059 * it will be immediately faulted in through the pager via populateDevicePager().
1060 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1061 * operations.
1062 */
1063 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1064 options &= ~kIOMapPrefault;
1065 }
1066
1067 /*
1068 * Prefaulting is only possible if we wired the memory earlier. Check the
1069 * memory type, and the underlying data.
1070 */
1071 if (options & kIOMapPrefault) {
1072 /*
1073 * The memory must have been wired by calling ::prepare(), otherwise
1074 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
1075 */
1076 assert(_wireCount != 0);
1077 assert(_memoryEntries != NULL);
1078 if ((_wireCount == 0) ||
1079 (_memoryEntries == NULL)) {
1080 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
1081 return kIOReturnBadArgument;
1082 }
1083
1084 // Get the page list.
1085 ioGMDData* dataP = getDataP(_memoryEntries);
1086 ioPLBlock const* ioplList = getIOPLList(dataP);
1087 pageList = getPageList(dataP);
1088
1089 // Get the number of IOPLs.
1090 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1091
1092 /*
1093 * Scan through the IOPL Info Blocks, looking for the first block containing
1094 * the offset. The search will go past it, so we'll need to go back to the
1095 * right range at the end.
1096 */
1097 UInt ioplIndex = 0;
1098 while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1099 ioplIndex++;
1100 }
1101 ioplIndex--;
1102
1103 // Retrieve the IOPL info block.
1104 ioPLBlock ioplInfo = ioplList[ioplIndex];
1105
1106 /*
1107 * For external UPLs, fPageInfo points directly to the UPL's page_info_t
1108 * array.
1109 */
1110 if (ioplInfo.fFlags & kIOPLExternUPL) {
1111 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1112 } else {
1113 pageList = &pageList[ioplInfo.fPageInfo];
1114 }
1115
1116 // Rebase [offset] into the IOPL in order to look for the first page index.
1117 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1118
1119 // Retrieve the index of the first page corresponding to the offset.
1120 currentPageIndex = atop_32(offsetInIOPL);
1121 }
1122
1123 // enter mappings
1124 remain = size;
1125 mapAddr = addr;
1126 addr += pageOffset;
1127
1128 while (remain && (KERN_SUCCESS == err)) {
1129 entryOffset = offset - entry->offset;
1130 if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1131 err = kIOReturnNotAligned;
1132 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1133 break;
1134 }
1135
1136 if (kIODefaultCache != cacheMode) {
1137 vm_size_t unused = 0;
1138 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1139 memEntryCacheMode, NULL, entry->entry);
1140 assert(KERN_SUCCESS == err);
1141 }
1142
1143 entryOffset -= pageOffset;
1144 if (entryOffset >= entry->size) {
1145 panic("entryOffset");
1146 }
1147 chunk = entry->size - entryOffset;
1148 if (chunk) {
1149 vm_map_kernel_flags_t vmk_flags = {
1150 .vmf_fixed = true,
1151 .vmf_overwrite = true,
1152 .vm_tag = tag,
1153 .vmkf_iokit_acct = true,
1154 };
1155
1156 if (chunk > remain) {
1157 chunk = remain;
1158 }
1159 if (options & kIOMapPrefault) {
1160 UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1161
1162 err = vm_map_enter_mem_object_prefault(map,
1163 &mapAddr,
1164 chunk, 0 /* mask */,
1165 vmk_flags,
1166 entry->entry,
1167 entryOffset,
1168 prot, // cur
1169 prot, // max
1170 &pageList[currentPageIndex],
1171 nb_pages);
1172
1173 if (err || vm_map_page_mask(map) < PAGE_MASK) {
1174 DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1175 }
1176 // Compute the next index in the page list.
1177 currentPageIndex += nb_pages;
1178 assert(currentPageIndex <= _pages);
1179 } else {
1180 err = mach_vm_map_kernel(map,
1181 &mapAddr,
1182 chunk, 0 /* mask */,
1183 vmk_flags,
1184 entry->entry,
1185 entryOffset,
1186 false, // copy
1187 prot, // cur
1188 prot, // max
1189 VM_INHERIT_NONE);
1190 }
1191 if (KERN_SUCCESS != err) {
1192 DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1193 break;
1194 }
1195 remain -= chunk;
1196 if (!remain) {
1197 break;
1198 }
1199 mapAddr += chunk;
1200 offset += chunk - pageOffset;
1201 }
1202 pageOffset = 0;
1203 entry++;
1204 entryIdx++;
1205 if (entryIdx >= ref->count) {
1206 err = kIOReturnOverrun;
1207 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1208 break;
1209 }
1210 }
1211
1212 if ((KERN_SUCCESS != err) && didAlloc) {
1213 (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1214 addr = 0;
1215 }
1216 *inaddr = addr;
1217
1218 if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1219 DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1220 }
1221 return err;
1222 }
1223
1224 #define LOGUNALIGN 0
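// Variant of memoryReferenceMap() used when the reference was created with
// MAP_MEM_USE_DATA_ADDR (unaligned data addresses): the required VM space is
// first sized with mach_memory_entry_map_size(), then each chunk is entered
// with vmf_return_data_addr set.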
1225 IOReturn
1226 IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1227 IOMemoryReference * ref,
1228 vm_map_t map,
1229 mach_vm_size_t inoffset,
1230 mach_vm_size_t size,
1231 IOOptionBits options,
1232 mach_vm_address_t * inaddr)
1233 {
1234 IOReturn err;
1235 int64_t offset = inoffset;
1236 uint32_t entryIdx, firstEntryIdx;
1237 vm_map_offset_t addr, mapAddr, mapAddrOut;
1238 vm_map_offset_t entryOffset, remain, chunk;
1239
1240 IOMemoryEntry * entry;
1241 vm_prot_t prot, memEntryCacheMode;
1242 IOOptionBits type;
1243 IOOptionBits cacheMode;
1244 vm_tag_t tag;
1245 // for the kIOMapPrefault option.
1246 upl_page_info_t * pageList = NULL;
1247 UInt currentPageIndex = 0;
1248 bool didAlloc;
1249
1250 DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1251
1252 if (ref->mapRef) {
1253 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1254 return err;
1255 }
1256
1257 #if LOGUNALIGN
1258 printf("MAP offset %qx, %qx\n", inoffset, size);
1259 #endif
1260
1261 type = _flags & kIOMemoryTypeMask;
1262
1263 prot = VM_PROT_READ;
1264 if (!(kIOMapReadOnly & options)) {
1265 prot |= VM_PROT_WRITE;
1266 }
1267 prot &= ref->prot;
1268
1269 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1270 if (kIODefaultCache != cacheMode) {
1271 // VM system requires write access to update named entry cache mode
1272 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1273 }
1274
1275 tag = (vm_tag_t) getVMTag(map);
1276
1277 addr = 0;
1278 didAlloc = false;
1279
1280 if (!(options & kIOMapAnywhere)) {
1281 addr = *inaddr;
1282 }
1283
1284 // find first entry for offset
1285 for (firstEntryIdx = 0;
1286 (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1287 firstEntryIdx++) {
1288 }
1289 firstEntryIdx--;
1290
1291 // calculate required VM space
1292
1293 entryIdx = firstEntryIdx;
1294 entry = &ref->entries[entryIdx];
1295
1296 remain = size;
1297 int64_t iteroffset = offset;
1298 uint64_t mapSize = 0;
1299 while (remain) {
1300 entryOffset = iteroffset - entry->offset;
1301 if (entryOffset >= entry->size) {
1302 panic("entryOffset");
1303 }
1304
1305 #if LOGUNALIGN
1306 printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1307 entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1308 #endif
1309
1310 chunk = entry->size - entryOffset;
1311 if (chunk) {
1312 if (chunk > remain) {
1313 chunk = remain;
1314 }
1315 mach_vm_size_t entrySize;
1316 err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1317 assert(KERN_SUCCESS == err);
1318 mapSize += entrySize;
1319
1320 remain -= chunk;
1321 if (!remain) {
1322 break;
1323 }
1324 iteroffset += chunk; // - pageOffset;
1325 }
1326 entry++;
1327 entryIdx++;
1328 if (entryIdx >= ref->count) {
1329 panic("overrun");
1330 err = kIOReturnOverrun;
1331 break;
1332 }
1333 }
1334
1335 if (kIOMapOverwrite & options) {
1336 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1337 map = IOPageableMapForAddress(addr);
1338 }
1339 err = KERN_SUCCESS;
1340 } else {
1341 IOMemoryDescriptorMapAllocRef ref;
1342 ref.map = map;
1343 ref.tag = tag;
1344 ref.options = options;
1345 ref.size = mapSize;
1346 ref.prot = prot;
1347 if (options & kIOMapAnywhere) {
1348 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1349 ref.mapped = 0;
1350 } else {
1351 ref.mapped = addr;
1352 }
1353 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1354 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1355 } else {
1356 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1357 }
1358
1359 if (KERN_SUCCESS == err) {
1360 addr = ref.mapped;
1361 map = ref.map;
1362 didAlloc = true;
1363 }
1364 #if LOGUNALIGN
1365 IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1366 #endif
1367 }
1368
1369 /*
1370 * If the memory is associated with a device pager but doesn't have a UPL,
1371 * it will be immediately faulted in through the pager via populateDevicePager().
1372 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1373 * operations.
1374 */
1375 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1376 options &= ~kIOMapPrefault;
1377 }
1378
1379 /*
1380 * Prefaulting is only possible if we wired the memory earlier. Check the
1381 * memory type, and the underlying data.
1382 */
1383 if (options & kIOMapPrefault) {
1384 /*
1385 * The memory must have been wired by calling ::prepare(), otherwise
1386 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
1387 */
1388 assert(_wireCount != 0);
1389 assert(_memoryEntries != NULL);
1390 if ((_wireCount == 0) ||
1391 (_memoryEntries == NULL)) {
1392 return kIOReturnBadArgument;
1393 }
1394
1395 // Get the page list.
1396 ioGMDData* dataP = getDataP(_memoryEntries);
1397 ioPLBlock const* ioplList = getIOPLList(dataP);
1398 pageList = getPageList(dataP);
1399
1400 // Get the number of IOPLs.
1401 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1402
1403 /*
1404 * Scan through the IOPL Info Blocks, looking for the first block containing
1405 * the offset. The search will go past it, so we'll need to go back to the
1406 * right range at the end.
1407 */
1408 UInt ioplIndex = 0;
1409 while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1410 ioplIndex++;
1411 }
1412 ioplIndex--;
1413
1414 // Retrieve the IOPL info block.
1415 ioPLBlock ioplInfo = ioplList[ioplIndex];
1416
1417 /*
1418 * For external UPLs, fPageInfo points directly to the UPL's page_info_t
1419 * array.
1420 */
1421 if (ioplInfo.fFlags & kIOPLExternUPL) {
1422 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1423 } else {
1424 pageList = &pageList[ioplInfo.fPageInfo];
1425 }
1426
1427 // Rebase [offset] into the IOPL in order to look for the first page index.
1428 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1429
1430 // Retrieve the index of the first page corresponding to the offset.
1431 currentPageIndex = atop_32(offsetInIOPL);
1432 }
1433
1434 // enter mappings
1435 remain = size;
1436 mapAddr = addr;
1437 entryIdx = firstEntryIdx;
1438 entry = &ref->entries[entryIdx];
1439
1440 while (remain && (KERN_SUCCESS == err)) {
1441 #if LOGUNALIGN
1442 printf("offset %qx, %qx\n", offset, entry->offset);
1443 #endif
1444 if (kIODefaultCache != cacheMode) {
1445 vm_size_t unused = 0;
1446 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1447 memEntryCacheMode, NULL, entry->entry);
1448 assert(KERN_SUCCESS == err);
1449 }
1450 entryOffset = offset - entry->offset;
1451 if (entryOffset >= entry->size) {
1452 panic("entryOffset");
1453 }
1454 chunk = entry->size - entryOffset;
1455 #if LOGUNALIGN
1456 printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1457 #endif
1458 if (chunk) {
1459 vm_map_kernel_flags_t vmk_flags = {
1460 .vmf_fixed = true,
1461 .vmf_overwrite = true,
1462 .vmf_return_data_addr = true,
1463 .vm_tag = tag,
1464 .vmkf_iokit_acct = true,
1465 };
1466
1467 if (chunk > remain) {
1468 chunk = remain;
1469 }
1470 mapAddrOut = mapAddr;
1471 if (options & kIOMapPrefault) {
1472 UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1473
1474 err = vm_map_enter_mem_object_prefault(map,
1475 &mapAddrOut,
1476 chunk, 0 /* mask */,
1477 vmk_flags,
1478 entry->entry,
1479 entryOffset,
1480 prot, // cur
1481 prot, // max
1482 &pageList[currentPageIndex],
1483 nb_pages);
1484
1485 // Compute the next index in the page list.
1486 currentPageIndex += nb_pages;
1487 assert(currentPageIndex <= _pages);
1488 } else {
1489 #if LOGUNALIGN
1490 printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1491 #endif
1492 err = mach_vm_map_kernel(map,
1493 &mapAddrOut,
1494 chunk, 0 /* mask */,
1495 vmk_flags,
1496 entry->entry,
1497 entryOffset,
1498 false, // copy
1499 prot, // cur
1500 prot, // max
1501 VM_INHERIT_NONE);
1502 }
1503 if (KERN_SUCCESS != err) {
1504 panic("map enter err %x", err);
1505 break;
1506 }
1507 #if LOGUNALIGN
1508 printf("mapAddr o %qx\n", mapAddrOut);
1509 #endif
1510 if (entryIdx == firstEntryIdx) {
1511 addr = mapAddrOut;
1512 }
1513 remain -= chunk;
1514 if (!remain) {
1515 break;
1516 }
1517 mach_vm_size_t entrySize;
1518 err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1519 assert(KERN_SUCCESS == err);
1520 mapAddr += entrySize;
1521 offset += chunk;
1522 }
1523
1524 entry++;
1525 entryIdx++;
1526 if (entryIdx >= ref->count) {
1527 err = kIOReturnOverrun;
1528 break;
1529 }
1530 }
1531
1532 if (KERN_SUCCESS != err) {
1533 DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1534 }
1535
1536 if ((KERN_SUCCESS != err) && didAlloc) {
1537 (void) IOMemoryDescriptorMapDealloc(options, map, trunc_page_64(addr), size);
1538 addr = 0;
1539 }
1540 *inaddr = addr;
1541
1542 return err;
1543 }
1544
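// Return the total length needed to DMA map this reference: the sum of each
// entry's size plus its physical page offset, rounded to page size; optionally
// returns the first entry's page offset.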
1545 uint64_t
1546 IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1547 IOMemoryReference * ref,
1548 uint64_t * offset)
1549 {
1550 kern_return_t kr;
1551 vm_object_offset_t data_offset = 0;
1552 uint64_t total;
1553 uint32_t idx;
1554
1555 assert(ref->count);
1556 if (offset) {
1557 *offset = (uint64_t) data_offset;
1558 }
1559 total = 0;
1560 for (idx = 0; idx < ref->count; idx++) {
1561 kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1562 &data_offset);
1563 if (KERN_SUCCESS != kr) {
1564 DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1565 } else if (0 != data_offset) {
1566 DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1567 }
1568 if (offset && !idx) {
1569 *offset = (uint64_t) data_offset;
1570 }
1571 total += round_page(data_offset + ref->entries[idx].size);
1572 }
1573
1574 DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1575 (offset ? *offset : (vm_object_offset_t)-1), total);
1576
1577 return total;
1578 }
1579
1580
1581 IOReturn
1582 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1583 IOMemoryReference * ref,
1584 IOByteCount * residentPageCount,
1585 IOByteCount * dirtyPageCount)
1586 {
1587 IOReturn err;
1588 IOMemoryEntry * entries;
1589 unsigned int resident, dirty;
1590 unsigned int totalResident, totalDirty;
1591
1592 totalResident = totalDirty = 0;
1593 err = kIOReturnSuccess;
1594 entries = ref->entries + ref->count;
1595 while (entries > &ref->entries[0]) {
1596 entries--;
1597 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1598 if (KERN_SUCCESS != err) {
1599 break;
1600 }
1601 totalResident += resident;
1602 totalDirty += dirty;
1603 }
1604
1605 if (residentPageCount) {
1606 *residentPageCount = totalResident;
1607 }
1608 if (dirtyPageCount) {
1609 *dirtyPageCount = totalDirty;
1610 }
1611 return err;
1612 }
1613
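// Apply a purgeable state change to every entry in the reference, folding the
// per-entry results into a single combined state (empty wins over volatile,
// which wins over nonvolatile).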
1614 IOReturn
1615 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1616 IOMemoryReference * ref,
1617 IOOptionBits newState,
1618 IOOptionBits * oldState)
1619 {
1620 IOReturn err;
1621 IOMemoryEntry * entries;
1622 vm_purgable_t control;
1623 int totalState, state;
1624
1625 totalState = kIOMemoryPurgeableNonVolatile;
1626 err = kIOReturnSuccess;
1627 entries = ref->entries + ref->count;
1628 while (entries > &ref->entries[0]) {
1629 entries--;
1630
1631 err = purgeableControlBits(newState, &control, &state);
1632 if (KERN_SUCCESS != err) {
1633 break;
1634 }
1635 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1636 if (KERN_SUCCESS != err) {
1637 break;
1638 }
1639 err = purgeableStateBits(&state);
1640 if (KERN_SUCCESS != err) {
1641 break;
1642 }
1643
1644 if (kIOMemoryPurgeableEmpty == state) {
1645 totalState = kIOMemoryPurgeableEmpty;
1646 } else if (kIOMemoryPurgeableEmpty == totalState) {
1647 continue;
1648 } else if (kIOMemoryPurgeableVolatile == totalState) {
1649 continue;
1650 } else if (kIOMemoryPurgeableVolatile == state) {
1651 totalState = kIOMemoryPurgeableVolatile;
1652 } else {
1653 totalState = kIOMemoryPurgeableNonVolatile;
1654 }
1655 }
1656
1657 if (oldState) {
1658 *oldState = totalState;
1659 }
1660 return err;
1661 }
1662
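// Reassign ledger ownership and tag for every entry; if any call fails, the
// last error encountered is returned.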
1663 IOReturn
1664 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1665 IOMemoryReference * ref,
1666 task_t newOwner,
1667 int newLedgerTag,
1668 IOOptionBits newLedgerOptions)
1669 {
1670 IOReturn err, totalErr;
1671 IOMemoryEntry * entries;
1672
1673 totalErr = kIOReturnSuccess;
1674 entries = ref->entries + ref->count;
1675 while (entries > &ref->entries[0]) {
1676 entries--;
1677
1678 err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1679 if (KERN_SUCCESS != err) {
1680 totalErr = err;
1681 }
1682 }
1683
1684 return totalErr;
1685 }
1686
1687 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1688
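// Factory methods: each wraps a task virtual address range, physical range, or
// existing descriptor in an IOGeneralMemoryDescriptor (or a sub/persistent
// descriptor), ultimately funnelling into initWithOptions().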
1689 OSSharedPtr<IOMemoryDescriptor>
1690 IOMemoryDescriptor::withAddress(void * address,
1691 IOByteCount length,
1692 IODirection direction)
1693 {
1694 return IOMemoryDescriptor::
1695 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1696 }
1697
1698 #ifndef __LP64__
1699 OSSharedPtr<IOMemoryDescriptor>
1700 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1701 IOByteCount length,
1702 IODirection direction,
1703 task_t task)
1704 {
1705 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1706 if (that) {
1707 if (that->initWithAddress(address, length, direction, task)) {
1708 return os::move(that);
1709 }
1710 }
1711 return nullptr;
1712 }
1713 #endif /* !__LP64__ */
1714
1715 OSSharedPtr<IOMemoryDescriptor>
1716 IOMemoryDescriptor::withPhysicalAddress(
1717 IOPhysicalAddress address,
1718 IOByteCount length,
1719 IODirection direction )
1720 {
1721 return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1722 }
1723
1724 #ifndef __LP64__
1725 OSSharedPtr<IOMemoryDescriptor>
1726 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1727 UInt32 withCount,
1728 IODirection direction,
1729 task_t task,
1730 bool asReference)
1731 {
1732 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1733 if (that) {
1734 if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1735 return os::move(that);
1736 }
1737 }
1738 return nullptr;
1739 }
1740 #endif /* !__LP64__ */
1741
1742 OSSharedPtr<IOMemoryDescriptor>
1743 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1744 mach_vm_size_t length,
1745 IOOptionBits options,
1746 task_t task)
1747 {
1748 IOAddressRange range = { address, length };
1749 return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1750 }
1751
1752 OSSharedPtr<IOMemoryDescriptor>
1753 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1754 UInt32 rangeCount,
1755 IOOptionBits options,
1756 task_t task)
1757 {
1758 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1759 if (that) {
1760 if (task) {
1761 options |= kIOMemoryTypeVirtual64;
1762 } else {
1763 options |= kIOMemoryTypePhysical64;
1764 }
1765
1766 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1767 return os::move(that);
1768 }
1769 }
1770
1771 return nullptr;
1772 }
1773
1774
1775 /*
1776 * withOptions:
1777 *
1778 * Create a new IOMemoryDescriptor. The buffer is made up of several
1779 * virtual address ranges, from a given task.
1780 *
1781 * Passing the ranges as a reference will avoid an extra allocation.
1782 */
1783 OSSharedPtr<IOMemoryDescriptor>
1784 IOMemoryDescriptor::withOptions(void * buffers,
1785 UInt32 count,
1786 UInt32 offset,
1787 task_t task,
1788 IOOptionBits opts,
1789 IOMapper * mapper)
1790 {
1791 OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1792
1793 if (self
1794 && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1795 return nullptr;
1796 }
1797
1798 return os::move(self);
1799 }
1800
1801 bool
1802 IOMemoryDescriptor::initWithOptions(void * buffers,
1803 UInt32 count,
1804 UInt32 offset,
1805 task_t task,
1806 IOOptionBits options,
1807 IOMapper * mapper)
1808 {
1809 return false;
1810 }
1811
1812 #ifndef __LP64__
1813 OSSharedPtr<IOMemoryDescriptor>
1814 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1815 UInt32 withCount,
1816 IODirection direction,
1817 bool asReference)
1818 {
1819 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1820 if (that) {
1821 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1822 return os::move(that);
1823 }
1824 }
1825 return nullptr;
1826 }
1827
1828 OSSharedPtr<IOMemoryDescriptor>
1829 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1830 IOByteCount offset,
1831 IOByteCount length,
1832 IODirection direction)
1833 {
1834 return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1835 }
1836 #endif /* !__LP64__ */
1837
1838 OSSharedPtr<IOMemoryDescriptor>
1839 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1840 {
1841 IOGeneralMemoryDescriptor *origGenMD =
1842 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1843
1844 if (origGenMD) {
1845 return IOGeneralMemoryDescriptor::
1846 withPersistentMemoryDescriptor(origGenMD);
1847 } else {
1848 return nullptr;
1849 }
1850 }
1851
1852 OSSharedPtr<IOMemoryDescriptor>
1853 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1854 {
1855 IOMemoryReference * memRef;
1856 OSSharedPtr<IOGeneralMemoryDescriptor> self;
1857
1858 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1859 return nullptr;
1860 }
1861
1862 if (memRef == originalMD->_memRef) {
1863 self.reset(originalMD, OSRetain);
1864 originalMD->memoryReferenceRelease(memRef);
1865 return os::move(self);
1866 }
1867
1868 self = OSMakeShared<IOGeneralMemoryDescriptor>();
1869 IOMDPersistentInitData initData = { originalMD, memRef };
1870
1871 if (self
1872 && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1873 return nullptr;
1874 }
1875 return os::move(self);
1876 }
1877
1878 #ifndef __LP64__
1879 bool
1880 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1881 IOByteCount withLength,
1882 IODirection withDirection)
1883 {
1884 _singleRange.v.address = (vm_offset_t) address;
1885 _singleRange.v.length = withLength;
1886
1887 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1888 }
1889
1890 bool
1891 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1892 IOByteCount withLength,
1893 IODirection withDirection,
1894 task_t withTask)
1895 {
1896 _singleRange.v.address = address;
1897 _singleRange.v.length = withLength;
1898
1899 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1900 }
1901
1902 bool
1903 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1904 IOPhysicalAddress address,
1905 IOByteCount withLength,
1906 IODirection withDirection )
1907 {
1908 _singleRange.p.address = address;
1909 _singleRange.p.length = withLength;
1910
1911 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1912 }
1913
1914 bool
1915 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1916 IOPhysicalRange * ranges,
1917 UInt32 count,
1918 IODirection direction,
1919 bool reference)
1920 {
1921 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1922
1923 if (reference) {
1924 mdOpts |= kIOMemoryAsReference;
1925 }
1926
1927 return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1928 }
1929
1930 bool
1931 IOGeneralMemoryDescriptor::initWithRanges(
1932 IOVirtualRange * ranges,
1933 UInt32 count,
1934 IODirection direction,
1935 task_t task,
1936 bool reference)
1937 {
1938 IOOptionBits mdOpts = direction;
1939
1940 if (reference) {
1941 mdOpts |= kIOMemoryAsReference;
1942 }
1943
1944 if (task) {
1945 mdOpts |= kIOMemoryTypeVirtual;
1946
1947 // Auto-prepare if this is a kernel memory descriptor, since very few
1948 // clients bother to prepare() kernel memory and preparation was never
1949 // enforced for the kernel task.
1950 if (task == kernel_task) {
1951 mdOpts |= kIOMemoryAutoPrepare;
1952 }
1953 } else {
1954 mdOpts |= kIOMemoryTypePhysical;
1955 }
1956
1957 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1958 }
1959 #endif /* !__LP64__ */
1960
1961 /*
1962 * initWithOptions:
1963 *
1964 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1965 * address ranges from a given task, several physical ranges, a UPL from the
1966 * UBC system, or a uio (which may be 64-bit) from the BSD subsystem.
1967 *
1968 * Passing the ranges as a reference will avoid an extra allocation.
1969 *
1970 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1971 * existing instance -- note this behavior is not commonly supported in other
1972 * I/O Kit classes, although it is supported here.
1973 */
1974
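/*
 * Illustrative sketch (not in the original source) of the re-use behaviour
 * described above: the same IOGeneralMemoryDescriptor instance is retargeted
 * at a second range by calling initWithOptions() again. The someTask variable
 * and the range values are hypothetical.
 */
#if 0 /* illustrative example only */
OSSharedPtr<IOGeneralMemoryDescriptor> gmd = OSMakeShared<IOGeneralMemoryDescriptor>();
IOAddressRange first  = { 0x10000000ULL, 0x1000 };
IOAddressRange second = { 0x20000000ULL, 0x2000 };

if (gmd && gmd->initWithOptions(&first, 1, 0, someTask,
    kIOMemoryTypeVirtual64 | kIODirectionIn, /* mapper */ NULL)) {
	// ... use the descriptor ...
	// Retarget the same instance; initWithOptions() tears down the
	// previous state (wiring, allocated range storage) first.
	gmd->initWithOptions(&second, 1, 0, someTask,
	    kIOMemoryTypeVirtual64 | kIODirectionIn, /* mapper */ NULL);
}
#endif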
1975 bool
1976 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1977 UInt32 count,
1978 UInt32 offset,
1979 task_t task,
1980 IOOptionBits options,
1981 IOMapper * mapper)
1982 {
1983 IOOptionBits type = options & kIOMemoryTypeMask;
1984
1985 #ifndef __LP64__
1986 if (task
1987 && (kIOMemoryTypeVirtual == type)
1988 && vm_map_is_64bit(get_task_map(task))
1989 && ((IOVirtualRange *) buffers)->address) {
1990 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1991 return false;
1992 }
1993 #endif /* !__LP64__ */
1994
1995 // Grab the original MD's configuration data to initialise the
1996 // arguments to this function.
1997 if (kIOMemoryTypePersistentMD == type) {
1998 IOMDPersistentInitData *initData = (typeof(initData))buffers;
1999 const IOGeneralMemoryDescriptor *orig = initData->fMD;
2000 ioGMDData *dataP = getDataP(orig->_memoryEntries);
2001
2002 // Only accept persistent memory descriptors with valid dataP data.
2003 assert(orig->_rangesCount == 1);
2004 if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
2005 return false;
2006 }
2007
2008 _memRef = initData->fMemRef; // Grab the new named entry
2009 options = orig->_flags & ~kIOMemoryAsReference;
2010 type = options & kIOMemoryTypeMask;
2011 buffers = orig->_ranges.v;
2012 count = orig->_rangesCount;
2013
2014 // Now grab the original task and whatever mapper was previously used
2015 task = orig->_task;
2016 mapper = dataP->fMapper;
2017
2018 // We are ready to go through the original initialisation now
2019 }
2020
2021 switch (type) {
2022 case kIOMemoryTypeUIO:
2023 case kIOMemoryTypeVirtual:
2024 #ifndef __LP64__
2025 case kIOMemoryTypeVirtual64:
2026 #endif /* !__LP64__ */
2027 assert(task);
2028 if (!task) {
2029 return false;
2030 }
2031 break;
2032
2033 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
2034 #ifndef __LP64__
2035 case kIOMemoryTypePhysical64:
2036 #endif /* !__LP64__ */
2037 case kIOMemoryTypeUPL:
2038 assert(!task);
2039 break;
2040 default:
2041 return false; /* bad argument */
2042 }
2043
2044 assert(buffers);
2045 assert(count);
2046
2047 /*
2048 * We can check the _initialized instance variable before having ever set
2049 * it to an initial value because I/O Kit guarantees that all our instance
2050 * variables are zeroed on an object's allocation.
2051 */
2052
2053 if (_initialized) {
2054 /*
2055 * An existing memory descriptor is being retargeted to point to
2056 * somewhere else. Clean up our present state.
2057 */
2058 IOOptionBits type = _flags & kIOMemoryTypeMask;
2059 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
2060 while (_wireCount) {
2061 complete();
2062 }
2063 }
2064 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2065 if (kIOMemoryTypeUIO == type) {
2066 uio_free((uio_t) _ranges.v);
2067 }
2068 #ifndef __LP64__
2069 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2070 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2071 }
2072 #endif /* !__LP64__ */
2073 else {
2074 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2075 }
2076 }
2077
2078 options |= (kIOMemoryRedirected & _flags);
2079 if (!(kIOMemoryRedirected & options)) {
2080 if (_memRef) {
2081 memoryReferenceRelease(_memRef);
2082 _memRef = NULL;
2083 }
2084 if (_mappings) {
2085 _mappings->flushCollection();
2086 }
2087 }
2088 } else {
2089 if (!super::init()) {
2090 return false;
2091 }
2092 _initialized = true;
2093 }
2094
2095 // Grab the appropriate mapper
2096 if (kIOMemoryHostOrRemote & options) {
2097 options |= kIOMemoryMapperNone;
2098 }
2099 if (kIOMemoryMapperNone & options) {
2100 mapper = NULL; // No Mapper
2101 } else if (mapper == kIOMapperSystem) {
2102 IOMapper::checkForSystemMapper();
2103 gIOSystemMapper = mapper = IOMapper::gSystem;
2104 }
2105
2106 // Remove the dynamic internal use flags from the initial setting
2107 options &= ~(kIOMemoryPreparedReadOnly);
2108 _flags = options;
2109 _task = task;
2110
2111 #ifndef __LP64__
2112 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
2113 #endif /* !__LP64__ */
2114
2115 _dmaReferences = 0;
2116 __iomd_reservedA = 0;
2117 __iomd_reservedB = 0;
2118 _highestPage = 0;
2119
2120 if (kIOMemoryThreadSafe & options) {
2121 if (!_prepareLock) {
2122 _prepareLock = IOLockAlloc();
2123 }
2124 } else if (_prepareLock) {
2125 IOLockFree(_prepareLock);
2126 _prepareLock = NULL;
2127 }
2128
2129 if (kIOMemoryTypeUPL == type) {
2130 ioGMDData *dataP;
2131 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2132
2133 if (!initMemoryEntries(dataSize, mapper)) {
2134 return false;
2135 }
2136 dataP = getDataP(_memoryEntries);
2137 dataP->fPageCnt = 0;
2138 switch (kIOMemoryDirectionMask & options) {
2139 case kIODirectionOut:
2140 dataP->fDMAAccess = kIODMAMapReadAccess;
2141 break;
2142 case kIODirectionIn:
2143 dataP->fDMAAccess = kIODMAMapWriteAccess;
2144 break;
2145 case kIODirectionNone:
2146 case kIODirectionOutIn:
2147 default:
2148 panic("bad dir for upl 0x%x", (int) options);
2149 break;
2150 }
2151 // _wireCount++; // UPLs start out life wired
2152
2153 _length = count;
2154 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2155
2156 ioPLBlock iopl;
2157 iopl.fIOPL = (upl_t) buffers;
2158 upl_set_referenced(iopl.fIOPL, true);
2159 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2160
2161 if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2162 panic("short external upl");
2163 }
2164
2165 _highestPage = upl_get_highest_page(iopl.fIOPL);
2166 DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2167
2168 // Set the flag kIOPLOnDevice, conveniently equal to 1
2169 iopl.fFlags = pageList->device | kIOPLExternUPL;
2170 if (!pageList->device) {
2171 // Pre-compute the offset into the UPL's page list
2172 pageList = &pageList[atop_32(offset)];
2173 offset &= PAGE_MASK;
2174 }
2175 iopl.fIOMDOffset = 0;
2176 iopl.fMappedPage = 0;
2177 iopl.fPageInfo = (vm_address_t) pageList;
2178 iopl.fPageOffset = offset;
2179 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
2180 } else {
2181 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2182 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2183
2184 // Initialize the memory descriptor
2185 if (options & kIOMemoryAsReference) {
2186 #ifndef __LP64__
2187 _rangesIsAllocated = false;
2188 #endif /* !__LP64__ */
2189
2190 // Hack assignment to get the buffer arg into _ranges.
2191 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
2192 // work, C++ sigh.
2193 // This also initialises the uio & physical ranges.
2194 _ranges.v = (IOVirtualRange *) buffers;
2195 } else {
2196 #ifndef __LP64__
2197 _rangesIsAllocated = true;
2198 #endif /* !__LP64__ */
2199 switch (type) {
2200 case kIOMemoryTypeUIO:
2201 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2202 break;
2203
2204 #ifndef __LP64__
2205 case kIOMemoryTypeVirtual64:
2206 case kIOMemoryTypePhysical64:
2207 if (count == 1
2208 #ifndef __arm__
2209 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2210 #endif
2211 ) {
2212 if (kIOMemoryTypeVirtual64 == type) {
2213 type = kIOMemoryTypeVirtual;
2214 } else {
2215 type = kIOMemoryTypePhysical;
2216 }
2217 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2218 _rangesIsAllocated = false;
2219 _ranges.v = &_singleRange.v;
2220 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
2221 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
2222 break;
2223 }
2224 _ranges.v64 = IONew(IOAddressRange, count);
2225 if (!_ranges.v64) {
2226 return false;
2227 }
2228 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2229 break;
2230 #endif /* !__LP64__ */
2231 case kIOMemoryTypeVirtual:
2232 case kIOMemoryTypePhysical:
2233 if (count == 1) {
2234 _flags |= kIOMemoryAsReference;
2235 #ifndef __LP64__
2236 _rangesIsAllocated = false;
2237 #endif /* !__LP64__ */
2238 _ranges.v = &_singleRange.v;
2239 } else {
2240 _ranges.v = IONew(IOVirtualRange, count);
2241 if (!_ranges.v) {
2242 return false;
2243 }
2244 }
2245 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2246 break;
2247 }
2248 }
2249 _rangesCount = count;
2250
2251 // Find starting address within the vector of ranges
2252 Ranges vec = _ranges;
2253 mach_vm_size_t totalLength = 0;
2254 unsigned int ind, pages = 0;
2255 for (ind = 0; ind < count; ind++) {
2256 mach_vm_address_t addr;
2257 mach_vm_address_t endAddr;
2258 mach_vm_size_t len;
2259
2260 // addr & len are returned by this function
2261 getAddrLenForInd(addr, len, type, vec, ind, _task);
2262 if (_task) {
2263 mach_vm_size_t phys_size;
2264 kern_return_t kret;
2265 kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2266 if (KERN_SUCCESS != kret) {
2267 break;
2268 }
2269 if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2270 break;
2271 }
2272 } else {
2273 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2274 break;
2275 }
2276 if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2277 break;
2278 }
2279 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2280 break;
2281 }
2282 }
2283 if (os_add_overflow(totalLength, len, &totalLength)) {
2284 break;
2285 }
2286 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2287 uint64_t highPage = atop_64(addr + len - 1);
2288 if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2289 _highestPage = (ppnum_t) highPage;
2290 DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2291 }
2292 }
2293 }
2294 if ((ind < count)
2295 || (totalLength != ((IOByteCount) totalLength))) {
2296 return false; /* overflow */
2297 }
2298 _length = totalLength;
2299 _pages = pages;
2300
2301 // Auto-prepare memory at creation time.
2302 // Completion is implied when the descriptor is freed.
2303
2304
2305 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2306 _wireCount++; // Physical MDs are, by definition, wired
2307 } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2308 ioGMDData *dataP;
2309 unsigned dataSize;
2310
2311 if (_pages > atop_64(max_mem)) {
2312 return false;
2313 }
2314
2315 dataSize = computeDataSize(_pages, /* upls */ count * 2);
2316 if (!initMemoryEntries(dataSize, mapper)) {
2317 return false;
2318 }
2319 dataP = getDataP(_memoryEntries);
2320 dataP->fPageCnt = _pages;
2321
2322 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2323 && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2324 _kernelTag = IOMemoryTag(kernel_map);
2325 if (_kernelTag == gIOSurfaceTag) {
2326 _userTag = VM_MEMORY_IOSURFACE;
2327 }
2328 }
2329
2330 if ((kIOMemoryPersistent & _flags) && !_memRef) {
2331 IOReturn
2332 err = memoryReferenceCreate(0, &_memRef);
2333 if (kIOReturnSuccess != err) {
2334 return false;
2335 }
2336 }
2337
2338 if ((_flags & kIOMemoryAutoPrepare)
2339 && prepare() != kIOReturnSuccess) {
2340 return false;
2341 }
2342 }
2343 }
2344
2345 return true;
2346 }
2347
2348 /*
2349 * free
2350 *
2351 * Free resources.
2352 */
2353 void
2354 IOGeneralMemoryDescriptor::free()
2355 {
2356 IOOptionBits type = _flags & kIOMemoryTypeMask;
2357
2358 if (reserved && reserved->dp.memory) {
2359 LOCK;
2360 reserved->dp.memory = NULL;
2361 UNLOCK;
2362 }
2363 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2364 ioGMDData * dataP;
2365 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
2366 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
2367 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
2368 }
2369 } else {
2370 while (_wireCount) {
2371 complete();
2372 }
2373 }
2374
2375 if (_memoryEntries) {
2376 _memoryEntries.reset();
2377 }
2378
2379 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2380 if (kIOMemoryTypeUIO == type) {
2381 uio_free((uio_t) _ranges.v);
2382 }
2383 #ifndef __LP64__
2384 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2385 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2386 }
2387 #endif /* !__LP64__ */
2388 else {
2389 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2390 }
2391
2392 _ranges.v = NULL;
2393 }
2394
2395 if (reserved) {
2396 cleanKernelReserved(reserved);
2397 if (reserved->dp.devicePager) {
2398 // memEntry holds a ref on the device pager which owns reserved
2399 // (IOMemoryDescriptorReserved) so no reserved access after this point
2400 device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
2401 } else {
2402 IOFreeType(reserved, IOMemoryDescriptorReserved);
2403 }
2404 reserved = NULL;
2405 }
2406
2407 if (_memRef) {
2408 memoryReferenceRelease(_memRef);
2409 }
2410 if (_prepareLock) {
2411 IOLockFree(_prepareLock);
2412 }
2413
2414 super::free();
2415 }
2416
2417 #ifndef __LP64__
2418 void
2419 IOGeneralMemoryDescriptor::unmapFromKernel()
2420 {
2421 panic("IOGMD::unmapFromKernel deprecated");
2422 }
2423
2424 void
2425 IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
2426 {
2427 panic("IOGMD::mapIntoKernel deprecated");
2428 }
2429 #endif /* !__LP64__ */
2430
2431 /*
2432 * getDirection:
2433 *
2434 * Get the direction of the transfer.
2435 */
2436 IODirection
2437 IOMemoryDescriptor::getDirection() const
2438 {
2439 #ifndef __LP64__
2440 if (_direction) {
2441 return _direction;
2442 }
2443 #endif /* !__LP64__ */
2444 return (IODirection) (_flags & kIOMemoryDirectionMask);
2445 }
2446
2447 /*
2448 * getLength:
2449 *
2450 * Get the length of the transfer (over all ranges).
2451 */
2452 IOByteCount
2453 IOMemoryDescriptor::getLength() const
2454 {
2455 return _length;
2456 }
2457
2458 void
2459 IOMemoryDescriptor::setTag( IOOptionBits tag )
2460 {
2461 _tag = tag;
2462 }
2463
2464 IOOptionBits
2465 IOMemoryDescriptor::getTag( void )
2466 {
2467 return _tag;
2468 }
2469
2470 uint64_t
2471 IOMemoryDescriptor::getFlags(void)
2472 {
2473 return _flags;
2474 }
2475
2476 OSObject *
2477 IOMemoryDescriptor::copyContext(void) const
2478 {
2479 if (reserved) {
2480 OSObject * context = reserved->contextObject;
2481 if (context) {
2482 context->retain();
2483 }
2484 return context;
2485 } else {
2486 return NULL;
2487 }
2488 }
2489
2490 void
2491 IOMemoryDescriptor::setContext(OSObject * obj)
2492 {
2493 if (this->reserved == NULL && obj == NULL) {
2494 // No existing object, and no object to set
2495 return;
2496 }
2497
2498 IOMemoryDescriptorReserved * reserved = getKernelReserved();
2499 if (reserved) {
2500 OSObject * oldObject = reserved->contextObject;
2501 if (oldObject && OSCompareAndSwapPtr(oldObject, NULL, &reserved->contextObject)) {
2502 oldObject->release();
2503 }
2504 if (obj != NULL) {
2505 obj->retain();
2506 reserved->contextObject = obj;
2507 }
2508 }
2509 }
2510
2511 #ifndef __LP64__
2512 #pragma clang diagnostic push
2513 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2514
2515 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
2516 IOPhysicalAddress
2517 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
2518 {
2519 addr64_t physAddr = 0;
2520
2521 if (prepare() == kIOReturnSuccess) {
2522 physAddr = getPhysicalSegment64( offset, length );
2523 complete();
2524 }
2525
2526 return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2527 }
2528
2529 #pragma clang diagnostic pop
2530
2531 #endif /* !__LP64__ */
2532
2533 IOByteCount
2534 IOMemoryDescriptor::readBytes
2535 (IOByteCount offset, void *bytes, IOByteCount length)
2536 {
2537 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2538 IOByteCount endoffset;
2539 IOByteCount remaining;
2540
2541
2542 // Check that this entire I/O is within the available range
2543 if ((offset > _length)
2544 || os_add_overflow(length, offset, &endoffset)
2545 || (endoffset > _length)) {
2546 assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2547 return 0;
2548 }
2549 if (offset >= _length) {
2550 return 0;
2551 }
2552
2553 assert(!(kIOMemoryRemote & _flags));
2554 if (kIOMemoryRemote & _flags) {
2555 return 0;
2556 }
2557
2558 if (kIOMemoryThreadSafe & _flags) {
2559 LOCK;
2560 }
2561
2562 remaining = length = min(length, _length - offset);
2563 while (remaining) { // (process another target segment?)
2564 addr64_t srcAddr64;
2565 IOByteCount srcLen;
2566
2567 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2568 if (!srcAddr64) {
2569 break;
2570 }
2571
2572 // Clip segment length to remaining
2573 if (srcLen > remaining) {
2574 srcLen = remaining;
2575 }
2576
2577 if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2578 srcLen = (UINT_MAX - PAGE_SIZE + 1);
2579 }
2580 copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2581 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2582
2583 dstAddr += srcLen;
2584 offset += srcLen;
2585 remaining -= srcLen;
2586 }
2587
2588 if (kIOMemoryThreadSafe & _flags) {
2589 UNLOCK;
2590 }
2591
2592 assert(!remaining);
2593
2594 return length - remaining;
2595 }
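/*
 * Usage sketch added for illustration (not in the original source): copying
 * the first bytes of a descriptor into a local kernel buffer with readBytes().
 * The md variable is assumed to be a prepared IOMemoryDescriptor; the 64-byte
 * size is arbitrary.
 */
#if 0 /* illustrative example only */
uint8_t     header[64];
IOByteCount copied = md->readBytes(/* offset */ 0, header, sizeof(header));
if (copied != sizeof(header)) {
	// Short copy: the request ran past the descriptor or a segment
	// could not be resolved.
}
#endif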
2596
2597 IOByteCount
2598 IOMemoryDescriptor::writeBytes
2599 (IOByteCount inoffset, const void *bytes, IOByteCount length)
2600 {
2601 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2602 IOByteCount remaining;
2603 IOByteCount endoffset;
2604 IOByteCount offset = inoffset;
2605
2606 assert( !(kIOMemoryPreparedReadOnly & _flags));
2607
2608 // Check that this entire I/O is within the available range
2609 if ((offset > _length)
2610 || os_add_overflow(length, offset, &endoffset)
2611 || (endoffset > _length)) {
2612 assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2613 return 0;
2614 }
2615 if (kIOMemoryPreparedReadOnly & _flags) {
2616 return 0;
2617 }
2618 if (offset >= _length) {
2619 return 0;
2620 }
2621
2622 assert(!(kIOMemoryRemote & _flags));
2623 if (kIOMemoryRemote & _flags) {
2624 return 0;
2625 }
2626
2627 if (kIOMemoryThreadSafe & _flags) {
2628 LOCK;
2629 }
2630
2631 remaining = length = min(length, _length - offset);
2632 while (remaining) { // (process another target segment?)
2633 addr64_t dstAddr64;
2634 IOByteCount dstLen;
2635
2636 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2637 if (!dstAddr64) {
2638 break;
2639 }
2640
2641 // Clip segment length to remaining
2642 if (dstLen > remaining) {
2643 dstLen = remaining;
2644 }
2645
2646 if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2647 dstLen = (UINT_MAX - PAGE_SIZE + 1);
2648 }
2649 if (!srcAddr) {
2650 bzero_phys(dstAddr64, (unsigned int) dstLen);
2651 } else {
2652 copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2653 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2654 srcAddr += dstLen;
2655 }
2656 offset += dstLen;
2657 remaining -= dstLen;
2658 }
2659
2660 if (kIOMemoryThreadSafe & _flags) {
2661 UNLOCK;
2662 }
2663
2664 assert(!remaining);
2665
2666 #if defined(__x86_64__)
2667 // copypv does not cppvFsnk on intel
2668 #else
2669 if (!srcAddr) {
2670 performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2671 }
2672 #endif
2673
2674 return length - remaining;
2675 }
2676
2677 #ifndef __LP64__
2678 void
2679 IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2680 {
2681 panic("IOGMD::setPosition deprecated");
2682 }
2683 #endif /* !__LP64__ */
2684
2685 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2686 static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2687
2688 uint64_t
2689 IOGeneralMemoryDescriptor::getPreparationID( void )
2690 {
2691 ioGMDData *dataP;
2692
2693 if (!_wireCount) {
2694 return kIOPreparationIDUnprepared;
2695 }
2696
2697 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2698 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2699 IOMemoryDescriptor::setPreparationID();
2700 return IOMemoryDescriptor::getPreparationID();
2701 }
2702
2703 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2704 return kIOPreparationIDUnprepared;
2705 }
2706
2707 if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2708 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2709 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2710 }
2711 return dataP->fPreparationID;
2712 }
2713
2714 void
2715 IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2716 {
2717 if (reserved->creator) {
2718 task_deallocate(reserved->creator);
2719 reserved->creator = NULL;
2720 }
2721
2722 if (reserved->contextObject) {
2723 reserved->contextObject->release();
2724 reserved->contextObject = NULL;
2725 }
2726 }
2727
2728 IOMemoryDescriptorReserved *
2729 IOMemoryDescriptor::getKernelReserved( void )
2730 {
2731 if (!reserved) {
2732 reserved = IOMallocType(IOMemoryDescriptorReserved);
2733 }
2734 return reserved;
2735 }
2736
2737 void
2738 IOMemoryDescriptor::setPreparationID( void )
2739 {
2740 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2741 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2742 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2743 }
2744 }
2745
2746 uint64_t
2747 IOMemoryDescriptor::getPreparationID( void )
2748 {
2749 if (reserved) {
2750 return reserved->preparationID;
2751 } else {
2752 return kIOPreparationIDUnsupported;
2753 }
2754 }
2755
2756 void
2757 IOMemoryDescriptor::setDescriptorID( void )
2758 {
2759 if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2760 SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2761 OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2762 }
2763 }
2764
2765 uint64_t
2766 IOMemoryDescriptor::getDescriptorID( void )
2767 {
2768 setDescriptorID();
2769
2770 if (reserved) {
2771 return reserved->descriptorID;
2772 } else {
2773 return kIODescriptorIDInvalid;
2774 }
2775 }
2776
2777 IOReturn
2778 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2779 {
2780 if (!kdebug_debugid_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2781 return kIOReturnSuccess;
2782 }
2783
2784 assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2785 if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2786 return kIOReturnBadArgument;
2787 }
2788
2789 uint64_t descriptorID = getDescriptorID();
2790 assert(descriptorID != kIODescriptorIDInvalid);
2791 if (getDescriptorID() == kIODescriptorIDInvalid) {
2792 return kIOReturnBadArgument;
2793 }
2794
2795 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2796
2797 #if __LP64__
2798 static const uint8_t num_segments_page = 8;
2799 #else
2800 static const uint8_t num_segments_page = 4;
2801 #endif
2802 static const uint8_t num_segments_long = 2;
2803
2804 IOPhysicalAddress segments_page[num_segments_page];
2805 IOPhysicalRange segments_long[num_segments_long];
2806 memset(segments_page, UINT32_MAX, sizeof(segments_page));
2807 memset(segments_long, 0, sizeof(segments_long));
2808
2809 uint8_t segment_page_idx = 0;
2810 uint8_t segment_long_idx = 0;
2811
2812 IOPhysicalRange physical_segment;
2813 for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2814 physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2815
2816 if (physical_segment.length == 0) {
2817 break;
2818 }
2819
2820 /**
2821 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2822 * buffer memory, pack segment events according to the following.
2823 *
2824 * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
2825 * IOMDPA_MAPPED event emitted by the current thread_id.
2826 *
2827 * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2828 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2829 * - unmapped pages will have a ppn of MAX_INT_32
2830 * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
2831 * - address_0, length_0, address_1, length_1
2832 * - unmapped pages will have an address of 0
2833 *
2834 * During each iteration do the following depending on the length of the mapping:
2835 * 1. add the current segment to the appropriate queue of pending segments
2836 * 2. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
2837 * 2a. if FALSE, emit and reset all events in the previous queue
2838 * 3. check if we have filled up the current queue of pending events
2839 * 3a. if TRUE, emit and reset all events in the pending queue
2840 * 4. after completing all iterations, emit events in the current queue
2841 */
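/*
 * Worked illustration (added for clarity, not in the original comment): with
 * 4 KB pages, two page-aligned PAGE_SIZE mappings at physical 0x1000 and
 * 0x2000 have ppns 0x1 and 0x2, so they pack into the first
 * IOMDPA_SEGMENTS_PAGE word as (0x1ULL << 32) | 0x2.
 */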
2842
2843 bool emit_page = false;
2844 bool emit_long = false;
2845 if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2846 segments_page[segment_page_idx] = physical_segment.address;
2847 segment_page_idx++;
2848
2849 emit_long = segment_long_idx != 0;
2850 emit_page = segment_page_idx == num_segments_page;
2851
2852 if (os_unlikely(emit_long)) {
2853 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2854 segments_long[0].address, segments_long[0].length,
2855 segments_long[1].address, segments_long[1].length);
2856 }
2857
2858 if (os_unlikely(emit_page)) {
2859 #if __LP64__
2860 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2861 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2862 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2863 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2864 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2865 #else
2866 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2867 (ppnum_t) atop_32(segments_page[1]),
2868 (ppnum_t) atop_32(segments_page[2]),
2869 (ppnum_t) atop_32(segments_page[3]),
2870 (ppnum_t) atop_32(segments_page[4]));
2871 #endif
2872 }
2873 } else {
2874 segments_long[segment_long_idx] = physical_segment;
2875 segment_long_idx++;
2876
2877 emit_page = segment_page_idx != 0;
2878 emit_long = segment_long_idx == num_segments_long;
2879
2880 if (os_unlikely(emit_page)) {
2881 #if __LP64__
2882 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2883 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2884 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2885 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2886 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2887 #else
2888 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2889 (ppnum_t) atop_32(segments_page[1]),
2890 (ppnum_t) atop_32(segments_page[2]),
2891 (ppnum_t) atop_32(segments_page[3]),
2892 (ppnum_t) atop_32(segments_page[4]));
2893 #endif
2894 }
2895
2896 if (emit_long) {
2897 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2898 segments_long[0].address, segments_long[0].length,
2899 segments_long[1].address, segments_long[1].length);
2900 }
2901 }
2902
2903 if (os_unlikely(emit_page)) {
2904 memset(segments_page, UINT32_MAX, sizeof(segments_page));
2905 segment_page_idx = 0;
2906 }
2907
2908 if (os_unlikely(emit_long)) {
2909 memset(segments_long, 0, sizeof(segments_long));
2910 segment_long_idx = 0;
2911 }
2912 }
2913
2914 if (segment_page_idx != 0) {
2915 assert(segment_long_idx == 0);
2916 #if __LP64__
2917 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2918 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2919 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2920 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2921 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2922 #else
2923 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2924 (ppnum_t) atop_32(segments_page[1]),
2925 (ppnum_t) atop_32(segments_page[2]),
2926 (ppnum_t) atop_32(segments_page[3]),
2927 (ppnum_t) atop_32(segments_page[4]));
2928 #endif
2929 } else if (segment_long_idx != 0) {
2930 assert(segment_page_idx == 0);
2931 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2932 segments_long[0].address, segments_long[0].length,
2933 segments_long[1].address, segments_long[1].length);
2934 }
2935
2936 return kIOReturnSuccess;
2937 }
2938
2939 void
2940 IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2941 {
2942 _kernelTag = (vm_tag_t) kernelTag;
2943 _userTag = (vm_tag_t) userTag;
2944 }
2945
2946 uint32_t
2947 IOMemoryDescriptor::getVMTag(vm_map_t map)
2948 {
2949 if (vm_kernel_map_is_kernel(map)) {
2950 if (VM_KERN_MEMORY_NONE != _kernelTag) {
2951 return (uint32_t) _kernelTag;
2952 }
2953 } else {
2954 if (VM_KERN_MEMORY_NONE != _userTag) {
2955 return (uint32_t) _userTag;
2956 }
2957 }
2958 return IOMemoryTag(map);
2959 }
2960
2961 IOReturn
2962 IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2963 {
2964 IOReturn err = kIOReturnSuccess;
2965 DMACommandOps params;
2966 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2967 ioGMDData *dataP;
2968
2969 params = (op & ~kIOMDDMACommandOperationMask & op);
2970 op &= kIOMDDMACommandOperationMask;
2971
2972 if (kIOMDDMAMap == op) {
2973 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2974 return kIOReturnUnderrun;
2975 }
2976
2977 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2978
2979 if (!_memoryEntries
2980 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2981 return kIOReturnNoMemory;
2982 }
2983
2984 if (_memoryEntries && data->fMapper) {
2985 bool remap, keepMap;
2986 dataP = getDataP(_memoryEntries);
2987
2988 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2989 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2990 }
2991 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2992 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2993 }
2994
2995 keepMap = (data->fMapper == gIOSystemMapper);
2996 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2997
2998 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2999 IOLockLock(_prepareLock);
3000 }
3001
3002 remap = (!keepMap);
3003 remap |= (dataP->fDMAMapNumAddressBits < 64)
3004 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
3005 remap |= (dataP->fDMAMapAlignment > page_size);
3006
3007 if (remap || !dataP->fMappedBaseValid) {
3008 err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3009 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
3010 dataP->fMappedBase = data->fAlloc;
3011 dataP->fMappedBaseValid = true;
3012 dataP->fMappedLength = data->fAllocLength;
3013 data->fAllocLength = 0; // IOMD owns the alloc now
3014 }
3015 } else {
3016 data->fAlloc = dataP->fMappedBase;
3017 data->fAllocLength = 0; // give out IOMD map
3018 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
3019 }
3020
3021 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
3022 IOLockUnlock(_prepareLock);
3023 }
3024 }
3025 return err;
3026 }
3027 if (kIOMDDMAUnmap == op) {
3028 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3029 return kIOReturnUnderrun;
3030 }
3031 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3032
3033 if (_pages) {
3034 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3035 }
3036
3037 return kIOReturnSuccess;
3038 }
3039
3040 if (kIOMDAddDMAMapSpec == op) {
3041 if (dataSize < sizeof(IODMAMapSpecification)) {
3042 return kIOReturnUnderrun;
3043 }
3044
3045 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
3046
3047 if (!_memoryEntries
3048 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3049 return kIOReturnNoMemory;
3050 }
3051
3052 if (_memoryEntries) {
3053 dataP = getDataP(_memoryEntries);
3054 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
3055 dataP->fDMAMapNumAddressBits = data->numAddressBits;
3056 }
3057 if (data->alignment > dataP->fDMAMapAlignment) {
3058 dataP->fDMAMapAlignment = data->alignment;
3059 }
3060 }
3061 return kIOReturnSuccess;
3062 }
3063
3064 if (kIOMDGetCharacteristics == op) {
3065 if (dataSize < sizeof(IOMDDMACharacteristics)) {
3066 return kIOReturnUnderrun;
3067 }
3068
3069 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3070 data->fLength = _length;
3071 data->fSGCount = _rangesCount;
3072 data->fPages = _pages;
3073 data->fDirection = getDirection();
3074 if (!_wireCount) {
3075 data->fIsPrepared = false;
3076 } else {
3077 data->fIsPrepared = true;
3078 data->fHighestPage = _highestPage;
3079 if (_memoryEntries) {
3080 dataP = getDataP(_memoryEntries);
3081 ioPLBlock *ioplList = getIOPLList(dataP);
3082 UInt count = getNumIOPL(_memoryEntries, dataP);
3083 if (count == 1) {
3084 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
3085 }
3086 }
3087 }
3088
3089 return kIOReturnSuccess;
3090 } else if (kIOMDDMAActive == op) {
3091 if (params) {
3092 int16_t prior;
3093 prior = OSAddAtomic16(1, &md->_dmaReferences);
3094 if (!prior) {
3095 md->_mapName = NULL;
3096 }
3097 } else {
3098 if (md->_dmaReferences) {
3099 OSAddAtomic16(-1, &md->_dmaReferences);
3100 } else {
3101 panic("_dmaReferences underflow");
3102 }
3103 }
3104 } else if (kIOMDWalkSegments != op) {
3105 return kIOReturnBadArgument;
3106 }
3107
3108 // Get the next segment
3109 struct InternalState {
3110 IOMDDMAWalkSegmentArgs fIO;
3111 mach_vm_size_t fOffset2Index;
3112 mach_vm_size_t fNextOffset;
3113 UInt fIndex;
3114 } *isP;
3115
3116 // Find the next segment
3117 if (dataSize < sizeof(*isP)) {
3118 return kIOReturnUnderrun;
3119 }
3120
3121 isP = (InternalState *) vData;
3122 uint64_t offset = isP->fIO.fOffset;
3123 uint8_t mapped = isP->fIO.fMapped;
3124 uint64_t mappedBase;
3125
3126 if (mapped && (kIOMemoryRemote & _flags)) {
3127 return kIOReturnNotAttached;
3128 }
3129
3130 if (IOMapper::gSystem && mapped
3131 && (!(kIOMemoryHostOnly & _flags))
3132 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
3133 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
3134 if (!_memoryEntries
3135 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
3136 return kIOReturnNoMemory;
3137 }
3138
3139 dataP = getDataP(_memoryEntries);
3140 if (dataP->fMapper) {
3141 IODMAMapSpecification mapSpec;
3142 bzero(&mapSpec, sizeof(mapSpec));
3143 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3144 mapSpec.alignment = dataP->fDMAMapAlignment;
3145 err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
3146 if (kIOReturnSuccess != err) {
3147 return err;
3148 }
3149 dataP->fMappedBaseValid = true;
3150 }
3151 }
3152
3153 if (mapped) {
3154 if (IOMapper::gSystem
3155 && (!(kIOMemoryHostOnly & _flags))
3156 && _memoryEntries
3157 && (dataP = getDataP(_memoryEntries))
3158 && dataP->fMappedBaseValid) {
3159 mappedBase = dataP->fMappedBase;
3160 } else {
3161 mapped = 0;
3162 }
3163 }
3164
3165 if (offset >= _length) {
3166 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3167 }
3168
3169 // Validate the previous offset
3170 UInt ind;
3171 mach_vm_size_t off2Ind = isP->fOffset2Index;
3172 if (!params
3173 && offset
3174 && (offset == isP->fNextOffset || off2Ind <= offset)) {
3175 ind = isP->fIndex;
3176 } else {
3177 ind = off2Ind = 0; // Start from beginning
3178 }
3179 mach_vm_size_t length;
3180 UInt64 address;
3181
3182 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3183 // Physical address based memory descriptor
3184 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3185
3186 // Find the range after the one that contains the offset
3187 mach_vm_size_t len;
3188 for (len = 0; off2Ind <= offset; ind++) {
3189 len = physP[ind].length;
3190 off2Ind += len;
3191 }
3192
3193 // Calculate length within range and starting address
3194 length = off2Ind - offset;
3195 address = physP[ind - 1].address + len - length;
3196
3197 if (true && mapped) {
3198 address = mappedBase + offset;
3199 } else {
3200 // see how far we can coalesce ranges
3201 while (ind < _rangesCount && address + length == physP[ind].address) {
3202 len = physP[ind].length;
3203 length += len;
3204 off2Ind += len;
3205 ind++;
3206 }
3207 }
3208
3209 // correct contiguous check overshoot
3210 ind--;
3211 off2Ind -= len;
3212 }
3213 #ifndef __LP64__
3214 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3215 // Physical address based memory descriptor
3216 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3217
3218 // Find the range after the one that contains the offset
3219 mach_vm_size_t len;
3220 for (len = 0; off2Ind <= offset; ind++) {
3221 len = physP[ind].length;
3222 off2Ind += len;
3223 }
3224
3225 // Calculate length within range and starting address
3226 length = off2Ind - offset;
3227 address = physP[ind - 1].address + len - length;
3228
3229 if (true && mapped) {
3230 address = mappedBase + offset;
3231 } else {
3232 // see how far we can coalesce ranges
3233 while (ind < _rangesCount && address + length == physP[ind].address) {
3234 len = physP[ind].length;
3235 length += len;
3236 off2Ind += len;
3237 ind++;
3238 }
3239 }
3240 // correct contiguous check overshoot
3241 ind--;
3242 off2Ind -= len;
3243 }
3244 #endif /* !__LP64__ */
3245 else {
3246 do {
3247 if (!_wireCount) {
3248 panic("IOGMD: not wired for the IODMACommand");
3249 }
3250
3251 assert(_memoryEntries);
3252
3253 dataP = getDataP(_memoryEntries);
3254 const ioPLBlock *ioplList = getIOPLList(dataP);
3255 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3256 upl_page_info_t *pageList = getPageList(dataP);
3257
3258 assert(numIOPLs > 0);
3259
3260 // Scan through iopl info blocks looking for block containing offset
3261 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3262 ind++;
3263 }
3264
3265 // Go back to actual range as search goes past it
3266 ioPLBlock ioplInfo = ioplList[ind - 1];
3267 off2Ind = ioplInfo.fIOMDOffset;
3268
3269 if (ind < numIOPLs) {
3270 length = ioplList[ind].fIOMDOffset;
3271 } else {
3272 length = _length;
3273 }
3274 length -= offset; // Remainder within iopl
3275
3276 // Subtract offset till this iopl in total list
3277 offset -= off2Ind;
3278
3279 // If a mapped address is requested and this is a pre-mapped IOPL
3280 // then we just need to compute an offset relative to the mapped base.
3281 if (mapped) {
3282 offset += (ioplInfo.fPageOffset & PAGE_MASK);
3283 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
3284 continue; // Done; leave the do/while(false) now
3285 }
3286
3287 // The offset is rebased into the current iopl.
3288 // Now add the iopl 1st page offset.
3289 offset += ioplInfo.fPageOffset;
3290
3291 // For external UPLs the fPageInfo field points directly to
3292 // the upl's upl_page_info_t array.
3293 if (ioplInfo.fFlags & kIOPLExternUPL) {
3294 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3295 } else {
3296 pageList = &pageList[ioplInfo.fPageInfo];
3297 }
3298
3299 // Check for direct device non-paged memory
3300 if (ioplInfo.fFlags & kIOPLOnDevice) {
3301 address = ptoa_64(pageList->phys_addr) + offset;
3302 continue; // Done; leave the do/while(false) now
3303 }
3304
3305 // Now we need to compute the index into the pageList
3306 UInt pageInd = atop_32(offset);
3307 offset &= PAGE_MASK;
3308
3309 // Compute the starting address of this segment
3310 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3311 if (!pageAddr) {
3312 panic("!pageList phys_addr");
3313 }
3314
3315 address = ptoa_64(pageAddr) + offset;
3316
3317 // length is currently set to the length of the remainder of the iopl.
3318 // We need to check that the remainder of the iopl is contiguous.
3319 // This is indicated by pageList[ind].phys_addr being sequential.
3320 IOByteCount contigLength = PAGE_SIZE - offset;
3321 while (contigLength < length
3322 && ++pageAddr == pageList[++pageInd].phys_addr) {
3323 contigLength += PAGE_SIZE;
3324 }
3325
3326 if (contigLength < length) {
3327 length = contigLength;
3328 }
3329
3330
3331 assert(address);
3332 assert(length);
3333 } while (false);
3334 }
3335
3336 // Update return values and state
3337 isP->fIO.fIOVMAddr = address;
3338 isP->fIO.fLength = length;
3339 isP->fIndex = ind;
3340 isP->fOffset2Index = off2Ind;
3341 isP->fNextOffset = isP->fIO.fOffset + length;
3342
3343 return kIOReturnSuccess;
3344 }
3345
3346 addr64_t
3347 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3348 {
3349 IOReturn ret;
3350 mach_vm_address_t address = 0;
3351 mach_vm_size_t length = 0;
3352 IOMapper * mapper = gIOSystemMapper;
3353 IOOptionBits type = _flags & kIOMemoryTypeMask;
3354
3355 if (lengthOfSegment) {
3356 *lengthOfSegment = 0;
3357 }
3358
3359 if (offset >= _length) {
3360 return 0;
3361 }
3362
3363 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3364 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3365 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3366 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3367
3368 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3369 unsigned rangesIndex = 0;
3370 Ranges vec = _ranges;
3371 mach_vm_address_t addr;
3372
3373 // Find starting address within the vector of ranges
3374 for (;;) {
3375 getAddrLenForInd(addr, length, type, vec, rangesIndex, _task);
3376 if (offset < length) {
3377 break;
3378 }
3379 offset -= length; // (make offset relative)
3380 rangesIndex++;
3381 }
3382
3383 // Now that we have the starting range,
3384 // lets find the last contiguous range
3385 addr += offset;
3386 length -= offset;
3387
3388 for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3389 mach_vm_address_t newAddr;
3390 mach_vm_size_t newLen;
3391
3392 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex, _task);
3393 if (addr + length != newAddr) {
3394 break;
3395 }
3396 length += newLen;
3397 }
3398 if (addr) {
3399 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3400 }
3401 } else {
3402 IOMDDMAWalkSegmentState _state;
3403 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3404
3405 state->fOffset = offset;
3406 state->fLength = _length - offset;
3407 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3408
3409 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3410
3411 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3412 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3413 ret, this, state->fOffset,
3414 state->fIOVMAddr, state->fLength);
3415 }
3416 if (kIOReturnSuccess == ret) {
3417 address = state->fIOVMAddr;
3418 length = state->fLength;
3419 }
3420
3421 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3422 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3423
3424 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3425 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3426 addr64_t origAddr = address;
3427 IOByteCount origLen = length;
3428
3429 address = mapper->mapToPhysicalAddress(origAddr);
3430 length = page_size - (address & (page_size - 1));
3431 while ((length < origLen)
3432 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3433 length += page_size;
3434 }
3435 if (length > origLen) {
3436 length = origLen;
3437 }
3438 }
3439 }
3440 }
3441
3442 if (!address) {
3443 length = 0;
3444 }
3445
3446 if (lengthOfSegment) {
3447 *lengthOfSegment = length;
3448 }
3449
3450 return address;
3451 }
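/*
 * Usage sketch added for illustration (not in the original source): walking a
 * prepared descriptor's physical segments with getPhysicalSegment(), the same
 * loop shape ktraceEmitPhysicalSegments() uses above. The md variable is an
 * assumed, already prepared IOMemoryDescriptor.
 */
#if 0 /* illustrative example only */
IOByteCount offset = 0;
while (offset < md->getLength()) {
	IOByteCount segLen  = 0;
	addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
	if (!segAddr || !segLen) {
		break;                          // unmapped or exhausted
	}
	// ... append (segAddr, segLen) to a scatter/gather list ...
	offset += segLen;
}
#endif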
3452
3453 #ifndef __LP64__
3454 #pragma clang diagnostic push
3455 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3456
3457 addr64_t
3458 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3459 {
3460 addr64_t address = 0;
3461
3462 if (options & _kIOMemorySourceSegment) {
3463 address = getSourceSegment(offset, lengthOfSegment);
3464 } else if (options & kIOMemoryMapperNone) {
3465 address = getPhysicalSegment64(offset, lengthOfSegment);
3466 } else {
3467 address = getPhysicalSegment(offset, lengthOfSegment);
3468 }
3469
3470 return address;
3471 }
3472 #pragma clang diagnostic pop
3473
3474 addr64_t
3475 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3476 {
3477 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3478 }
3479
3480 IOPhysicalAddress
3481 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3482 {
3483 addr64_t address = 0;
3484 IOByteCount length = 0;
3485
3486 address = getPhysicalSegment(offset, lengthOfSegment, 0);
3487
3488 if (lengthOfSegment) {
3489 length = *lengthOfSegment;
3490 }
3491
3492 if ((address + length) > 0x100000000ULL) {
3493 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3494 address, (long) length, (getMetaClass())->getClassName());
3495 }
3496
3497 return (IOPhysicalAddress) address;
3498 }
3499
3500 addr64_t
3501 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3502 {
3503 IOPhysicalAddress phys32;
3504 IOByteCount length;
3505 addr64_t phys64;
3506 IOMapper * mapper = NULL;
3507
3508 phys32 = getPhysicalSegment(offset, lengthOfSegment);
3509 if (!phys32) {
3510 return 0;
3511 }
3512
3513 if (gIOSystemMapper) {
3514 mapper = gIOSystemMapper;
3515 }
3516
3517 if (mapper) {
3518 IOByteCount origLen;
3519
3520 phys64 = mapper->mapToPhysicalAddress(phys32);
3521 origLen = *lengthOfSegment;
3522 length = page_size - (phys64 & (page_size - 1));
3523 while ((length < origLen)
3524 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3525 length += page_size;
3526 }
3527 if (length > origLen) {
3528 length = origLen;
3529 }
3530
3531 *lengthOfSegment = length;
3532 } else {
3533 phys64 = (addr64_t) phys32;
3534 }
3535
3536 return phys64;
3537 }
3538
3539 IOPhysicalAddress
3540 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3541 {
3542 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3543 }
3544
3545 IOPhysicalAddress
3546 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3547 {
3548 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3549 }
3550
3551 #pragma clang diagnostic push
3552 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3553
3554 void *
3555 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3556 IOByteCount * lengthOfSegment)
3557 {
3558 if (_task == kernel_task) {
3559 return (void *) getSourceSegment(offset, lengthOfSegment);
3560 } else {
3561 panic("IOGMD::getVirtualSegment deprecated");
3562 }
3563
3564 return NULL;
3565 }
3566 #pragma clang diagnostic pop
3567 #endif /* !__LP64__ */
3568
3569 IOReturn
3570 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3571 {
3572 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
3573 DMACommandOps params;
3574 IOReturn err;
3575
3576 params = (op & ~kIOMDDMACommandOperationMask & op);
3577 op &= kIOMDDMACommandOperationMask;
3578
3579 if (kIOMDGetCharacteristics == op) {
3580 if (dataSize < sizeof(IOMDDMACharacteristics)) {
3581 return kIOReturnUnderrun;
3582 }
3583
3584 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3585 data->fLength = getLength();
3586 data->fSGCount = 0;
3587 data->fDirection = getDirection();
3588 data->fIsPrepared = true; // Assume prepared - the fail-safe default
3589 } else if (kIOMDWalkSegments == op) {
3590 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
3591 return kIOReturnUnderrun;
3592 }
3593
3594 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
3595 IOByteCount offset = (IOByteCount) data->fOffset;
3596 IOPhysicalLength length, nextLength;
3597 addr64_t addr, nextAddr;
3598
3599 if (data->fMapped) {
3600 panic("fMapped %p %s %qx", this, getMetaClass()->getClassName(), (uint64_t) getLength());
3601 }
3602 addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
3603 offset += length;
3604 while (offset < getLength()) {
3605 nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
3606 if ((addr + length) != nextAddr) {
3607 break;
3608 }
3609 length += nextLength;
3610 offset += nextLength;
3611 }
3612 data->fIOVMAddr = addr;
3613 data->fLength = length;
3614 } else if (kIOMDAddDMAMapSpec == op) {
3615 return kIOReturnUnsupported;
3616 } else if (kIOMDDMAMap == op) {
3617 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3618 return kIOReturnUnderrun;
3619 }
3620 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3621
3622 err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3623
3624 return err;
3625 } else if (kIOMDDMAUnmap == op) {
3626 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3627 return kIOReturnUnderrun;
3628 }
3629 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3630
3631 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3632
3633 return kIOReturnSuccess;
3634 } else {
3635 return kIOReturnBadArgument;
3636 }
3637
3638 return kIOReturnSuccess;
3639 }
3640
3641 IOReturn
3642 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
3643 IOOptionBits * oldState )
3644 {
3645 IOReturn err = kIOReturnSuccess;
3646
3647 vm_purgable_t control;
3648 int state;
3649
3650 assert(!(kIOMemoryRemote & _flags));
3651 if (kIOMemoryRemote & _flags) {
3652 return kIOReturnNotAttached;
3653 }
3654
3655 if (_memRef) {
3656 err = super::setPurgeable(newState, oldState);
3657 } else {
3658 if (kIOMemoryThreadSafe & _flags) {
3659 LOCK;
3660 }
3661 do{
3662 // Find the appropriate vm_map for the given task
3663 vm_map_t curMap;
3664 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
3665 err = kIOReturnNotReady;
3666 break;
3667 } else if (!_task) {
3668 err = kIOReturnUnsupported;
3669 break;
3670 } else {
3671 curMap = get_task_map(_task);
3672 if (NULL == curMap) {
3673 err = KERN_INVALID_ARGUMENT;
3674 break;
3675 }
3676 }
3677
3678 // can only do one range
3679 Ranges vec = _ranges;
3680 IOOptionBits type = _flags & kIOMemoryTypeMask;
3681 mach_vm_address_t addr;
3682 mach_vm_size_t len;
3683 getAddrLenForInd(addr, len, type, vec, 0, _task);
3684
3685 err = purgeableControlBits(newState, &control, &state);
3686 if (kIOReturnSuccess != err) {
3687 break;
3688 }
3689 err = vm_map_purgable_control(curMap, addr, control, &state);
3690 if (oldState) {
3691 if (kIOReturnSuccess == err) {
3692 err = purgeableStateBits(&state);
3693 *oldState = state;
3694 }
3695 }
3696 }while (false);
3697 if (kIOMemoryThreadSafe & _flags) {
3698 UNLOCK;
3699 }
3700 }
3701
3702 return err;
3703 }
3704
3705 IOReturn
3706 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3707 IOOptionBits * oldState )
3708 {
3709 IOReturn err = kIOReturnNotReady;
3710
3711 if (kIOMemoryThreadSafe & _flags) {
3712 LOCK;
3713 }
3714 if (_memRef) {
3715 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3716 }
3717 if (kIOMemoryThreadSafe & _flags) {
3718 UNLOCK;
3719 }
3720
3721 return err;
3722 }
3723
3724 IOReturn
3725 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3726 int newLedgerTag,
3727 IOOptionBits newLedgerOptions )
3728 {
3729 IOReturn err = kIOReturnSuccess;
3730
3731 assert(!(kIOMemoryRemote & _flags));
3732 if (kIOMemoryRemote & _flags) {
3733 return kIOReturnNotAttached;
3734 }
3735
3736 if (iokit_iomd_setownership_enabled == FALSE) {
3737 return kIOReturnUnsupported;
3738 }
3739
3740 if (_memRef) {
3741 err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3742 } else {
3743 err = kIOReturnUnsupported;
3744 }
3745
3746 return err;
3747 }
3748
3749 IOReturn
3750 IOMemoryDescriptor::setOwnership( task_t newOwner,
3751 int newLedgerTag,
3752 IOOptionBits newLedgerOptions )
3753 {
3754 IOReturn err = kIOReturnNotReady;
3755
3756 assert(!(kIOMemoryRemote & _flags));
3757 if (kIOMemoryRemote & _flags) {
3758 return kIOReturnNotAttached;
3759 }
3760
3761 if (iokit_iomd_setownership_enabled == FALSE) {
3762 return kIOReturnUnsupported;
3763 }
3764
3765 if (kIOMemoryThreadSafe & _flags) {
3766 LOCK;
3767 }
3768 if (_memRef) {
3769 err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3770 } else {
3771 IOMultiMemoryDescriptor * mmd;
3772 IOSubMemoryDescriptor * smd;
3773 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3774 err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3775 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3776 err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3777 }
3778 }
3779 if (kIOMemoryThreadSafe & _flags) {
3780 UNLOCK;
3781 }
3782
3783 return err;
3784 }
3785
3786
3787 uint64_t
3788 IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3789 {
3790 uint64_t length;
3791
3792 if (_memRef) {
3793 length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3794 } else {
3795 IOByteCount iterate, segLen;
3796 IOPhysicalAddress sourceAddr, sourceAlign;
3797
3798 if (kIOMemoryThreadSafe & _flags) {
3799 LOCK;
3800 }
3801 length = 0;
3802 iterate = 0;
3803 while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3804 sourceAlign = (sourceAddr & page_mask);
3805 if (offset && !iterate) {
3806 *offset = sourceAlign;
3807 }
3808 length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3809 iterate += segLen;
3810 }
3811 if (!iterate) {
3812 length = getLength();
3813 if (offset) {
3814 *offset = 0;
3815 }
3816 }
3817 if (kIOMemoryThreadSafe & _flags) {
3818 UNLOCK;
3819 }
3820 }
3821
3822 return length;
3823 }
3824
3825
3826 IOReturn
3827 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3828 IOByteCount * dirtyPageCount )
3829 {
3830 IOReturn err = kIOReturnNotReady;
3831
3832 assert(!(kIOMemoryRemote & _flags));
3833 if (kIOMemoryRemote & _flags) {
3834 return kIOReturnNotAttached;
3835 }
3836
3837 if (kIOMemoryThreadSafe & _flags) {
3838 LOCK;
3839 }
3840 if (_memRef) {
3841 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3842 } else {
3843 IOMultiMemoryDescriptor * mmd;
3844 IOSubMemoryDescriptor * smd;
3845 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3846 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3847 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3848 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3849 }
3850 }
3851 if (kIOMemoryThreadSafe & _flags) {
3852 UNLOCK;
3853 }
3854
3855 return err;
3856 }
3857
3858
3859 #if defined(__arm64__)
3860 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3861 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3862 #else /* defined(__arm64__) */
3863 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3864 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3865 #endif /* defined(__arm64__) */
3866
3867 static void
3868 SetEncryptOp(addr64_t pa, unsigned int count)
3869 {
3870 ppnum_t page, end;
3871
3872 page = (ppnum_t) atop_64(round_page_64(pa));
3873 end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3874 for (; page < end; page++) {
3875 pmap_clear_noencrypt(page);
3876 }
3877 }
3878
3879 static void
3880 ClearEncryptOp(addr64_t pa, unsigned int count)
3881 {
3882 ppnum_t page, end;
3883
3884 page = (ppnum_t) atop_64(round_page_64(pa));
3885 end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3886 for (; page < end; page++) {
3887 pmap_set_noencrypt(page);
3888 }
3889 }
3890
3891 IOReturn
3892 IOMemoryDescriptor::performOperation( IOOptionBits options,
3893 IOByteCount offset, IOByteCount length )
3894 {
3895 IOByteCount remaining;
3896 unsigned int res;
3897 void (*func)(addr64_t pa, unsigned int count) = NULL;
3898 #if defined(__arm64__)
3899 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3900 #endif
3901
3902 assert(!(kIOMemoryRemote & _flags));
3903 if (kIOMemoryRemote & _flags) {
3904 return kIOReturnNotAttached;
3905 }
3906
3907 switch (options) {
3908 case kIOMemoryIncoherentIOFlush:
3909 #if defined(__arm64__)
3910 func_ext = &dcache_incoherent_io_flush64;
3911 #if __ARM_COHERENT_IO__
3912 func_ext(0, 0, 0, &res);
3913 return kIOReturnSuccess;
3914 #else /* __ARM_COHERENT_IO__ */
3915 break;
3916 #endif /* __ARM_COHERENT_IO__ */
3917 #else /* defined(__arm64__) */
3918 func = &dcache_incoherent_io_flush64;
3919 break;
3920 #endif /* defined(__arm64__) */
3921 case kIOMemoryIncoherentIOStore:
3922 #if defined(__arm64__)
3923 func_ext = &dcache_incoherent_io_store64;
3924 #if __ARM_COHERENT_IO__
3925 func_ext(0, 0, 0, &res);
3926 return kIOReturnSuccess;
3927 #else /* __ARM_COHERENT_IO__ */
3928 break;
3929 #endif /* __ARM_COHERENT_IO__ */
3930 #else /* defined(__arm64__) */
3931 func = &dcache_incoherent_io_store64;
3932 break;
3933 #endif /* defined(__arm64__) */
3934
3935 case kIOMemorySetEncrypted:
3936 func = &SetEncryptOp;
3937 break;
3938 case kIOMemoryClearEncrypted:
3939 func = &ClearEncryptOp;
3940 break;
3941 }
3942
3943 #if defined(__arm64__)
3944 if ((func == NULL) && (func_ext == NULL)) {
3945 return kIOReturnUnsupported;
3946 }
3947 #else /* defined(__arm64__) */
3948 if (!func) {
3949 return kIOReturnUnsupported;
3950 }
3951 #endif /* defined(__arm64__) */
3952
3953 if (kIOMemoryThreadSafe & _flags) {
3954 LOCK;
3955 }
3956
3957 res = 0x0UL;
3958 remaining = length = min(length, getLength() - offset);
3959 while (remaining) {
3960 // (process another target segment?)
3961 addr64_t dstAddr64;
3962 IOByteCount dstLen;
3963
3964 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3965 if (!dstAddr64) {
3966 break;
3967 }
3968
3969 // Clip segment length to remaining
3970 if (dstLen > remaining) {
3971 dstLen = remaining;
3972 }
3973 if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
3974 dstLen = (UINT_MAX - PAGE_SIZE + 1);
3975 }
3976 if (remaining > UINT_MAX) {
3977 remaining = UINT_MAX;
3978 }
3979
3980 #if defined(__arm64__)
3981 if (func) {
3982 (*func)(dstAddr64, (unsigned int) dstLen);
3983 }
3984 if (func_ext) {
3985 (*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
3986 if (res != 0x0UL) {
3987 remaining = 0;
3988 break;
3989 }
3990 }
3991 #else /* defined(__arm64__) */
3992 (*func)(dstAddr64, (unsigned int) dstLen);
3993 #endif /* defined(__arm64__) */
3994
3995 offset += dstLen;
3996 remaining -= dstLen;
3997 }
3998
3999 if (kIOMemoryThreadSafe & _flags) {
4000 UNLOCK;
4001 }
4002
4003 return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
4004 }
4005
4006 /*
4007 *
4008 */
4009
4010 #if defined(__i386__) || defined(__x86_64__)
4011
4012 extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
4013
4014 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
4015 * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
4016 * kernel non-text data -- should we just add another range instead?
4017 */
4018 #define io_kernel_static_start vm_kernel_stext
4019 #define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
4020
4021 #elif defined(__arm64__)
4022
4023 extern vm_offset_t static_memory_end;
4024
4025 #if defined(__arm64__)
4026 #define io_kernel_static_start vm_kext_base
4027 #else /* defined(__arm64__) */
4028 #define io_kernel_static_start vm_kernel_stext
4029 #endif /* defined(__arm64__) */
4030
4031 #define io_kernel_static_end static_memory_end
4032
4033 #else
4034 #error io_kernel_static_end is undefined for this architecture
4035 #endif
4036
4037 static kern_return_t
4038 io_get_kernel_static_upl(
4039 vm_map_t /* map */,
4040 uintptr_t offset,
4041 upl_size_t *upl_size,
4042 unsigned int *page_offset,
4043 upl_t *upl,
4044 upl_page_info_array_t page_list,
4045 unsigned int *count,
4046 ppnum_t *highest_page)
4047 {
4048 unsigned int pageCount, page;
4049 ppnum_t phys;
4050 ppnum_t highestPage = 0;
4051
4052 pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
4053 if (pageCount > *count) {
4054 pageCount = *count;
4055 }
4056 *upl_size = (upl_size_t) ptoa_64(pageCount);
4057
4058 *upl = NULL;
4059 *page_offset = ((unsigned int) page_mask & offset);
4060
4061 for (page = 0; page < pageCount; page++) {
4062 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
4063 if (!phys) {
4064 break;
4065 }
4066 page_list[page].phys_addr = phys;
4067 page_list[page].free_when_done = 0;
4068 page_list[page].absent = 0;
4069 page_list[page].dirty = 0;
4070 page_list[page].precious = 0;
4071 page_list[page].device = 0;
4072 if (phys > highestPage) {
4073 highestPage = phys;
4074 }
4075 }
4076
4077 *highest_page = highestPage;
4078
4079 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
4080 }
4081
4082 IOReturn
4083 IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
4084 {
4085 IOOptionBits type = _flags & kIOMemoryTypeMask;
4086 IOReturn error = kIOReturnSuccess;
4087 ioGMDData *dataP;
4088 upl_page_info_array_t pageInfo;
4089 ppnum_t mapBase;
4090 vm_tag_t tag = VM_KERN_MEMORY_NONE;
4091 mach_vm_size_t numBytesWired = 0;
4092
4093 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
4094
4095 if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
4096 forDirection = (IODirection) (forDirection | getDirection());
4097 }
4098
4099 dataP = getDataP(_memoryEntries);
4100 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
4101 switch (kIODirectionOutIn & forDirection) {
4102 case kIODirectionOut:
4103 // Pages do not need to be marked as dirty on commit
4104 uplFlags = UPL_COPYOUT_FROM;
4105 dataP->fDMAAccess = kIODMAMapReadAccess;
4106 break;
4107
4108 case kIODirectionIn:
4109 dataP->fDMAAccess = kIODMAMapWriteAccess;
4110 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4111 break;
4112
4113 default:
4114 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
4115 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
4116 break;
4117 }
4118
4119 if (_wireCount) {
4120 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
4121 OSReportWithBacktrace("IOMemoryDescriptor 0x%zx prepared read only",
4122 (size_t)VM_KERNEL_ADDRPERM(this));
4123 error = kIOReturnNotWritable;
4124 }
4125 } else {
4126 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
4127 IOMapper *mapper;
4128
4129 mapper = dataP->fMapper;
4130 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4131
4132 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
4133 tag = _kernelTag;
4134 if (VM_KERN_MEMORY_NONE == tag) {
4135 tag = IOMemoryTag(kernel_map);
4136 }
4137
4138 if (kIODirectionPrepareToPhys32 & forDirection) {
4139 if (!mapper) {
4140 uplFlags |= UPL_NEED_32BIT_ADDR;
4141 }
4142 if (dataP->fDMAMapNumAddressBits > 32) {
4143 dataP->fDMAMapNumAddressBits = 32;
4144 }
4145 }
4146 if (kIODirectionPrepareNoFault & forDirection) {
4147 uplFlags |= UPL_REQUEST_NO_FAULT;
4148 }
4149 if (kIODirectionPrepareNoZeroFill & forDirection) {
4150 uplFlags |= UPL_NOZEROFILLIO;
4151 }
4152 if (kIODirectionPrepareNonCoherent & forDirection) {
4153 uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
4154 }
4155
4156 mapBase = 0;
4157
4158 // Note that appendBytes(NULL) zeros the data up to the desired length
4159 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4160 if (uplPageSize > ((unsigned int)uplPageSize)) {
4161 error = kIOReturnNoMemory;
4162 traceInterval.setEndArg2(error);
4163 return error;
4164 }
4165 if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
4166 error = kIOReturnNoMemory;
4167 traceInterval.setEndArg2(error);
4168 return error;
4169 }
4170 dataP = NULL;
4171
4172 // Find the appropriate vm_map for the given task
4173 vm_map_t curMap;
4174 if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4175 curMap = NULL;
4176 } else {
4177 curMap = get_task_map(_task);
4178 }
4179
4180 // Iterate over the vector of virtual ranges
4181 Ranges vec = _ranges;
4182 unsigned int pageIndex = 0;
4183 IOByteCount mdOffset = 0;
4184 ppnum_t highestPage = 0;
4185 bool byteAlignUPL;
4186
4187 IOMemoryEntry * memRefEntry = NULL;
4188 if (_memRef) {
4189 memRefEntry = &_memRef->entries[0];
4190 byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4191 } else {
4192 byteAlignUPL = true;
4193 }
4194
4195 for (UInt range = 0; mdOffset < _length; range++) {
4196 ioPLBlock iopl;
4197 mach_vm_address_t startPage, startPageOffset;
4198 mach_vm_size_t numBytes;
4199 ppnum_t highPage = 0;
4200
4201 if (_memRef) {
4202 if (range >= _memRef->count) {
4203 panic("memRefEntry");
4204 }
4205 memRefEntry = &_memRef->entries[range];
4206 numBytes = memRefEntry->size;
4207 startPage = -1ULL;
4208 if (byteAlignUPL) {
4209 startPageOffset = 0;
4210 } else {
4211 startPageOffset = (memRefEntry->start & PAGE_MASK);
4212 }
4213 } else {
4214 // Get the startPage address and length of vec[range]
4215 getAddrLenForInd(startPage, numBytes, type, vec, range, _task);
4216 if (byteAlignUPL) {
4217 startPageOffset = 0;
4218 } else {
4219 startPageOffset = startPage & PAGE_MASK;
4220 startPage = trunc_page_64(startPage);
4221 }
4222 }
4223 iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4224 numBytes += startPageOffset;
4225
4226 if (mapper) {
4227 iopl.fMappedPage = mapBase + pageIndex;
4228 } else {
4229 iopl.fMappedPage = 0;
4230 }
4231
4232 // Iterate over the current range, creating UPLs
4233 while (numBytes) {
4234 vm_address_t kernelStart = (vm_address_t) startPage;
4235 vm_map_t theMap;
4236 if (curMap) {
4237 theMap = curMap;
4238 } else if (_memRef) {
4239 theMap = NULL;
4240 } else {
4241 assert(_task == kernel_task);
4242 theMap = IOPageableMapForAddress(kernelStart);
4243 }
4244
4245 // ioplFlags is an in/out parameter
4246 upl_control_flags_t ioplFlags = uplFlags;
4247 dataP = getDataP(_memoryEntries);
4248 pageInfo = getPageList(dataP);
4249 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4250
4251 mach_vm_size_t ioplPhysSize;
4252 upl_size_t ioplSize;
4253 unsigned int numPageInfo;
4254
4255 if (_memRef) {
4256 error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4257 DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4258 } else {
4259 error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4260 DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4261 }
4262 if (error != KERN_SUCCESS) {
4263 if (_memRef) {
4264 DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4265 } else {
4266 DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4267 }
4268 printf("entry size error %d\n", error);
4269 goto abortExit;
4270 }
4271 ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4272 numPageInfo = atop_32(ioplPhysSize);
4273 if (byteAlignUPL) {
4274 if (numBytes > ioplPhysSize) {
4275 ioplSize = ((typeof(ioplSize))ioplPhysSize);
4276 } else {
4277 ioplSize = ((typeof(ioplSize))numBytes);
4278 }
4279 } else {
4280 ioplSize = ((typeof(ioplSize))ioplPhysSize);
4281 }
4282
4283 if (_memRef) {
4284 memory_object_offset_t entryOffset;
4285
4286 entryOffset = mdOffset;
4287 if (byteAlignUPL) {
4288 entryOffset = (entryOffset - memRefEntry->offset);
4289 } else {
4290 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4291 }
4292 if (ioplSize > (memRefEntry->size - entryOffset)) {
4293 ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4294 }
4295 error = memory_object_iopl_request(memRefEntry->entry,
4296 entryOffset,
4297 &ioplSize,
4298 &iopl.fIOPL,
4299 baseInfo,
4300 &numPageInfo,
4301 &ioplFlags,
4302 tag);
4303 } else if ((theMap == kernel_map)
4304 && (kernelStart >= io_kernel_static_start)
4305 && (kernelStart < io_kernel_static_end)) {
4306 error = io_get_kernel_static_upl(theMap,
4307 kernelStart,
4308 &ioplSize,
4309 &iopl.fPageOffset,
4310 &iopl.fIOPL,
4311 baseInfo,
4312 &numPageInfo,
4313 &highPage);
4314 } else {
4315 assert(theMap);
4316 error = vm_map_create_upl(theMap,
4317 startPage,
4318 (upl_size_t*)&ioplSize,
4319 &iopl.fIOPL,
4320 baseInfo,
4321 &numPageInfo,
4322 &ioplFlags,
4323 tag);
4324 }
4325
4326 if (error != KERN_SUCCESS) {
4327 traceInterval.setEndArg2(error);
4328 DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4329 goto abortExit;
4330 }
4331
4332 assert(ioplSize);
4333
4334 if (iopl.fIOPL) {
4335 highPage = upl_get_highest_page(iopl.fIOPL);
4336 }
4337 if (highPage > highestPage) {
4338 highestPage = highPage;
4339 }
4340
4341 if (baseInfo->device) {
4342 numPageInfo = 1;
4343 iopl.fFlags = kIOPLOnDevice;
4344 } else {
4345 iopl.fFlags = 0;
4346 }
4347
4348 if (byteAlignUPL) {
4349 if (iopl.fIOPL) {
4350 DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4351 iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4352 }
4353 if (startPage != (mach_vm_address_t)-1) {
4354 // assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4355 startPage -= iopl.fPageOffset;
4356 }
4357 ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4358 numBytes += iopl.fPageOffset;
4359 }
4360
4361 iopl.fIOMDOffset = mdOffset;
4362 iopl.fPageInfo = pageIndex;
4363
4364 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4365 // Clean up partially created and unsaved iopl
4366 if (iopl.fIOPL) {
4367 upl_abort(iopl.fIOPL, 0);
4368 upl_deallocate(iopl.fIOPL);
4369 }
4370 error = kIOReturnNoMemory;
4371 traceInterval.setEndArg2(error);
4372 goto abortExit;
4373 }
4374 dataP = NULL;
4375
4376 // Check for multiple iopls in one virtual range
4377 pageIndex += numPageInfo;
4378 mdOffset -= iopl.fPageOffset;
4379 numBytesWired += ioplSize;
4380 if (ioplSize < numBytes) {
4381 numBytes -= ioplSize;
4382 if (startPage != (mach_vm_address_t)-1) {
4383 startPage += ioplSize;
4384 }
4385 mdOffset += ioplSize;
4386 iopl.fPageOffset = 0;
4387 if (mapper) {
4388 iopl.fMappedPage = mapBase + pageIndex;
4389 }
4390 } else {
4391 mdOffset += numBytes;
4392 break;
4393 }
4394 }
4395 }
4396
4397 _highestPage = highestPage;
4398 DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4399
4400 if (UPL_COPYOUT_FROM & uplFlags) {
4401 _flags |= kIOMemoryPreparedReadOnly;
4402 }
4403 traceInterval.setEndCodes(numBytesWired, error);
4404 }
4405
4406 #if IOTRACKING
4407 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4408 dataP = getDataP(_memoryEntries);
4409 if (!dataP->fWireTracking.link.next) {
4410 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4411 }
4412 }
4413 #endif /* IOTRACKING */
4414
4415 return error;
4416
4417 abortExit:
4418 {
4419 dataP = getDataP(_memoryEntries);
4420 UInt done = getNumIOPL(_memoryEntries, dataP);
4421 ioPLBlock *ioplList = getIOPLList(dataP);
4422
4423 for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4424 if (ioplList[ioplIdx].fIOPL) {
4425 upl_abort(ioplList[ioplIdx].fIOPL, 0);
4426 upl_deallocate(ioplList[ioplIdx].fIOPL);
4427 }
4428 }
4429 _memoryEntries->setLength(computeDataSize(0, 0));
4430 }
4431
4432 if (error == KERN_FAILURE) {
4433 error = kIOReturnCannotWire;
4434 } else if (error == KERN_MEMORY_ERROR) {
4435 error = kIOReturnNoResources;
4436 }
4437
4438 return error;
4439 }
4440
4441 bool
4442 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4443 {
4444 ioGMDData * dataP;
4445
4446 if (size > UINT_MAX) {
4447 return false;
4448 }
4449 if (!_memoryEntries) {
4450 _memoryEntries = _IOMemoryDescriptorMixedData::withCapacity(size);
4451 if (!_memoryEntries) {
4452 return false;
4453 }
4454 } else if (!_memoryEntries->initWithCapacity(size)) {
4455 return false;
4456 }
4457
4458 _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4459 dataP = getDataP(_memoryEntries);
4460
4461 if (mapper == kIOMapperWaitSystem) {
4462 IOMapper::checkForSystemMapper();
4463 mapper = IOMapper::gSystem;
4464 }
4465 dataP->fMapper = mapper;
4466 dataP->fPageCnt = 0;
4467 dataP->fMappedBase = 0;
4468 dataP->fDMAMapNumAddressBits = 64;
4469 dataP->fDMAMapAlignment = 0;
4470 dataP->fPreparationID = kIOPreparationIDUnprepared;
4471 dataP->fCompletionError = false;
4472 dataP->fMappedBaseValid = false;
4473
4474 return true;
4475 }
4476
4477 IOReturn
4478 IOMemoryDescriptor::dmaMap(
4479 IOMapper * mapper,
4480 IOMemoryDescriptor * memory,
4481 IODMACommand * command,
4482 const IODMAMapSpecification * mapSpec,
4483 uint64_t offset,
4484 uint64_t length,
4485 uint64_t * mapAddress,
4486 uint64_t * mapLength)
4487 {
4488 IOReturn err;
4489 uint32_t mapOptions;
4490
4491 mapOptions = 0;
4492 mapOptions |= kIODMAMapReadAccess;
4493 if (!(kIOMemoryPreparedReadOnly & _flags)) {
4494 mapOptions |= kIODMAMapWriteAccess;
4495 }
4496
4497 err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4498 mapSpec, command, NULL, mapAddress, mapLength);
4499
4500 if (kIOReturnSuccess == err) {
4501 dmaMapRecord(mapper, command, *mapLength);
4502 }
4503
4504 return err;
4505 }
4506
4507 void
4508 IOMemoryDescriptor::dmaMapRecord(
4509 IOMapper * mapper,
4510 IODMACommand * command,
4511 uint64_t mapLength)
4512 {
4513 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4514 kern_allocation_name_t alloc;
4515 int16_t prior;
4516
4517 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4518 kern_allocation_update_size(mapper->fAllocName, mapLength, NULL);
4519 }
4520
4521 if (!command) {
4522 return;
4523 }
4524 prior = OSAddAtomic16(1, &_dmaReferences);
4525 if (!prior) {
4526 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4527 _mapName = alloc;
4528 mapLength = _length;
4529 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4530 } else {
4531 _mapName = NULL;
4532 }
4533 }
4534 }
4535
4536 IOReturn
4537 IOMemoryDescriptor::dmaUnmap(
4538 IOMapper * mapper,
4539 IODMACommand * command,
4540 uint64_t offset,
4541 uint64_t mapAddress,
4542 uint64_t mapLength)
4543 {
4544 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4545 IOReturn ret;
4546 kern_allocation_name_t alloc;
4547 kern_allocation_name_t mapName;
4548 int16_t prior;
4549
4550 mapName = NULL;
4551 prior = 0;
4552 if (command) {
4553 mapName = _mapName;
4554 if (_dmaReferences) {
4555 prior = OSAddAtomic16(-1, &_dmaReferences);
4556 } else {
4557 panic("_dmaReferences underflow");
4558 }
4559 }
4560
4561 if (!mapLength) {
4562 traceInterval.setEndArg1(kIOReturnSuccess);
4563 return kIOReturnSuccess;
4564 }
4565
4566 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4567
4568 if ((alloc = mapper->fAllocName)) {
4569 kern_allocation_update_size(alloc, -mapLength, NULL);
4570 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4571 mapLength = _length;
4572 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4573 }
4574 }
4575
4576 traceInterval.setEndArg1(ret);
4577 return ret;
4578 }
4579
4580 IOReturn
4581 IOGeneralMemoryDescriptor::dmaMap(
4582 IOMapper * mapper,
4583 IOMemoryDescriptor * memory,
4584 IODMACommand * command,
4585 const IODMAMapSpecification * mapSpec,
4586 uint64_t offset,
4587 uint64_t length,
4588 uint64_t * mapAddress,
4589 uint64_t * mapLength)
4590 {
4591 IOReturn err = kIOReturnSuccess;
4592 ioGMDData * dataP;
4593 IOOptionBits type = _flags & kIOMemoryTypeMask;
4594
4595 *mapAddress = 0;
4596 if (kIOMemoryHostOnly & _flags) {
4597 return kIOReturnSuccess;
4598 }
4599 if (kIOMemoryRemote & _flags) {
4600 return kIOReturnNotAttached;
4601 }
4602
4603 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4604 || offset || (length != _length)) {
4605 err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4606 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4607 const ioPLBlock * ioplList = getIOPLList(dataP);
4608 upl_page_info_t * pageList;
4609 uint32_t mapOptions = 0;
4610
4611 IODMAMapSpecification mapSpec;
4612 bzero(&mapSpec, sizeof(mapSpec));
4613 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4614 mapSpec.alignment = dataP->fDMAMapAlignment;
4615
4616 // For external UPLs the fPageInfo field points directly to
4617 // the upl's upl_page_info_t array.
4618 if (ioplList->fFlags & kIOPLExternUPL) {
4619 pageList = (upl_page_info_t *) ioplList->fPageInfo;
4620 mapOptions |= kIODMAMapPagingPath;
4621 } else {
4622 pageList = getPageList(dataP);
4623 }
4624
4625 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4626 mapOptions |= kIODMAMapPageListFullyOccupied;
4627 }
4628
4629 assert(dataP->fDMAAccess);
4630 mapOptions |= dataP->fDMAAccess;
4631
4632 // Check for direct device non-paged memory
4633 if (ioplList->fFlags & kIOPLOnDevice) {
4634 mapOptions |= kIODMAMapPhysicallyContiguous;
4635 }
4636
4637 IODMAMapPageList dmaPageList =
4638 {
4639 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
4640 .pageListCount = _pages,
4641 .pageList = &pageList[0]
4642 };
4643 err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4644 command, &dmaPageList, mapAddress, mapLength);
4645
4646 if (kIOReturnSuccess == err) {
4647 dmaMapRecord(mapper, command, *mapLength);
4648 }
4649 }
4650
4651 return err;
4652 }
4653
4654 /*
4655 * prepare
4656 *
4657 * Prepare the memory for an I/O transfer. This involves paging in
4658 * the memory, if necessary, and wiring it down for the duration of
4659 * the transfer. The complete() method completes the processing of
4660 * the memory after the I/O transfer finishes. This method needn't be
4661 * called for non-pageable memory.
4662 */
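/*
 * Illustrative sketch (not part of this file): a typical driver pairs
 * prepare() and complete() around the DMA work, e.g. for an outbound
 * transfer. 'md' and issueTransferAndWait() are hypothetical placeholders.
 *
 *   IOReturn ret = md->prepare(kIODirectionOut);   // page in and wire
 *   if (kIOReturnSuccess == ret) {
 *       issueTransferAndWait(md);                  // device reads the buffer
 *       md->complete(kIODirectionOut);             // balance the prepare()
 *   }
 */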
4663
4664 IOReturn
4665 IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4666 {
4667 IOReturn error = kIOReturnSuccess;
4668 IOOptionBits type = _flags & kIOMemoryTypeMask;
4669 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4670
4671 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4672 traceInterval.setEndArg1(kIOReturnSuccess);
4673 return kIOReturnSuccess;
4674 }
4675
4676 assert(!(kIOMemoryRemote & _flags));
4677 if (kIOMemoryRemote & _flags) {
4678 traceInterval.setEndArg1(kIOReturnNotAttached);
4679 return kIOReturnNotAttached;
4680 }
4681
4682 if (_prepareLock) {
4683 IOLockLock(_prepareLock);
4684 }
4685
4686 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4687 if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4688 error = kIOReturnNotReady;
4689 goto finish;
4690 }
4691 error = wireVirtual(forDirection);
4692 }
4693
4694 if (kIOReturnSuccess == error) {
4695 if (1 == ++_wireCount) {
4696 if (kIOMemoryClearEncrypt & _flags) {
4697 performOperation(kIOMemoryClearEncrypted, 0, _length);
4698 }
4699
4700 ktraceEmitPhysicalSegments();
4701 }
4702 }
4703
4704 finish:
4705
4706 if (_prepareLock) {
4707 IOLockUnlock(_prepareLock);
4708 }
4709 traceInterval.setEndArg1(error);
4710
4711 return error;
4712 }
4713
4714 /*
4715 * complete
4716 *
4717 * Complete processing of the memory after an I/O transfer finishes.
4718 * This method should not be called unless a prepare was previously
4719 * issued; the prepare() and complete() must occur in pairs, before
4720 * and after an I/O transfer involving pageable memory.
4721 */
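/*
 * Illustrative sketch (not part of this file): on a failed transfer a caller
 * can OR in kIODirectionCompleteWithError so the final complete() aborts the
 * UPLs instead of committing them (see fCompletionError below). 'md' and
 * 'transferFailed' are hypothetical placeholders.
 *
 *   IODirection dir = kIODirectionIn;
 *   if (transferFailed) {
 *       dir = (IODirection) (dir | kIODirectionCompleteWithError);
 *   }
 *   md->complete(dir);
 */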
4722
4723 IOReturn
4724 IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4725 {
4726 IOOptionBits type = _flags & kIOMemoryTypeMask;
4727 ioGMDData * dataP;
4728 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4729
4730 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4731 traceInterval.setEndArg1(kIOReturnSuccess);
4732 return kIOReturnSuccess;
4733 }
4734
4735 assert(!(kIOMemoryRemote & _flags));
4736 if (kIOMemoryRemote & _flags) {
4737 traceInterval.setEndArg1(kIOReturnNotAttached);
4738 return kIOReturnNotAttached;
4739 }
4740
4741 if (_prepareLock) {
4742 IOLockLock(_prepareLock);
4743 }
4744 do{
4745 assert(_wireCount);
4746 if (!_wireCount) {
4747 break;
4748 }
4749 dataP = getDataP(_memoryEntries);
4750 if (!dataP) {
4751 break;
4752 }
4753
4754 if (kIODirectionCompleteWithError & forDirection) {
4755 dataP->fCompletionError = true;
4756 }
4757
4758 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4759 performOperation(kIOMemorySetEncrypted, 0, _length);
4760 }
4761
4762 _wireCount--;
4763 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4764 ioPLBlock *ioplList = getIOPLList(dataP);
4765 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4766
4767 if (_wireCount) {
4768 // kIODirectionCompleteWithDataValid & forDirection
4769 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4770 vm_tag_t tag;
4771 tag = (typeof(tag))getVMTag(kernel_map);
4772 for (ind = 0; ind < count; ind++) {
4773 if (ioplList[ind].fIOPL) {
4774 iopl_valid_data(ioplList[ind].fIOPL, tag);
4775 }
4776 }
4777 }
4778 } else {
4779 if (_dmaReferences) {
4780 panic("complete() while dma active");
4781 }
4782
4783 if (dataP->fMappedBaseValid) {
4784 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4785 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4786 }
4787 #if IOTRACKING
4788 if (dataP->fWireTracking.link.next) {
4789 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4790 }
4791 #endif /* IOTRACKING */
4792 // Only complete iopls that we created which are for TypeVirtual
4793 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4794 for (ind = 0; ind < count; ind++) {
4795 if (ioplList[ind].fIOPL) {
4796 if (dataP->fCompletionError) {
4797 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4798 } else {
4799 upl_commit(ioplList[ind].fIOPL, NULL, 0);
4800 }
4801 upl_deallocate(ioplList[ind].fIOPL);
4802 }
4803 }
4804 } else if (kIOMemoryTypeUPL == type) {
4805 upl_set_referenced(ioplList[0].fIOPL, false);
4806 }
4807
4808 _memoryEntries->setLength(computeDataSize(0, 0));
4809
4810 dataP->fPreparationID = kIOPreparationIDUnprepared;
4811 _flags &= ~kIOMemoryPreparedReadOnly;
4812
4813 if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4814 IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4815 }
4816 }
4817 }
4818 }while (false);
4819
4820 if (_prepareLock) {
4821 IOLockUnlock(_prepareLock);
4822 }
4823
4824 traceInterval.setEndArg1(kIOReturnSuccess);
4825 return kIOReturnSuccess;
4826 }
4827
4828 IOOptionBits
4829 IOGeneralMemoryDescriptor::memoryReferenceCreateOptions(IOOptionBits options, IOMemoryMap * mapping)
4830 {
4831 IOOptionBits createOptions = 0;
4832
4833 if (!(kIOMap64Bit & options)) {
4834 panic("IOMemoryDescriptor::makeMapping !64bit");
4835 }
4836 if (!(kIOMapReadOnly & options)) {
4837 createOptions |= kIOMemoryReferenceWrite;
4838 #if DEVELOPMENT || DEBUG
4839 if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4840 && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4841 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4842 }
4843 #endif
4844 }
4845 return createOptions;
4846 }
4847
4848 /*
4849 * Attempt to create any kIOMemoryMapCopyOnWrite named entry needed ahead of the global
4850 * lock taken in IOMemoryDescriptor::makeMapping() since it may allocate real pages on
4851 * creation.
4852 */
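/*
 * Illustrative sketch (not part of this file): a descriptor created with
 * kIOMemoryMapCopyOnWrite over a client task's buffer takes this path on its
 * first mapping, so the named entry is built before the global lock is held.
 * 'clientAddr', 'clientLen' and 'clientTask' are hypothetical placeholders.
 *
 *   OSSharedPtr<IOMemoryDescriptor> md = IOMemoryDescriptor::withAddressRange(
 *       clientAddr, clientLen,
 *       kIODirectionOut | kIOMemoryMapCopyOnWrite, clientTask);
 *   OSSharedPtr<IOMemoryMap> map = md->createMappingInTask(
 *       kernel_task, 0, kIOMapAnywhere | kIOMapReadOnly);
 */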
4853
4854 IOMemoryMap *
4855 IOGeneralMemoryDescriptor::makeMapping(
4856 IOMemoryDescriptor * owner,
4857 task_t __intoTask,
4858 IOVirtualAddress __address,
4859 IOOptionBits options,
4860 IOByteCount __offset,
4861 IOByteCount __length )
4862 {
4863 IOReturn err = kIOReturnSuccess;
4864 IOMemoryMap * mapping;
4865
4866 if ((kIOMemoryMapCopyOnWrite & _flags) && _task && !_memRef) {
4867 struct IOMemoryReference * newRef;
4868 err = memoryReferenceCreate(memoryReferenceCreateOptions(options, (IOMemoryMap *) __address), &newRef);
4869 if (kIOReturnSuccess == err) {
4870 if (!OSCompareAndSwapPtr(NULL, newRef, &_memRef)) {
4871 memoryReferenceFree(newRef);
4872 }
4873 }
4874 }
4875 if (kIOReturnSuccess != err) {
4876 return NULL;
4877 }
4878 mapping = IOMemoryDescriptor::makeMapping(
4879 owner, __intoTask, __address, options, __offset, __length);
4880
4881 #if IOTRACKING
4882 if ((mapping == (IOMemoryMap *) __address)
4883 && (0 == (kIOMapStatic & mapping->fOptions))
4884 && (NULL == mapping->fSuperMap)
4885 && ((kIOTracking & gIOKitDebug) || _task)) {
4886 // only dram maps in the default on development case
4887 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4888 }
4889 #endif /* IOTRACKING */
4890
4891 return mapping;
4892 }
4893
4894 IOReturn
4895 IOGeneralMemoryDescriptor::doMap(
4896 vm_map_t __addressMap,
4897 IOVirtualAddress * __address,
4898 IOOptionBits options,
4899 IOByteCount __offset,
4900 IOByteCount __length )
4901 {
4902 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4903 traceInterval.setEndArg1(kIOReturnSuccess);
4904 #ifndef __LP64__
4905 if (!(kIOMap64Bit & options)) {
4906 panic("IOGeneralMemoryDescriptor::doMap !64bit");
4907 }
4908 #endif /* !__LP64__ */
4909
4910 kern_return_t err;
4911
4912 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
4913 mach_vm_size_t offset = mapping->fOffset + __offset;
4914 mach_vm_size_t length = mapping->fLength;
4915
4916 IOOptionBits type = _flags & kIOMemoryTypeMask;
4917 Ranges vec = _ranges;
4918
4919 mach_vm_address_t range0Addr = 0;
4920 mach_vm_size_t range0Len = 0;
4921
4922 if ((offset >= _length) || ((offset + length) > _length)) {
4923 traceInterval.setEndArg1(kIOReturnBadArgument);
4924 DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4925 // assert(offset == 0 && _length == 0 && length == 0);
4926 return kIOReturnBadArgument;
4927 }
4928
4929 assert(!(kIOMemoryRemote & _flags));
4930 if (kIOMemoryRemote & _flags) {
4931 return 0;
4932 }
4933
4934 if (vec.v) {
4935 getAddrLenForInd(range0Addr, range0Len, type, vec, 0, _task);
4936 }
4937
4938 // mapping source == dest? (could be much better)
4939 if (_task
4940 && (mapping->fAddressTask == _task)
4941 && (mapping->fAddressMap == get_task_map(_task))
4942 && (options & kIOMapAnywhere)
4943 && (!(kIOMapUnique & options))
4944 && (!(kIOMapGuardedMask & options))
4945 && (1 == _rangesCount)
4946 && (0 == offset)
4947 && range0Addr
4948 && (length <= range0Len)) {
4949 mapping->fAddress = range0Addr;
4950 mapping->fOptions |= kIOMapStatic;
4951
4952 return kIOReturnSuccess;
4953 }
4954
4955 if (!_memRef) {
4956 err = memoryReferenceCreate(memoryReferenceCreateOptions(options, mapping), &_memRef);
4957 if (kIOReturnSuccess != err) {
4958 traceInterval.setEndArg1(err);
4959 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4960 return err;
4961 }
4962 }
4963
4964 memory_object_t pager;
4965 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4966
4967 // <upl_transpose //
4968 if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4969 do{
4970 upl_t redirUPL2;
4971 upl_size_t size;
4972 upl_control_flags_t flags;
4973 unsigned int lock_count;
4974
4975 if (!_memRef || (1 != _memRef->count)) {
4976 err = kIOReturnNotReadable;
4977 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4978 break;
4979 }
4980
4981 size = (upl_size_t) round_page(mapping->fLength);
4982 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4983 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4984
4985 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
4986 NULL, NULL,
4987 &flags, (vm_tag_t) getVMTag(kernel_map))) {
4988 redirUPL2 = NULL;
4989 }
4990
4991 for (lock_count = 0;
4992 IORecursiveLockHaveLock(gIOMemoryLock);
4993 lock_count++) {
4994 UNLOCK;
4995 }
4996 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
4997 for (;
4998 lock_count;
4999 lock_count--) {
5000 LOCK;
5001 }
5002
5003 if (kIOReturnSuccess != err) {
5004 IOLog("upl_transpose(%x)\n", err);
5005 err = kIOReturnSuccess;
5006 }
5007
5008 if (redirUPL2) {
5009 upl_commit(redirUPL2, NULL, 0);
5010 upl_deallocate(redirUPL2);
5011 redirUPL2 = NULL;
5012 }
5013 {
5014 // swap the memEntries since they now refer to different vm_objects
5015 IOMemoryReference * me = _memRef;
5016 _memRef = mapping->fMemory->_memRef;
5017 mapping->fMemory->_memRef = me;
5018 }
5019 if (pager) {
5020 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
5021 }
5022 }while (false);
5023 }
5024 // upl_transpose> //
5025 else {
5026 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
5027 if (err) {
5028 DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
5029 }
5030 if ((err == KERN_SUCCESS) && pager) {
5031 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
5032
5033 if (err != KERN_SUCCESS) {
5034 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
5035 } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
5036 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
5037 }
5038 }
5039 }
5040
5041 traceInterval.setEndArg1(err);
5042 if (err) {
5043 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
5044 }
5045 return err;
5046 }
5047
5048 #if IOTRACKING
5049 IOReturn
5050 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
5051 mach_vm_address_t * address, mach_vm_size_t * size)
5052 {
5053 #define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
5054
5055 IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
5056
5057 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
5058 return kIOReturnNotReady;
5059 }
5060
5061 *task = map->fAddressTask;
5062 *address = map->fAddress;
5063 *size = map->fLength;
5064
5065 return kIOReturnSuccess;
5066 }
5067 #endif /* IOTRACKING */
5068
5069 IOReturn
5070 IOGeneralMemoryDescriptor::doUnmap(
5071 vm_map_t addressMap,
5072 IOVirtualAddress __address,
5073 IOByteCount __length )
5074 {
5075 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
5076 IOReturn ret;
5077 ret = super::doUnmap(addressMap, __address, __length);
5078 traceInterval.setEndArg1(ret);
5079 return ret;
5080 }
5081
5082 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5083
5084 #undef super
5085 #define super OSObject
5086
5087 OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )
5088
5089 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
5090 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
5091 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
5092 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
5093 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
5094 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
5095 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
5096 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
5097
5098 /* ex-inline function implementation */
5099 IOPhysicalAddress
5100 IOMemoryMap::getPhysicalAddress()
5101 {
5102 return getPhysicalSegment( 0, NULL );
5103 }
5104
5105 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5106
5107 bool
5108 IOMemoryMap::init(
5109 task_t intoTask,
5110 mach_vm_address_t toAddress,
5111 IOOptionBits _options,
5112 mach_vm_size_t _offset,
5113 mach_vm_size_t _length )
5114 {
5115 if (!intoTask) {
5116 return false;
5117 }
5118
5119 if (!super::init()) {
5120 return false;
5121 }
5122
5123 fAddressMap = get_task_map(intoTask);
5124 if (!fAddressMap) {
5125 return false;
5126 }
5127 vm_map_reference(fAddressMap);
5128
5129 fAddressTask = intoTask;
5130 fOptions = _options;
5131 fLength = _length;
5132 fOffset = _offset;
5133 fAddress = toAddress;
5134
5135 return true;
5136 }
5137
5138 bool
5139 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
5140 {
5141 if (!_memory) {
5142 return false;
5143 }
5144
5145 if (!fSuperMap) {
5146 if ((_offset + fLength) > _memory->getLength()) {
5147 return false;
5148 }
5149 fOffset = _offset;
5150 }
5151
5152
5153 OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
5154 if (fMemory) {
5155 if (fMemory != _memory) {
5156 fMemory->removeMapping(this);
5157 }
5158 }
5159 fMemory = os::move(tempval);
5160
5161 return true;
5162 }
5163
5164 IOReturn
5165 IOMemoryDescriptor::doMap(
5166 vm_map_t __addressMap,
5167 IOVirtualAddress * __address,
5168 IOOptionBits options,
5169 IOByteCount __offset,
5170 IOByteCount __length )
5171 {
5172 return kIOReturnUnsupported;
5173 }
5174
5175 IOReturn
5176 IOMemoryDescriptor::handleFault(
5177 void * _pager,
5178 mach_vm_size_t sourceOffset,
5179 mach_vm_size_t length)
5180 {
5181 if (kIOMemoryRedirected & _flags) {
5182 #if DEBUG
5183 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
5184 #endif
5185 do {
5186 SLEEP;
5187 } while (kIOMemoryRedirected & _flags);
5188 }
5189 return kIOReturnSuccess;
5190 }
5191
5192 IOReturn
5193 IOMemoryDescriptor::populateDevicePager(
5194 void * _pager,
5195 vm_map_t addressMap,
5196 mach_vm_address_t address,
5197 mach_vm_size_t sourceOffset,
5198 mach_vm_size_t length,
5199 IOOptionBits options )
5200 {
5201 IOReturn err = kIOReturnSuccess;
5202 memory_object_t pager = (memory_object_t) _pager;
5203 mach_vm_size_t size;
5204 mach_vm_size_t bytes;
5205 mach_vm_size_t page;
5206 mach_vm_size_t pageOffset;
5207 mach_vm_size_t pagerOffset;
5208 IOPhysicalLength segLen, chunk;
5209 addr64_t physAddr;
5210 IOOptionBits type;
5211
5212 type = _flags & kIOMemoryTypeMask;
5213
5214 if (reserved->dp.pagerContig) {
5215 sourceOffset = 0;
5216 pagerOffset = 0;
5217 }
5218
5219 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
5220 assert( physAddr );
5221 pageOffset = physAddr - trunc_page_64( physAddr );
5222 pagerOffset = sourceOffset;
5223
5224 size = length + pageOffset;
5225 physAddr -= pageOffset;
5226
5227 segLen += pageOffset;
5228 bytes = size;
5229 do{
5230 // in the middle of the loop only map whole pages
5231 if (segLen >= bytes) {
5232 segLen = bytes;
5233 } else if (segLen != trunc_page_64(segLen)) {
5234 err = kIOReturnVMError;
5235 }
5236 if (physAddr != trunc_page_64(physAddr)) {
5237 err = kIOReturnBadArgument;
5238 }
5239
5240 if (kIOReturnSuccess != err) {
5241 break;
5242 }
5243
5244 #if DEBUG || DEVELOPMENT
5245 if ((kIOMemoryTypeUPL != type)
5246 && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
5247 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx",
5248 physAddr, (uint64_t)segLen);
5249 }
5250 #endif /* DEBUG || DEVELOPMENT */
5251
5252 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
5253 for (page = 0;
5254 (page < segLen) && (KERN_SUCCESS == err);
5255 page += chunk) {
5256 err = device_pager_populate_object(pager, pagerOffset,
5257 (ppnum_t)(atop_64(physAddr + page)), chunk);
5258 pagerOffset += chunk;
5259 }
5260
5261 assert(KERN_SUCCESS == err);
5262 if (err) {
5263 break;
5264 }
5265
5266 // This call to vm_fault causes an early pmap level resolution
5267 // of the mappings created above for kernel mappings, since
5268 // faulting in later can't take place from interrupt level.
5269 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
5270 err = vm_fault(addressMap,
5271 (vm_map_offset_t)trunc_page_64(address),
5272 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
5273 FALSE, VM_KERN_MEMORY_NONE,
5274 THREAD_UNINT, NULL,
5275 (vm_map_offset_t)0);
5276
5277 if (KERN_SUCCESS != err) {
5278 break;
5279 }
5280 }
5281
5282 sourceOffset += segLen - pageOffset;
5283 address += segLen;
5284 bytes -= segLen;
5285 pageOffset = 0;
5286 }while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
5287
5288 if (bytes) {
5289 err = kIOReturnBadArgument;
5290 }
5291
5292 return err;
5293 }
5294
5295 IOReturn
5296 IOMemoryDescriptor::doUnmap(
5297 vm_map_t addressMap,
5298 IOVirtualAddress __address,
5299 IOByteCount __length )
5300 {
5301 IOReturn err;
5302 IOMemoryMap * mapping;
5303 mach_vm_address_t address;
5304 mach_vm_size_t length;
5305
5306 if (__length) {
5307 panic("doUnmap");
5308 }
5309
5310 mapping = (IOMemoryMap *) __address;
5311 addressMap = mapping->fAddressMap;
5312 address = mapping->fAddress;
5313 length = mapping->fLength;
5314
5315 if (kIOMapOverwrite & mapping->fOptions) {
5316 err = KERN_SUCCESS;
5317 } else {
5318 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
5319 addressMap = IOPageableMapForAddress( address );
5320 }
5321 #if DEBUG
5322 if (kIOLogMapping & gIOKitDebug) {
5323 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
5324 addressMap, address, length );
5325 }
5326 #endif
5327 err = IOMemoryDescriptorMapDealloc(mapping->fOptions, addressMap, address, length );
5328 if (vm_map_page_mask(addressMap) < PAGE_MASK) {
5329 DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
5330 }
5331 }
5332
5333 #if IOTRACKING
5334 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
5335 #endif /* IOTRACKING */
5336
5337 return err;
5338 }
5339
5340 IOReturn
5341 IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
5342 {
5343 IOReturn err = kIOReturnSuccess;
5344 IOMemoryMap * mapping = NULL;
5345 OSSharedPtr<OSIterator> iter;
5346
5347 LOCK;
5348
5349 if (doRedirect) {
5350 _flags |= kIOMemoryRedirected;
5351 } else {
5352 _flags &= ~kIOMemoryRedirected;
5353 }
5354
5355 do {
5356 if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
5357 memory_object_t pager;
5358
5359 if (reserved) {
5360 pager = (memory_object_t) reserved->dp.devicePager;
5361 } else {
5362 pager = MACH_PORT_NULL;
5363 }
5364
5365 while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
5366 mapping->redirect( safeTask, doRedirect );
5367 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
5368 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
5369 }
5370 }
5371
5372 iter.reset();
5373 }
5374 } while (false);
5375
5376 if (!doRedirect) {
5377 WAKEUP;
5378 }
5379
5380 UNLOCK;
5381
5382 #ifndef __LP64__
5383 // temporary binary compatibility
5384 IOSubMemoryDescriptor * subMem;
5385 if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
5386 err = subMem->redirect( safeTask, doRedirect );
5387 } else {
5388 err = kIOReturnSuccess;
5389 }
5390 #endif /* !__LP64__ */
5391
5392 return err;
5393 }
5394
5395 IOReturn
5396 IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
5397 {
5398 IOReturn err = kIOReturnSuccess;
5399
5400 if (fSuperMap) {
5401 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
5402 } else {
5403 LOCK;
5404
5405 do{
5406 if (!fAddress) {
5407 break;
5408 }
5409 if (!fAddressMap) {
5410 break;
5411 }
5412
5413 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
5414 && (0 == (fOptions & kIOMapStatic))) {
5415 IOUnmapPages( fAddressMap, fAddress, fLength );
5416 err = kIOReturnSuccess;
5417 #if DEBUG
5418 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
5419 #endif
5420 } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
5421 IOOptionBits newMode;
5422 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
5423 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
5424 }
5425 }while (false);
5426 UNLOCK;
5427 }
5428
5429 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5430 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
5431 && safeTask
5432 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
5433 fMemory->redirect(safeTask, doRedirect);
5434 }
5435
5436 return err;
5437 }
5438
5439 IOReturn
5440 IOMemoryMap::unmap( void )
5441 {
5442 IOReturn err;
5443
5444 LOCK;
5445
5446 if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5447 && (0 == (kIOMapStatic & fOptions))) {
5448 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5449 } else {
5450 err = kIOReturnSuccess;
5451 }
5452
5453 if (fAddressMap) {
5454 vm_map_deallocate(fAddressMap);
5455 fAddressMap = NULL;
5456 }
5457
5458 fAddress = 0;
5459
5460 UNLOCK;
5461
5462 return err;
5463 }
5464
5465 void
5466 IOMemoryMap::taskDied( void )
5467 {
5468 LOCK;
5469 if (fUserClientUnmap) {
5470 unmap();
5471 }
5472 #if IOTRACKING
5473 else {
5474 IOTrackingRemoveUser(gIOMapTracking, &fTracking);
5475 }
5476 #endif /* IOTRACKING */
5477
5478 if (fAddressMap) {
5479 vm_map_deallocate(fAddressMap);
5480 fAddressMap = NULL;
5481 }
5482 fAddressTask = NULL;
5483 fAddress = 0;
5484 UNLOCK;
5485 }
5486
5487 IOReturn
5488 IOMemoryMap::userClientUnmap( void )
5489 {
5490 fUserClientUnmap = true;
5491 return kIOReturnSuccess;
5492 }
5493
5494 // Overload the release mechanism. All mappings must be a member
5495 // of a memory descriptor's _mappings set. This means that we
5496 // always have 2 references on a mapping. When either of these references
5497 // is released we need to free ourselves.
5498 void
5499 IOMemoryMap::taggedRelease(const void *tag) const
5500 {
5501 LOCK;
5502 super::taggedRelease(tag, 2);
5503 UNLOCK;
5504 }
5505
5506 void
5507 IOMemoryMap::free()
5508 {
5509 unmap();
5510
5511 if (fMemory) {
5512 LOCK;
5513 fMemory->removeMapping(this);
5514 UNLOCK;
5515 fMemory.reset();
5516 }
5517
5518 if (fSuperMap) {
5519 fSuperMap.reset();
5520 }
5521
5522 if (fRedirUPL) {
5523 upl_commit(fRedirUPL, NULL, 0);
5524 upl_deallocate(fRedirUPL);
5525 }
5526
5527 super::free();
5528 }
5529
5530 IOByteCount
5531 IOMemoryMap::getLength()
5532 {
5533 return fLength;
5534 }
5535
5536 IOVirtualAddress
5537 IOMemoryMap::getVirtualAddress()
5538 {
5539 #ifndef __LP64__
5540 if (fSuperMap) {
5541 fSuperMap->getVirtualAddress();
5542 } else if (fAddressMap
5543 && vm_map_is_64bit(fAddressMap)
5544 && (sizeof(IOVirtualAddress) < 8)) {
5545 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
5546 }
5547 #endif /* !__LP64__ */
5548
5549 return fAddress;
5550 }
5551
5552 #ifndef __LP64__
5553 mach_vm_address_t
5554 IOMemoryMap::getAddress()
5555 {
5556 return fAddress;
5557 }
5558
5559 mach_vm_size_t
5560 IOMemoryMap::getSize()
5561 {
5562 return fLength;
5563 }
5564 #endif /* !__LP64__ */
5565
5566
5567 task_t
5568 IOMemoryMap::getAddressTask()
5569 {
5570 if (fSuperMap) {
5571 return fSuperMap->getAddressTask();
5572 } else {
5573 return fAddressTask;
5574 }
5575 }
5576
5577 IOOptionBits
5578 IOMemoryMap::getMapOptions()
5579 {
5580 return fOptions;
5581 }
5582
5583 IOMemoryDescriptor *
5584 IOMemoryMap::getMemoryDescriptor()
5585 {
5586 return fMemory.get();
5587 }
5588
5589 IOMemoryMap *
5590 IOMemoryMap::copyCompatible(
5591 IOMemoryMap * newMapping )
5592 {
5593 task_t task = newMapping->getAddressTask();
5594 mach_vm_address_t toAddress = newMapping->fAddress;
5595 IOOptionBits _options = newMapping->fOptions;
5596 mach_vm_size_t _offset = newMapping->fOffset;
5597 mach_vm_size_t _length = newMapping->fLength;
5598
5599 if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5600 return NULL;
5601 }
5602 if ((fOptions ^ _options) & kIOMapReadOnly) {
5603 return NULL;
5604 }
5605 if ((fOptions ^ _options) & kIOMapGuardedMask) {
5606 return NULL;
5607 }
5608 if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5609 && ((fOptions ^ _options) & kIOMapCacheMask)) {
5610 return NULL;
5611 }
5612
5613 if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5614 return NULL;
5615 }
5616
5617 if (_offset < fOffset) {
5618 return NULL;
5619 }
5620
5621 _offset -= fOffset;
5622
5623 if ((_offset + _length) > fLength) {
5624 return NULL;
5625 }
5626
5627 if ((fLength == _length) && (!_offset)) {
5628 retain();
5629 newMapping = this;
5630 } else {
5631 newMapping->fSuperMap.reset(this, OSRetain);
5632 newMapping->fOffset = fOffset + _offset;
5633 newMapping->fAddress = fAddress + _offset;
5634 }
5635
5636 return newMapping;
5637 }
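/*
 * copyCompatible() above re-uses an existing mapping for a new request when
 * the request targets the same task/address map, agrees on the read-only,
 * guard and cache attributes, and describes a byte range already covered by
 * this mapping. An exact match returns this object retained; a sub-range
 * returns the new mapping chained to this one via fSuperMap.
 */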
5638
5639 IOReturn
5640 IOMemoryMap::wireRange(
5641 uint32_t options,
5642 mach_vm_size_t offset,
5643 mach_vm_size_t length)
5644 {
5645 IOReturn kr;
5646 mach_vm_address_t start = trunc_page_64(fAddress + offset);
5647 mach_vm_address_t end = round_page_64(fAddress + offset + length);
5648 vm_prot_t prot;
5649
5650 prot = (kIODirectionOutIn & options);
5651 if (prot) {
5652 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5653 } else {
5654 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5655 }
5656
5657 return kr;
5658 }
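/*
 * Usage sketch for wireRange() above (illustrative only, not part of this
 * file): wiring the first page of a kernel mapping and unwiring it later.
 * Direction bits in `options` wire with that protection; passing no direction
 * bits unwires. `md` is assumed to be a prepared IOMemoryDescriptor.
 *
 *   OSSharedPtr<IOMemoryMap> map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
 *   if (map) {
 *       if (kIOReturnSuccess == map->wireRange(kIODirectionOutIn, 0, page_size)) {
 *           // ... touch the memory without taking faults ...
 *           map->wireRange(0, 0, page_size);    // no direction bits -> unwire
 *       }
 *   }
 */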
5659
5660
5661 IOPhysicalAddress
5662 #ifdef __LP64__
5663 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
5664 #else /* !__LP64__ */
5665 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
5666 #endif /* !__LP64__ */
5667 {
5668 IOPhysicalAddress address;
5669
5670 LOCK;
5671 #ifdef __LP64__
5672 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
5673 #else /* !__LP64__ */
5674 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
5675 #endif /* !__LP64__ */
5676 UNLOCK;
5677
5678 return address;
5679 }
5680
5681 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5682
5683 #undef super
5684 #define super OSObject
5685
5686 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5687
5688 void
5689 IOMemoryDescriptor::initialize( void )
5690 {
5691 if (NULL == gIOMemoryLock) {
5692 gIOMemoryLock = IORecursiveLockAlloc();
5693 }
5694
5695 gIOLastPage = IOGetLastPageNumber();
5696 }
5697
5698 void
5699 IOMemoryDescriptor::free( void )
5700 {
5701 if (_mappings) {
5702 _mappings.reset();
5703 }
5704
5705 if (reserved) {
5706 cleanKernelReserved(reserved);
5707 IOFreeType(reserved, IOMemoryDescriptorReserved);
5708 reserved = NULL;
5709 }
5710 super::free();
5711 }
5712
5713 OSSharedPtr<IOMemoryMap>
5714 IOMemoryDescriptor::setMapping(
5715 task_t intoTask,
5716 IOVirtualAddress mapAddress,
5717 IOOptionBits options )
5718 {
5719 return createMappingInTask( intoTask, mapAddress,
5720 options | kIOMapStatic,
5721 0, getLength());
5722 }
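/*
 * setMapping() above records an already existing mapping (hence the forced
 * kIOMapStatic) at mapAddress in intoTask rather than creating a new one; it
 * funnels through createMappingInTask() like the other entry points below.
 */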
5723
5724 OSSharedPtr<IOMemoryMap>
5725 IOMemoryDescriptor::map(
5726 IOOptionBits options )
5727 {
5728 return createMappingInTask( kernel_task, 0,
5729 options | kIOMapAnywhere,
5730 0, getLength());
5731 }
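/*
 * Usage sketch for map() above (illustrative only): the common "map into the
 * kernel" path. map() always adds kIOMapAnywhere and targets kernel_task, so
 * the caller only chooses protection/cache options. `md` is assumed to be a
 * valid, prepared IOMemoryDescriptor.
 *
 *   OSSharedPtr<IOMemoryMap> km = md->map(kIOMapReadOnly);
 *   if (km) {
 *       const void * p = (const void *) km->getVirtualAddress();
 *       IOByteCount  n = km->getLength();
 *       // ... read from p[0..n) ...
 *   }
 */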
5732
5733 #ifndef __LP64__
5734 OSSharedPtr<IOMemoryMap>
5735 IOMemoryDescriptor::map(
5736 task_t intoTask,
5737 IOVirtualAddress atAddress,
5738 IOOptionBits options,
5739 IOByteCount offset,
5740 IOByteCount length )
5741 {
5742 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
5743 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
5744 return NULL;
5745 }
5746
5747 return createMappingInTask(intoTask, atAddress,
5748 options, offset, length);
5749 }
5750 #endif /* !__LP64__ */
5751
5752 OSSharedPtr<IOMemoryMap>
5753 IOMemoryDescriptor::createMappingInTask(
5754 task_t intoTask,
5755 mach_vm_address_t atAddress,
5756 IOOptionBits options,
5757 mach_vm_size_t offset,
5758 mach_vm_size_t length)
5759 {
5760 IOMemoryMap * result;
5761 IOMemoryMap * mapping;
5762
5763 if (0 == length) {
5764 length = getLength();
5765 }
5766
5767 mapping = new IOMemoryMap;
5768
5769 if (mapping
5770 && !mapping->init( intoTask, atAddress,
5771 options, offset, length )) {
5772 mapping->release();
5773 mapping = NULL;
5774 }
5775
5776 if (mapping) {
5777 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5778 } else {
5779 result = nullptr;
5780 }
5781
5782 #if DEBUG
5783 if (!result) {
5784 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5785 this, atAddress, (uint32_t) options, offset, length);
5786 }
5787 #endif
5788
5789 // already retained through makeMapping
5790 OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5791
5792 return retval;
5793 }
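/*
 * Usage sketch for createMappingInTask() above (illustrative only): mapping a
 * descriptor into a user task and letting the VM choose the address.
 * `userTask` is a hypothetical task_t, e.g. one a user client obtained from
 * its owning task; a zero length means "the whole descriptor", as handled
 * above.
 *
 *   OSSharedPtr<IOMemoryMap> um = md->createMappingInTask(userTask, 0,
 *       kIOMapAnywhere | kIOMapReadOnly, 0, 0);
 *   if (um) {
 *       mach_vm_address_t uva = um->getAddress();   // address in userTask
 *       // ... hand uva back to the client ...
 *   }
 */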
5794
5795 #ifndef __LP64__ // there is only a 64-bit version for LP64
5796 IOReturn
5797 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5798 IOOptionBits options,
5799 IOByteCount offset)
5800 {
5801 return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
5802 }
5803 #endif
5804
5805 IOReturn
5806 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5807 IOOptionBits options,
5808 mach_vm_size_t offset)
5809 {
5810 IOReturn err = kIOReturnSuccess;
5811 OSSharedPtr<IOMemoryDescriptor> physMem;
5812
5813 LOCK;
5814
5815 if (fAddress && fAddressMap) {
5816 do{
5817 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5818 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5819 physMem = fMemory;
5820 }
5821
5822 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
5823 upl_size_t size = (typeof(size))round_page(fLength);
5824 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5825 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5826 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
5827 NULL, NULL,
5828 &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
5829 fRedirUPL = NULL;
5830 }
5831
5832 if (physMem) {
5833 IOUnmapPages( fAddressMap, fAddress, fLength );
5834 if ((false)) {
5835 physMem->redirect(NULL, true);
5836 }
5837 }
5838 }
5839
5840 if (newBackingMemory) {
5841 if (newBackingMemory != fMemory) {
5842 fOffset = 0;
5843 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
5844 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
5845 offset, fLength)) {
5846 err = kIOReturnError;
5847 }
5848 }
5849 if (fRedirUPL) {
5850 upl_commit(fRedirUPL, NULL, 0);
5851 upl_deallocate(fRedirUPL);
5852 fRedirUPL = NULL;
5853 }
5854 if ((false) && physMem) {
5855 physMem->redirect(NULL, false);
5856 }
5857 }
5858 }while (false);
5859 }
5860
5861 UNLOCK;
5862
5863 return err;
5864 }
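/*
 * This form of redirect() retargets a mapping at a different backing
 * descriptor: accesses are first blocked (via an IOPL taken with
 * UPL_BLOCK_ACCESS and, for physical descriptors, an unmap), then the mapping
 * is re-made against newBackingMemory through makeMapping() with
 * kIOMapUnique | kIOMapReference.
 */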
5865
5866 IOMemoryMap *
5867 IOMemoryDescriptor::makeMapping(
5868 IOMemoryDescriptor * owner,
5869 task_t __intoTask,
5870 IOVirtualAddress __address,
5871 IOOptionBits options,
5872 IOByteCount __offset,
5873 IOByteCount __length )
5874 {
5875 #ifndef __LP64__
5876 if (!(kIOMap64Bit & options)) {
5877 panic("IOMemoryDescriptor::makeMapping !64bit");
5878 }
5879 #endif /* !__LP64__ */
5880
5881 OSSharedPtr<IOMemoryDescriptor> mapDesc;
5882 __block IOMemoryMap * result = NULL;
5883
5884 IOMemoryMap * mapping = (IOMemoryMap *) __address;
5885 mach_vm_size_t offset = mapping->fOffset + __offset;
5886 mach_vm_size_t length = mapping->fLength;
5887
5888 mapping->fOffset = offset;
5889
5890 LOCK;
5891
5892 do{
5893 if (kIOMapStatic & options) {
5894 result = mapping;
5895 addMapping(mapping);
5896 mapping->setMemoryDescriptor(this, 0);
5897 continue;
5898 }
5899
5900 if (kIOMapUnique & options) {
5901 addr64_t phys;
5902 IOByteCount physLen;
5903
5904 // if (owner != this) continue;
5905
5906 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5907 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5908 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
5909 if (!phys || (physLen < length)) {
5910 continue;
5911 }
5912
5913 mapDesc = IOMemoryDescriptor::withAddressRange(
5914 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
5915 if (!mapDesc) {
5916 continue;
5917 }
5918 offset = 0;
5919 mapping->fOffset = offset;
5920 }
5921 } else {
5922 // look for a compatible existing mapping
5923 if (_mappings) {
5924 _mappings->iterateObjects(^(OSObject * object)
5925 {
5926 IOMemoryMap * lookMapping = (IOMemoryMap *) object;
5927 if ((result = lookMapping->copyCompatible(mapping))) {
5928 addMapping(result);
5929 result->setMemoryDescriptor(this, offset);
5930 return true;
5931 }
5932 return false;
5933 });
5934 }
5935 if (result || (options & kIOMapReference)) {
5936 if (result != mapping) {
5937 mapping->release();
5938 mapping = NULL;
5939 }
5940 continue;
5941 }
5942 }
5943
5944 if (!mapDesc) {
5945 mapDesc.reset(this, OSRetain);
5946 }
5947 IOReturn
5948 kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
5949 if (kIOReturnSuccess == kr) {
5950 result = mapping;
5951 mapDesc->addMapping(result);
5952 result->setMemoryDescriptor(mapDesc.get(), offset);
5953 } else {
5954 mapping->release();
5955 mapping = NULL;
5956 }
5957 }while (false);
5958
5959 UNLOCK;
5960
5961 return result;
5962 }
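/*
 * makeMapping() above resolves how the IOMemoryMap passed via __address
 * (always with kIOMap64Bit) actually gets backed:
 *  - kIOMapStatic: the mapping is recorded as-is; nothing is entered in the
 *    pmap here;
 *  - kIOMapUnique on a physical descriptor: a temporary descriptor for the
 *    physical range is created and mapped instead;
 *  - otherwise an existing compatible mapping is re-used via copyCompatible(),
 *    and only if none is found does doMap() create a fresh mapping.
 */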
5963
5964 void
5965 IOMemoryDescriptor::addMapping(
5966 IOMemoryMap * mapping )
5967 {
5968 if (mapping) {
5969 if (NULL == _mappings) {
5970 _mappings = OSSet::withCapacity(1);
5971 }
5972 if (_mappings) {
5973 _mappings->setObject( mapping );
5974 }
5975 }
5976 }
5977
5978 void
5979 IOMemoryDescriptor::removeMapping(
5980 IOMemoryMap * mapping )
5981 {
5982 if (_mappings) {
5983 _mappings->removeObject( mapping);
5984 }
5985 }
5986
5987 void
5988 IOMemoryDescriptor::setMapperOptions( uint16_t options)
5989 {
5990 _iomapperOptions = options;
5991 }
5992
5993 uint16_t
5994 IOMemoryDescriptor::getMapperOptions( void )
5995 {
5996 return _iomapperOptions;
5997 }
5998
5999 #ifndef __LP64__
6000 // obsolete initializers
6001 // - initWithOptions is the designated initializer
6002 bool
6003 IOMemoryDescriptor::initWithAddress(void * address,
6004 IOByteCount length,
6005 IODirection direction)
6006 {
6007 return false;
6008 }
6009
6010 bool
6011 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
6012 IOByteCount length,
6013 IODirection direction,
6014 task_t task)
6015 {
6016 return false;
6017 }
6018
6019 bool
6020 IOMemoryDescriptor::initWithPhysicalAddress(
6021 IOPhysicalAddress address,
6022 IOByteCount length,
6023 IODirection direction )
6024 {
6025 return false;
6026 }
6027
6028 bool
6029 IOMemoryDescriptor::initWithRanges(
6030 IOVirtualRange * ranges,
6031 UInt32 withCount,
6032 IODirection direction,
6033 task_t task,
6034 bool asReference)
6035 {
6036 return false;
6037 }
6038
6039 bool
6040 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
6041 UInt32 withCount,
6042 IODirection direction,
6043 bool asReference)
6044 {
6045 return false;
6046 }
6047
6048 void *
6049 IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
6050 IOByteCount * lengthOfSegment)
6051 {
6052 return NULL;
6053 }
6054 #endif /* !__LP64__ */
6055
6056 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6057
6058 bool
6059 IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
6060 {
6061 OSSharedPtr<OSSymbol const> keys[2] = {NULL};
6062 OSSharedPtr<OSObject> values[2] = {NULL};
6063 OSSharedPtr<OSArray> array;
6064
6065 struct SerData {
6066 user_addr_t address;
6067 user_size_t length;
6068 };
6069
6070 unsigned int index;
6071
6072 IOOptionBits type = _flags & kIOMemoryTypeMask;
6073
6074 if (s == NULL) {
6075 return false;
6076 }
6077
6078 array = OSArray::withCapacity(4);
6079 if (!array) {
6080 return false;
6081 }
6082
6083 OSDataAllocation<struct SerData> vcopy(_rangesCount, OSAllocateMemory);
6084 if (!vcopy) {
6085 return false;
6086 }
6087
6088 keys[0] = OSSymbol::withCString("address");
6089 keys[1] = OSSymbol::withCString("length");
6090
6091 // Copy the volatile data so we don't have to allocate memory
6092 // while the lock is held.
6093 LOCK;
6094 if (vcopy.size() == _rangesCount) {
6095 Ranges vec = _ranges;
6096 for (index = 0; index < vcopy.size(); index++) {
6097 mach_vm_address_t addr; mach_vm_size_t len;
6098 getAddrLenForInd(addr, len, type, vec, index, _task);
6099 vcopy[index].address = addr;
6100 vcopy[index].length = len;
6101 }
6102 } else {
6103 // The descriptor changed out from under us. Give up.
6104 UNLOCK;
6105 return false;
6106 }
6107 UNLOCK;
6108
6109 for (index = 0; index < vcopy.size(); index++) {
6110 user_addr_t addr = vcopy[index].address;
6111 IOByteCount len = (IOByteCount) vcopy[index].length;
6112 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
6113 if (values[0] == NULL) {
6114 return false;
6115 }
6116 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
6117 if (values[1] == NULL) {
6118 return false;
6119 }
6120 OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
6121 if (dict == NULL) {
6122 return false;
6123 }
6124 array->setObject(dict.get());
6125 dict.reset();
6126 values[0].reset();
6127 values[1].reset();
6128 }
6129
6130 return array->serialize(s);
6131 }
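/*
 * serialize() above emits the descriptor's ranges as an OSArray of
 * dictionaries, each carrying an "address" and a "length" OSNumber. The
 * ranges are copied under the lock first so that the OSNumber/OSDictionary
 * allocations happen without holding gIOMemoryLock.
 */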
6132 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6133
6134 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
6135 #ifdef __LP64__
6136 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
6137 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
6138 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
6139 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
6140 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
6141 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
6142 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
6143 #else /* !__LP64__ */
6144 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
6145 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
6146 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
6147 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
6148 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
6149 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
6150 OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
6151 #endif /* !__LP64__ */
6152 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
6153 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
6154 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
6155 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
6156 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
6157 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
6158 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
6159 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
6160
6161 /* in reality this is an ioGMDData + upl_page_info_t + ioPLBlock */
6162 KALLOC_TYPE_VAR_DEFINE(KT_IOMD_MIXED_DATA,
6163 struct ioGMDData, struct ioPLBlock, KT_DEFAULT);
6164
6165 /* ex-inline function implementation */
6166 IOPhysicalAddress
6167 IOMemoryDescriptor::getPhysicalAddress()
6168 {
6169 return getPhysicalSegment( 0, NULL );
6170 }
6171
6172 OSDefineMetaClassAndStructors(_IOMemoryDescriptorMixedData, OSObject)
6173
6174 OSPtr<_IOMemoryDescriptorMixedData>
6175 _IOMemoryDescriptorMixedData::withCapacity(size_t capacity)
6176 {
6177 OSSharedPtr<_IOMemoryDescriptorMixedData> me = OSMakeShared<_IOMemoryDescriptorMixedData>();
6178 if (me && !me->initWithCapacity(capacity)) {
6179 return nullptr;
6180 }
6181 return me;
6182 }
6183
6184 bool
6185 _IOMemoryDescriptorMixedData::initWithCapacity(size_t capacity)
6186 {
6187 if (_data && (!capacity || (_capacity < capacity))) {
6188 freeMemory();
6189 }
6190
6191 if (!OSObject::init()) {
6192 return false;
6193 }
6194
6195 if (!_data && capacity) {
6196 _data = kalloc_type_var_impl(KT_IOMD_MIXED_DATA, capacity,
6197 Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT), NULL);
6198 if (!_data) {
6199 return false;
6200 }
6201 _capacity = capacity;
6202 }
6203
6204 _length = 0;
6205
6206 return true;
6207 }
6208
6209 void
6210 _IOMemoryDescriptorMixedData::free()
6211 {
6212 freeMemory();
6213 OSObject::free();
6214 }
6215
6216 void
6217 _IOMemoryDescriptorMixedData::freeMemory()
6218 {
6219 kfree_type_var_impl(KT_IOMD_MIXED_DATA, _data, _capacity);
6220 _data = nullptr;
6221 _capacity = _length = 0;
6222 }
6223
6224 bool
6225 _IOMemoryDescriptorMixedData::appendBytes(const void * bytes, size_t length)
6226 {
6227 const auto oldLength = getLength();
6228 size_t newLength;
6229 if (os_add_overflow(oldLength, length, &newLength)) {
6230 return false;
6231 }
6232
6233 if (!setLength(newLength)) {
6234 return false;
6235 }
6236
6237 unsigned char * const dest = &(((unsigned char *)_data)[oldLength]);
6238 if (bytes) {
6239 bcopy(bytes, dest, length);
6240 }
6241
6242 return true;
6243 }
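/*
 * appendBytes(NULL, n) only grows the logical length by n without copying,
 * which lets a caller reserve space to fill in afterwards; otherwise
 * appendBytes() behaves like an overflow-checked append, growing the buffer
 * through setLength() below when the capacity is exceeded.
 */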
6244
6245 bool
6246 _IOMemoryDescriptorMixedData::setLength(size_t length)
6247 {
6248 if (!_data || (length > _capacity)) {
6249 void *newData;
6250
6251 newData = __krealloc_type(KT_IOMD_MIXED_DATA, _data, _capacity,
6252 length, Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_IOKIT),
6253 NULL);
6254 if (!newData) {
6255 return false;
6256 }
6257
6258 _data = newData;
6259 _capacity = length;
6260 }
6261
6262 _length = length;
6263 return true;
6264 }
6265
6266 const void *
6267 _IOMemoryDescriptorMixedData::getBytes() const
6268 {
6269 return _length ? _data : nullptr;
6270 }
6271
6272 size_t
6273 _IOMemoryDescriptorMixedData::getLength() const
6274 {
6275 return _data ? _length : 0;
6276 }
6277