/*
 * Copyright (c) 2014-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <Kernel/IOKitKernelInternal.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOService.h>
#include "Tests.h"

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOGuardPageMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <sys/uio.h>
#include <libkern/sysctl.h>
#include <sys/sysctl.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern_xnu.h>
__END_DECLS


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if DEVELOPMENT || DEBUG

extern SInt32 gIOMemoryReferenceCount;

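/*
 * Exercises IOMultiMemoryDescriptor over overlapping descriptors and checks
 * getDMAMapLength() at each step. The backing buffer is stamped so page n of
 * `data` holds the byte (0xD0 | n); the 11 pages of the multi descriptor then
 * read, by stamp:
 *   mds[0]: d4 d5 d6 d7 d0 d1 d2 d3   (two 4-page ranges, order swapped)
 *   mds[1]: d7 d0                     (2-page sub-range at page 3 of mds[0])
 *   mds[2]: d3                        (1-page sub-range at page 7 of mds[0])
 * so a mapping at offset ptoa(7) sees d3 d7 d0 d3, matching the asserts at
 * the end of the test.
 */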
static int
IOMultMemoryDescriptorTest(int newValue)
{
    IOMemoryDescriptor * mds[3];
    IOMultiMemoryDescriptor * mmd;
    IOMemoryMap * map;
    void * addr;
    uint8_t * data;
    uint32_t i;
    IOAddressRange ranges[2];

    data = (typeof(data))IOMallocAligned(ptoa(8), page_size);
    for (i = 0; i < ptoa(8); i++) {
        data[i] = ((uint8_t) atop(i)) | 0xD0;
    }

    ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
    ranges[0].length = ptoa(4);
    ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
    ranges[1].length = ptoa(4);

    mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, 2, kIODirectionOutIn, kernel_task);
    assert(mds[0]);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
        assert(0 == dmaOffset);
        assert(ptoa(1) == dmaLen);
    }
    mds[0]->release();
    mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) (data + page_size - 2), 4, kIODirectionOutIn, kernel_task);
    assert(mds[0]);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
        assert((page_size - 2) == dmaOffset);
        assert(ptoa(2) == dmaLen);
    }
    mds[0]->release();

    mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
    assert(mds[0]);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mds[0]->getDMAMapLength(&dmaOffset);
        assert(0 == dmaOffset);
        assert(ptoa(8) == dmaLen);
    }
    mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mds[1]->getDMAMapLength(&dmaOffset);
        assert(0 == dmaOffset);
        assert(ptoa(2) == dmaLen);
    }
    mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);

    mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
    {
        uint64_t dmaLen, dmaOffset;
        dmaLen = mmd->getDMAMapLength(&dmaOffset);
        assert(0 == dmaOffset);
        assert(ptoa(11) == dmaLen);
    }
    mds[2]->release();
    mds[1]->release();
    mds[0]->release();
    map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall, ptoa(7), mmd->getLength() - ptoa(7));
    mmd->release();
    assert(map);

    addr = (void *) map->getVirtualAddress();
    assert(ptoa(4) == map->getLength());
    assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
    assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
    assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
    assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
    map->release();
    IOFreeAligned(data, ptoa(8));

    return 0;
}


// <rdar://problem/30102458>
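/*
 * Loops over kIODirectionIn/Out/InOut pageable buffers, forcing
 * IODMACommand::kForceDoubleBuffer on synchronize() and verifying that data
 * written through the buffer (or through the double-buffered copy, via
 * IOMappedWrite32 to the generated segment address) survives the
 * prepare()/complete() round trip.
 */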
static int
IODMACommandForceDoubleBufferTest(int newValue)
{
    IOReturn ret;
    IOBufferMemoryDescriptor * bmd;
    IODMACommand * dma;
    uint32_t dir, data;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize = sizeof(segOptions),
        .fNumAddressBits = 64,
        .fMaxSegmentSize = 0x2000,
        .fMaxTransferSize = 128 * 1024,
        .fAlignment = 1,
        .fAlignmentLength = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32 numSegments;
    UInt64 dmaOffset;


    for (dir = kIODirectionIn;; dir++) {
        bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
            dir | kIOMemoryPageable, ptoa(8));
        assert(bmd);
        {
            uint64_t dmaLen, dmaOffset;
            dmaLen = bmd->getDMAMapLength(&dmaOffset);
            assert(0 == dmaOffset);
            assert(ptoa(8) == dmaLen);
        }

        ((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir;

        ret = bmd->prepare((IODirection) dir);
        assert(kIOReturnSuccess == ret);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
            kIODMAMapOptionMapped,
            NULL, NULL);
        assert(dma);
        ret = dma->setMemoryDescriptor(bmd, true);
        assert(kIOReturnSuccess == ret);

        ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut);
        assert(kIOReturnSuccess == ret);

        dmaOffset = 0;
        numSegments = 1;
        ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);

        if (kIODirectionOut & dir) {
            data = ((uint32_t*) bmd->getBytesNoCopy())[0];
            assertf((0x53535300 | dir) == data, "mismatch 0x%x", data);
        }
        if (kIODirectionIn & dir) {
            IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir);
        }

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);
        dma->release();

        bmd->complete((IODirection) dir);

        if (kIODirectionIn & dir) {
            data = ((uint32_t*) bmd->getBytesNoCopy())[0];
            assertf((0x11223300 | dir) == data, "mismatch 0x%x", data);
        }

        bmd->release();

        if (dir == kIODirectionInOut) {
            break;
        }
    }

    return 0;
}

// <rdar://problem/34322778>
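/*
 * When a system mapper (IOMapper::gSystem) is present, builds a descriptor
 * over deliberately fragmented VM entries and checks that the DMA segment
 * address produced with a device mapper differs from the raw physical
 * address, i.e. the transfer really went through the IOMMU. "XHC1" is used
 * here only as a convenient device that has a mapper; the test is skipped
 * (mapper NULL) if it is not found.
 */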
static int __unused
IODMACommandLocalMappedNonContig(int newValue)
{
    IOReturn kr;
    IOMemoryDescriptor * md;
    IODMACommand * dma;
    OSDictionary * matching;
    IOService * device;
    IOMapper * mapper;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize = sizeof(segOptions),
        .fNumAddressBits = 64,
        .fMaxSegmentSize = 128 * 1024,
        .fMaxTransferSize = 128 * 1024,
        .fAlignment = 1,
        .fAlignmentLength = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32 numSegments;
    UInt64 dmaOffset;
    UInt64 segPhys;
    mach_vm_address_t buffer;
    vm_size_t bufSize = ptoa(4);

    if (!IOMapper::gSystem) {
        return 0;
    }

    buffer = 0;
    kr = mach_vm_allocate_kernel(kernel_map, &buffer, bufSize,
        VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IOKIT));
    assert(KERN_SUCCESS == kr);

    // fragment the vm entries
    kr = mach_vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE);
    assert(KERN_SUCCESS == kr);

    md = IOMemoryDescriptor::withAddressRange(
        buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task);
    assert(md);
    kr = md->prepare(kIODirectionOutIn);
    assert(kIOReturnSuccess == kr);

    segPhys = md->getPhysicalSegment(0, NULL, 0);

    matching = IOService::nameMatching("XHC1");
    assert(matching);
    device = IOService::copyMatchingService(matching);
    matching->release();
    mapper = device ? IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL;
    OSSafeReleaseNULL(device);

    dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
        kIODMAMapOptionMapped,
        mapper, NULL);
    assert(dma);
    kr = dma->setMemoryDescriptor(md, true);
    assert(kIOReturnSuccess == kr);

    dmaOffset = 0;
    numSegments = 1;
    kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
    assert(kIOReturnSuccess == kr);
    assert(1 == numSegments);

    if (mapper) {
        assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma);
    }

    kr = dma->clearMemoryDescriptor(true);
    assert(kIOReturnSuccess == kr);
    dma->release();

    kr = md->complete(kIODirectionOutIn);
    assert(kIOReturnSuccess == kr);
    md->release();

    kr = mach_vm_deallocate(kernel_map, buffer, bufSize);
    assert(KERN_SUCCESS == kr);
    OSSafeReleaseNULL(mapper);

    return 0;
}

// <rdar://problem/30102458>
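/*
 * Builds a kIOMemoryRemote descriptor over two fabricated address ranges
 * (TASK_NULL, so nothing is ever dereferenced) and checks that both
 * getPhysicalSegment() and an unmapped, iterate-only IODMACommand walk the
 * ranges back verbatim. Remote memory must not be mapped or read locally,
 * which is why the map()/readBytes() calls below stay commented out.
 */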
static int
IOMemoryRemoteTest(int newValue)
{
    IOReturn ret;
    IOMemoryDescriptor * md;
    IOByteCount offset, length;
    addr64_t addr;
    uint32_t idx;

    IODMACommand * dma;
    IODMACommand::SegmentOptions segOptions =
    {
        .fStructSize = sizeof(segOptions),
        .fNumAddressBits = 64,
        .fMaxSegmentSize = 0x2000,
        .fMaxTransferSize = 128 * 1024,
        .fAlignment = 1,
        .fAlignmentLength = 1,
        .fAlignmentInternalSegments = 1
    };
    IODMACommand::Segment64 segments[1];
    UInt32 numSegments;
    UInt64 dmaOffset;

    IOAddressRange ranges[2] = {
        { 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210ULL, 0x2000 },
    };

    md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn | kIOMemoryRemote, TASK_NULL);
    assert(md);

    // md->map();
    // md->readBytes(0, &idx, sizeof(idx));

    ret = md->prepare(kIODirectionOutIn);
    assert(kIOReturnSuccess == ret);

    printf("remote md flags 0x%qx, r %d\n",
        md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags())));

    for (offset = 0, idx = 0; true; offset += length, idx++) {
        addr = md->getPhysicalSegment(offset, &length, 0);
        if (!length) {
            break;
        }
        assert(idx < 2);
        assert(addr == ranges[idx].address);
        assert(length == ranges[idx].length);
    }
    assert(offset == md->getLength());

    dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
        kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly,
        NULL, NULL);
    assert(dma);
    ret = dma->setMemoryDescriptor(md, true);
    assert(kIOReturnSuccess == ret);

    for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++) {
        numSegments = 1;
        ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        assert(idx < 2);
        assert(segments[0].fIOVMAddr == ranges[idx].address);
        assert(segments[0].fLength == ranges[idx].length);
    }
    assert(dmaOffset == md->getLength());

    ret = dma->clearMemoryDescriptor(true);
    assert(kIOReturnSuccess == ret);
    dma->release();
    md->complete(kIODirectionOutIn);
    md->release();

    return 0;
}

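/*
 * Verifies kIOMapPrefault: with the mapping faulted in up front, the first
 * access must not fault, so the read is performed while holding a simple
 * lock, where taking a page fault would be fatal.
 */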
static IOReturn
IOMemoryPrefaultTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryMap * map;
    IOReturn kr;
    uint32_t data;
    uint32_t * p;
    IOSimpleLock * lock;

    lock = IOSimpleLockAlloc();
    assert(lock);

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
        kIODirectionOutIn | kIOMemoryPageable, ptoa(8));
    assert(bmd);
    kr = bmd->prepare();
    assert(KERN_SUCCESS == kr);

    map = bmd->map(kIOMapPrefault);
    assert(map);

    p = (typeof(p))map->getVirtualAddress();
    IOSimpleLockLock(lock);
    data = p[0];
    IOSimpleLockUnlock(lock);

    IOLog("IOMemoryPrefaultTest %d\n", data);

    map->release();
    bmd->release();
    IOSimpleLockFree(lock);

    return kIOReturnSuccess;
}

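/*
 * Requests a buffer with a capacity of 0xffffffffffffffff bytes; the
 * internal page rounding would overflow, so allocation must fail cleanly
 * and return NULL rather than wrap.
 */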
static IOReturn
IOBMDOverflowTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;

    bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, kIOMemoryPageable | kIODirectionOut,
        0xffffffffffffffff, 0);
    assert(NULL == bmd);

    return kIOReturnSuccess;
}

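/*
 * Checks that a buffer created with a 0x4000-byte capacity can have its
 * length reduced with setLength() and still be mapped into the caller's
 * task at each new length.
 */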
static IOReturn
IOBMDSetLengthMapTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryMap * map;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(
        kernel_task, kIOMemoryDirectionOutIn | kIOMemoryKernelUserShared, 0x4000, 0x4000);
    assert(bmd);

    bmd->setLength(0x100);
    map = bmd->createMappingInTask(current_task(), 0, kIOMapAnywhere, 0, 0);
    assert(map);
    OSSafeReleaseNULL(map);

    bmd->setLength(0x200);
    map = bmd->createMappingInTask(current_task(), 0, kIOMapAnywhere, 0, 0);
    assert(map);
    OSSafeReleaseNULL(map);

    bmd->release();

    return kIOReturnSuccess;
}

// <rdar://problem/26375234>
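/*
 * A zero-length descriptor must be creatable and must survive
 * prepare()/complete() without faulting.
 */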
static IOReturn
ZeroLengthTest(int newValue)
{
    IOMemoryDescriptor * md;

    md = IOMemoryDescriptor::withAddressRange(
        0, 0, kIODirectionNone, current_task());
    assert(md);
    md->prepare();
    md->complete();
    md->release();
    return 0;
}

// <rdar://problem/27002624>
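/*
 * Mapping a pageable buffer at a fixed kernel address (0x2000, no
 * kIOMapAnywhere) is invalid; createMappingInTask() must fail rather than
 * map over existing memory.
 */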
static IOReturn
BadFixedAllocTest(int newValue)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryMap * map;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
        kIODirectionIn | kIOMemoryPageable, ptoa(1));
    assert(bmd);
    map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
    assert(!map);

    bmd->release();
    return 0;
}

// <rdar://problem/26466423>
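/*
 * Exercises the kIODirectionPrepareNoZeroFill and
 * kIODirectionCompleteWithDataValid flags on a pageable buffer: nested
 * prepare()/complete() pairs, with and without the flags, must balance.
 */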
static IOReturn
IODirectionPrepareNoZeroFillTest(int newValue)
{
    IOBufferMemoryDescriptor * bmd;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
        kIODirectionIn | kIOMemoryPageable, ptoa(24));
    assert(bmd);
    bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill));
    bmd->prepare(kIODirectionIn);
    bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid));
    bmd->complete(kIODirectionIn);
    bmd->release();
    return 0;
}

// <rdar://problem/28190483>
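/*
 * Maps a user-range descriptor read-only, with or without
 * kIOMemoryMapCopyOnWrite, then rewrites one source page via copyout():
 * a copy-on-write mapping must keep showing the old bytes (0x33), a plain
 * mapping the new ones (0x99). Also logs how long map() took.
 */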
static IOReturn
IOMemoryMapTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryDescriptor * md;
    IOMemoryMap * map;
    uint32_t data;
    user_addr_t p;
    uint8_t * p2;
    int r;
    uint64_t time, nano;

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
        kIODirectionOutIn | kIOMemoryPageable, 0x4018 + 0x800);
    assert(bmd);
    p = (typeof(p))bmd->getBytesNoCopy();
    p += 0x800;
    data = 0x11111111;
    r = copyout(&data, p, sizeof(data));
    assert(r == 0);
    data = 0x22222222;
    r = copyout(&data, p + 0x1000, sizeof(data));
    assert(r == 0);
    data = 0x33333333;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(r == 0);
    data = 0x44444444;
    r = copyout(&data, p + 0x3000, sizeof(data));
    assert(r == 0);

    md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
        kIODirectionOut | options,
        current_task());
    assert(md);
    time = mach_absolute_time();
    map = md->map(kIOMapReadOnly);
    time = mach_absolute_time() - time;
    assert(map);
    absolutetime_to_nanoseconds(time, &nano);

    p2 = (typeof(p2))map->getVirtualAddress();
    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x33 == p2[0x2000]);
    assert(0x44 == p2[0x3000]);

    data = 0x99999999;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(r == 0);

    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x44 == p2[0x3000]);
    if (kIOMemoryMapCopyOnWrite & options) {
        assert(0x33 == p2[0x2000]);
    } else {
        assert(0x99 == p2[0x2000]);
    }

    IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
        kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
        nano);

    map->release();
    md->release();
    bmd->release();

    return kIOReturnSuccess;
}

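/* Runs IOMemoryMapTest both without and with kIOMemoryMapCopyOnWrite. */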
static int
IOMemoryMapCopyOnWriteTest(int newValue)
{
    IOMemoryMapTest(0);
    IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
    return 0;
}

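/*
 * Tags a pageable allocation with a kern_allocation_name so it can be
 * attributed in allocation accounting. With newValue == 7 the buffer is
 * intentionally not released, presumably so the named allocation remains
 * visible for inspection.
 */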
static int
AllocationNameTest(int newValue)
{
    IOMemoryDescriptor * bmd;
    kern_allocation_name_t name, prior;

    name = kern_allocation_name_allocate("com.apple.iokit.test", 0);
    assert(name);

    prior = thread_set_allocation_name(name);

    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
        kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
        ptoa(13));
    assert(bmd);
    bmd->prepare();

    thread_set_allocation_name(prior);
    kern_allocation_name_release(name);

    if (newValue != 7) {
        bmd->release();
    }

    return 0;
}

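/*
 * Brackets a data page with 1-4 guard pages on each side using
 * IOGuardPageMemoryDescriptor + IOMultiMemoryDescriptor, then checks the
 * mapped data, the mapping length, and (via mach_vm_region) that the guard
 * ranges are VM_PROT_NONE. Afterwards it sweeps buffer sizes through
 * inTaskWithGuardPages() to confirm page-rounded lengths and page-aligned,
 * correctly preserved contents.
 */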
static IOReturn
IOGuardPageMDTest(int newValue)
{
    constexpr size_t MAX_LEFT_GUARD_PAGES = 5;
    constexpr size_t MAX_RIGHT_GUARD_PAGES = 5;

    IOMemoryDescriptor * mds[3];
    IOMemoryDescriptor * dataMD;
    IOMultiMemoryDescriptor * mmd;
    IOBufferMemoryDescriptor * iobmd;
    IOMemoryMap * map;
    void * addr;
    uint8_t * data;
    uint32_t i;

    data = (typeof(data))IOMallocAligned(page_size, page_size);
    for (i = 0; i < page_size; i++) {
        data[i] = (uint8_t)(i & 0xFF);
    }

    dataMD = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, page_size, kIODirectionOutIn, kernel_task);
    assert(dataMD);


    for (size_t leftGuardSize = 1; leftGuardSize < MAX_LEFT_GUARD_PAGES; leftGuardSize++) {
        for (size_t rightGuardSize = 1; rightGuardSize < MAX_RIGHT_GUARD_PAGES; rightGuardSize++) {
            mds[0] = IOGuardPageMemoryDescriptor::withSize(page_size * leftGuardSize);
            assert(mds[0]);

            mds[1] = dataMD;
            mds[1]->retain();

            mds[2] = IOGuardPageMemoryDescriptor::withSize(page_size * rightGuardSize);
            assert(mds[2]);

            mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);

            OSSafeReleaseNULL(mds[2]);
            OSSafeReleaseNULL(mds[1]);
            OSSafeReleaseNULL(mds[0]);

            map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, mmd->getLength());

            OSSafeReleaseNULL(mmd);
            assert(map);
            addr = (void *)map->getAddress();

            // check data
            for (i = 0; i < page_size; i++) {
                assert(*(uint8_t *)((uintptr_t)addr + page_size * leftGuardSize + i) == (uint8_t)(i & 0xFF));
            }

            // check map length
            assert(page_size * leftGuardSize + page_size + page_size * rightGuardSize == map->getLength());

            // check page protections
            for (i = 0; i < leftGuardSize + 1 + rightGuardSize; i++) {
                mach_vm_address_t regionAddr = (vm_address_t)addr + i * page_size;
                mach_vm_size_t regionSize;
                vm_region_extended_info regionInfo;
                mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
                mach_port_t unused;
                kern_return_t kr = mach_vm_region(kernel_map, &regionAddr, &regionSize, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&regionInfo, &count, &unused);
                assert(kr == KERN_SUCCESS);
                if (i < leftGuardSize || i > leftGuardSize + 1) {
                    assert(regionInfo.protection == VM_PROT_NONE);
                }
            }
            OSSafeReleaseNULL(map);
        }
    }

    OSSafeReleaseNULL(dataMD);
    IOFreeAligned(data, page_size);

    for (size_t iobmdCapacity = page_size / 8; iobmdCapacity < page_size * 10; iobmdCapacity += page_size / 8) {
        iobmd = IOBufferMemoryDescriptor::inTaskWithGuardPages(kernel_task, kIODirectionOutIn, iobmdCapacity);

        // Capacity should be rounded up to page size
        assert(iobmd->getLength() == round_page(iobmdCapacity));

        // Buffer should be page aligned
        addr = iobmd->getBytesNoCopy();
        assert((vm_offset_t)addr == round_page((vm_offset_t)addr));

        // fill buffer
        for (size_t i = 0; i < iobmdCapacity; i++) {
            *((char *)addr + i) = (char)(i & 0xFF);
        }

        map = iobmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique, 0, iobmd->getLength());
        assert(map->getLength() == iobmd->getLength());

        // check buffer
        for (size_t i = 0; i < iobmdCapacity; i++) {
            assert(*((char *)map->getAddress() + i) == (char)(i & 0xFF));
        }

        OSSafeReleaseNULL(map);
        OSSafeReleaseNULL(iobmd);
    }

    return kIOReturnSuccess;
}

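/*
 * Checks setContext()/copyContext(): the context can be set, replaced, and
 * cleared; copyContext() returns a retained reference; and the descriptor
 * drops its own reference on release, leaving both strings with a retain
 * count of 1.
 */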
static IOReturn
IOMDContextTest(int newValue)
{
    IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
        kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared,
        ptoa(13));

    OSObject * current = NULL;
    OSString * firstString = OSString::withCStringNoCopy("firstString");
    OSString * secondString = OSString::withCStringNoCopy("secondString");

    assert(bmd->copyContext() == NULL);

    bmd->setContext(NULL);
    assert(bmd->copyContext() == NULL);

    bmd->setContext(firstString);
    current = bmd->copyContext();
    assert(current == firstString);
    OSSafeReleaseNULL(current);

    bmd->setContext(NULL);
    assert(bmd->copyContext() == NULL);

    bmd->setContext(secondString);
    current = bmd->copyContext();
    assert(current == secondString);
    OSSafeReleaseNULL(current);

    bmd->release();

    assert(firstString->getRetainCount() == 1);
    assert(secondString->getRetainCount() == 1);

    firstString->release();
    secondString->release();

    return kIOReturnSuccess;
}

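/*
 * Entry point: runs the individual tests above, then sweeps
 * IOMemoryDescriptor::withAddressRanges over a 16 MB buffer with many
 * range/offset/size combinations, comparing each mapping's contents against
 * readBytes(). The #if 0 block preserves older manual experiments
 * (nested IOMultiMemoryDescriptors, mapper timing, map redirection) for
 * reference.
 */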
int
IOMemoryDescriptorTest(int newValue)
{
    int result;

    IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);

#if 0
    if (6 == newValue) {
        IOMemoryDescriptor * sbmds[3];
        IOMultiMemoryDescriptor * smmd;
        IOMemoryDescriptor * mds[2];
        IOMultiMemoryDescriptor * mmd;
        IOMemoryMap * map;

        sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2));
        sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3));
        smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds) / sizeof(sbmds[0]), kIODirectionOutIn, false);

        mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        mds[1] = smmd;
        mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false);
        map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall);
        assert(map);
        map->release();
        mmd->release();
        mds[0]->release();
        mds[1]->release();
        sbmds[0]->release();
        sbmds[1]->release();
        sbmds[2]->release();

        return 0;
    } else if (5 == newValue) {
        IOReturn ret;
        IOMemoryDescriptor * md;
        IODMACommand * dma;
        IODMACommand::SegmentOptions segOptions =
        {
            .fStructSize = sizeof(segOptions),
            .fNumAddressBits = 64,
            .fMaxSegmentSize = 4096,
            .fMaxTransferSize = 128 * 1024,
            .fAlignment = 4,
            .fAlignmentLength = 4,
            .fAlignmentInternalSegments = 0x1000
        };

        IOAddressRange ranges[3][2] =
        {
            {
                { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
                { 0, 0 },
            },
            {
                { ranges[0][0].address, 0x10 },
                { 0x3000 + ranges[0][0].address, 0xff0 },
            },
            {
                { ranges[0][0].address, 0x2ffc },
                { trunc_page(ranges[0][0].address), 0x800 },
            },
        };
        static const uint32_t rangesCount[3] = { 1, 2, 2 };
        uint32_t test;

        for (test = 0; test < 3; test++) {
            kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
                ranges[test][0].address, ranges[test][0].length,
                ranges[test][1].address, ranges[test][1].length);

            md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
            assert(md);
            ret = md->prepare();
            assert(kIOReturnSuccess == ret);
            dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
                IODMACommand::kMapped, NULL, NULL);
            assert(dma);
            ret = dma->setMemoryDescriptor(md, true);
            if (kIOReturnSuccess == ret) {
                IODMACommand::Segment64 segments[1];
                UInt32 numSegments;
                UInt64 offset;

                offset = 0;
                do {
                    numSegments = 1;
                    ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
                    assert(kIOReturnSuccess == ret);
                    assert(1 == numSegments);
                    kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
                } while (offset < md->getLength());

                ret = dma->clearMemoryDescriptor(true);
                assert(kIOReturnSuccess == ret);
                dma->release();
            }
            md->release();
        }

        return kIOReturnSuccess;
    } else if (4 == newValue) {
        IOService * isp;
        IOMapper * mapper;
        IOBufferMemoryDescriptor * md1;
        IODMACommand * dma;
        IOReturn ret;
        size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
        uint64_t start, time, nano;

        isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
        assert(isp);
        mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
        assert(mapper);

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            bufSize, page_size);

        ret = md1->prepare();
        assert(kIOReturnSuccess == ret);

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        uint64_t mapped;
        uint64_t mappedLength;

        start = mach_absolute_time();

        ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
        assert(kIOReturnSuccess == ret);

        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);
        kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);

        assert(md1);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
            32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);

        assert(dma);

        start = mach_absolute_time();
        ret = dma->setMemoryDescriptor(md1, true);
        assert(kIOReturnSuccess == ret);
        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);


        IODMACommand::Segment32 segments[1];
        UInt32 numSegments = 1;
        UInt64 offset;

        offset = 0;
        ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);

        md1->release();

        return kIOReturnSuccess;
    }

    if (3 == newValue) {
        IOBufferMemoryDescriptor * md1;
        IOBufferMemoryDescriptor * md2;
        IOMemoryMap * map1;
        IOMemoryMap * map2;
        uint32_t * buf1;
        uint32_t * buf2;
        IOReturn err;

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            64 * 1024, page_size);
        assert(md1);
        map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map1);
        buf1 = (uint32_t *) map1->getVirtualAddress();

        md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
            kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
            64 * 1024, page_size);
        assert(md2);
        map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map2);
        buf2 = (uint32_t *) map2->getVirtualAddress();

        memset(buf1, 0x11, 64 * 1024L);
        memset(buf2, 0x22, 64 * 1024L);

        kprintf("md1 %p, map1 %p, buf1 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);

        kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        err = map1->redirect(md2, 0, 0ULL);
        kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf2[0]);
        assert(0x22222222 == buf1[0]);
        err = map1->redirect(md1, 0, 0ULL);
        kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        map1->release();
        map2->release();
        md1->release();
        md2->release();
    }
#endif

    // result = IODMACommandLocalMappedNonContig(newValue);
    // if (result) return (result);

    result = IODMACommandForceDoubleBufferTest(newValue);
    if (result) {
        return result;
    }

    result = AllocationNameTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryMapCopyOnWriteTest(newValue);
    if (result) {
        return result;
    }

    result = IOMultMemoryDescriptorTest(newValue);
    if (result) {
        return result;
    }

    result = IOBMDOverflowTest(newValue);
    if (result) {
        return result;
    }

    result = IOBMDSetLengthMapTest(newValue);
    if (result) {
        return result;
    }

    result = ZeroLengthTest(newValue);
    if (result) {
        return result;
    }

    result = IODirectionPrepareNoZeroFillTest(newValue);
    if (result) {
        return result;
    }

    result = BadFixedAllocTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryRemoteTest(newValue);
    if (result) {
        return result;
    }

    result = IOMemoryPrefaultTest(newValue);
    if (result) {
        return result;
    }

    result = IOGuardPageMDTest(newValue);
    if (result) {
        return result;
    }

    result = IOMDContextTest(newValue);
    if (result) {
        return result;
    }

    IOGeneralMemoryDescriptor * md;
    mach_vm_offset_t data[2];
    vm_size_t bsize = 16 * 1024 * 1024;
    vm_size_t srcsize, srcoffset, mapoffset, size;
    kern_return_t kr;

    data[0] = data[1] = 0;
    kr = mach_vm_allocate_kernel(kernel_map, &data[0], bsize,
        VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IOKIT));
    assert(KERN_SUCCESS == kr);

    mach_vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
    mach_vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

    IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

    uint32_t idx, offidx;
    for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) {
        ((uint32_t*)data[0])[idx] = idx;
    }

    for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c)) {
        for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc)) {
            IOAddressRange ranges[3];
            uint32_t rangeCount = 1;

            bzero(&ranges[0], sizeof(ranges));
            ranges[0].address = data[0] + srcoffset;
            ranges[0].length = srcsize;
            ranges[1].address = ranges[2].address = data[0];

            if (srcsize > ptoa(5)) {
                ranges[0].length = 7634;
                ranges[1].length = 9870;
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[1].address = ranges[0].address + ranges[0].length;
                ranges[2].address = ranges[1].address + ranges[1].length;
                rangeCount = 3;
            } else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
                ranges[0].length = ptoa(1);
                ranges[1].length = ptoa(1);
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[0].address = data[0] + srcoffset + ptoa(1);
                ranges[1].address = data[0] + srcoffset;
                ranges[2].address = ranges[0].address + ranges[0].length;
                rangeCount = 3;
            }

            md = OSDynamicCast(IOGeneralMemoryDescriptor,
                IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
            assert(md);

            IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
                (long) srcsize, (long) srcoffset,
                (long long) ranges[0].address - data[0], (long long) ranges[0].length,
                (long long) ranges[1].address - data[0], (long long) ranges[1].length,
                (long long) ranges[2].address - data[0], (long long) ranges[2].length);

            if (kIOReturnSuccess == kr) {
                for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) {
                    for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200)) {
                        IOMemoryMap * map;
                        mach_vm_address_t addr = 0;
                        uint32_t data;

                        // IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

                        map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapGuardedSmall, mapoffset, size);
                        if (map) {
                            addr = map->getAddress();
                        } else {
                            kr = kIOReturnError;
                        }

                        // IOLog(">mapRef 0x%x %llx\n", kr, addr);

                        if (kIOReturnSuccess != kr) {
                            break;
                        }
                        kr = md->prepare();
                        if (kIOReturnSuccess != kr) {
                            panic("prepare() fail 0x%x", kr);
                            break;
                        }
                        for (idx = 0; idx < size; idx += sizeof(uint32_t)) {
                            offidx = (typeof(offidx))(idx + mapoffset + srcoffset);
                            if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) {
                                if (offidx < ptoa(2)) {
                                    offidx ^= ptoa(1);
                                }
                            }
                            offidx /= sizeof(uint32_t);

                            if (offidx != ((uint32_t*)addr)[idx / sizeof(uint32_t)]) {
                                panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx,", md, map, idx, (long) srcoffset, (long) mapoffset);
                                kr = kIOReturnBadMedia;
                            } else {
                                if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) {
                                    data = 0;
                                }
                                if (offidx != data) {
                                    panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx,", md, map, idx, (long) srcoffset, (long) mapoffset);
                                    kr = kIOReturnBadMedia;
                                }
                            }
                        }
                        md->complete();
                        map->release();
                        // IOLog("unmapRef %llx\n", addr);
                    }
                    if (kIOReturnSuccess != kr) {
                        break;
                    }
                }
            }
            md->release();
            if (kIOReturnSuccess != kr) {
                break;
            }
        }
        if (kIOReturnSuccess != kr) {
            break;
        }
    }

    if (kIOReturnSuccess != kr) {
        IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
            (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
    }

    assert(kr == kIOReturnSuccess);

    mach_vm_deallocate(kernel_map, data[0], bsize);
    //mach_vm_deallocate(kernel_map, data[1], size);

    IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);

    return 0;
}


#endif /* DEVELOPMENT || DEBUG */