/* Mach vm map miscellaneous unit tests
 *
 * This test program serves as a regression test suite for legacy
 * VM issues. Ideally, each test is linked to a radar number and
 * performs a specific set of validations.
 *
 */
#include <darwintest.h>

#include <dlfcn.h>
#include <errno.h>
#include <ptrauth.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <sys/mman.h>
#include <sys/proc.h>

#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/task.h>
#include <mach/task_info.h>
#include <mach/shared_region.h>
#include <machine/cpu_capabilities.h>

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"),
	T_META_RUN_CONCURRENTLY(true));

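/*
 * Helper for the memory_entry_tagging test below (rdar://23334087).
 * Allocates three 16KB chunks with distinct VM tags (100, 101, 102), makes
 * a memory entry covering all of them (MAP_MEM_VM_COPY on one pass,
 * MAP_MEM_VM_SHARE on the other), maps the entry back in, and verifies
 * that each resulting region carries the original per-chunk tag, unless
 * the new mapping overrides it with VM_MAKE_TAG(200) ("override_tag").
 */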
static void
test_memory_entry_tagging(int override_tag)
{
	int                     pass;
	int                     do_copy;
	kern_return_t           kr;
	mach_vm_address_t       vmaddr_orig, vmaddr_shared, vmaddr_copied;
	mach_vm_size_t          vmsize_orig, vmsize_shared, vmsize_copied;
	mach_vm_address_t       *vmaddr_ptr;
	mach_vm_size_t          *vmsize_ptr;
	mach_vm_address_t       vmaddr_chunk;
	mach_vm_size_t          vmsize_chunk;
	mach_vm_offset_t        vmoff;
	mach_port_t             mem_entry_copied, mem_entry_shared;
	mach_port_t             *mem_entry_ptr;
	unsigned int            i;
	vm_region_submap_short_info_data_64_t ri;
	mach_msg_type_number_t  ri_count;
	unsigned int            depth;
	int                     vm_flags;
	unsigned int            expected_tag;

	vmaddr_copied = 0;
	vmaddr_shared = 0;
	vmsize_copied = 0;
	vmsize_shared = 0;
	vmaddr_chunk = 0;
	vmsize_chunk = 16 * 1024;
	vmaddr_orig = 0;
	vmsize_orig = 3 * vmsize_chunk;
	mem_entry_copied = MACH_PORT_NULL;
	mem_entry_shared = MACH_PORT_NULL;
	pass = 0;

	vmaddr_orig = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_orig,
	    vmsize_orig,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
	    override_tag, vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk);
		kr = mach_vm_allocate(mach_task_self(),
		    &vmaddr_chunk,
		    vmsize_chunk,
		    (VM_FLAGS_FIXED |
		    VM_FLAGS_OVERWRITE |
		    VM_MAKE_TAG(100 + (int)i)));
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
		    override_tag, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	for (vmoff = 0;
	    vmoff < vmsize_orig;
	    vmoff += PAGE_SIZE) {
		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
	}

	do_copy = time(NULL) & 1;
again:
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
	if (do_copy) {
		mem_entry_ptr = &mem_entry_copied;
		vmsize_copied = vmsize_orig;
		vmsize_ptr = &vmsize_copied;
		vmaddr_copied = 0;
		vmaddr_ptr = &vmaddr_copied;
		vm_flags = MAP_MEM_VM_COPY;
	} else {
		mem_entry_ptr = &mem_entry_shared;
		vmsize_shared = vmsize_orig;
		vmsize_ptr = &vmsize_shared;
		vmaddr_shared = 0;
		vmaddr_ptr = &vmaddr_shared;
		vm_flags = MAP_MEM_VM_SHARE;
	}
	kr = mach_make_memory_entry_64(mach_task_self(),
	    vmsize_ptr,
	    vmaddr_orig,                            /* offset */
	    (vm_flags |
	    VM_PROT_READ | VM_PROT_WRITE),
	    mem_entry_ptr,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
	    override_tag, do_copy, *mem_entry_ptr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	*vmaddr_ptr = 0;
	if (override_tag) {
		vm_flags = VM_MAKE_TAG(200);
	} else {
		vm_flags = 0;
	}
	kr = mach_vm_map(mach_task_self(),
	    vmaddr_ptr,
	    vmsize_orig,
	    0,              /* mask */
	    vm_flags | VM_FLAGS_ANYWHERE,
	    *mem_entry_ptr,
	    0,              /* offset */
	    FALSE,              /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
		T_QUIET;
		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	} else {
		T_QUIET;
		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		mach_vm_address_t       vmaddr_info;
		mach_vm_size_t          vmsize_info;

		vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk);
		vmsize_info = 0;
		depth = 1;
		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_info,
		    &vmsize_info,
		    &depth,
		    (vm_region_recurse_info_t) &ri,
		    &ri_count);
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		if (override_tag) {
			expected_tag = 200;
		} else {
			expected_tag = 100 + i;
		}
		T_QUIET;
		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%u tag=%u expected %u",
		    override_tag, do_copy, i, ri.user_tag, expected_tag);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	if (++pass < 2) {
		do_copy = !do_copy;
		goto again;
	}

done:
	if (vmaddr_orig != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_orig,
		    vmsize_orig);
		vmaddr_orig = 0;
		vmsize_orig = 0;
	}
	if (vmaddr_copied != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_copied,
		    vmsize_copied);
		vmaddr_copied = 0;
		vmsize_copied = 0;
	}
	if (vmaddr_shared != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_shared,
		    vmsize_shared);
		vmaddr_shared = 0;
		vmsize_shared = 0;
	}
	if (mem_entry_copied != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_copied);
		mem_entry_copied = MACH_PORT_NULL;
	}
	if (mem_entry_shared != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_shared);
		mem_entry_shared = MACH_PORT_NULL;
	}

	return;
}

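/*
 * Helper for the map_memory_entry test below (rdar://22611816).
 * Makes a MAP_MEM_VM_COPY memory entry for a single byte, maps it with
 * copy semantics, and verifies that writes through the new mapping are
 * not visible through the original, i.e. the data was really copied.
 */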
static void
test_map_memory_entry(void)
{
	kern_return_t           kr;
	mach_vm_address_t       vmaddr1, vmaddr2;
	mach_vm_size_t          vmsize1, vmsize2;
	mach_port_t             mem_entry;
	unsigned char           *cp1, *cp2;

	vmaddr1 = 0;
	vmsize1 = 0;
	vmaddr2 = 0;
	vmsize2 = 0;
	mem_entry = MACH_PORT_NULL;

	vmsize1 = 1;
	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
	*cp1 = '1';

	vmsize2 = 1;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize2,
	    vmaddr1,                            /* offset */
	    (MAP_MEM_VM_COPY |
	    VM_PROT_READ | VM_PROT_WRITE),
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
	    (uint64_t) vmsize2, (uint64_t) vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	vmaddr2 = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr2,
	    vmsize2,
	    0,              /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,              /* offset */
	    TRUE,              /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '1');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	*cp2 = '2';
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '2');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr1 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
		vmaddr1 = 0;
		vmsize1 = 0;
	}
	if (vmaddr2 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
		vmaddr2 = 0;
		vmsize2 = 0;
	}
	if (mem_entry != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry);
		mem_entry = MACH_PORT_NULL;
	}

	return;
}

T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
    VM memory tags should be propagated through memory entries",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	test_memory_entry_tagging(0);
	test_memory_entry_tagging(1);
}

T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
    mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
    copy", T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	test_map_memory_entry();
}

static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };

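/*
 * Returns the task's current physical footprint as reported by
 * task_info(TASK_VM_INFO).  Several tests below compare footprints
 * before and after purgeable state transitions or madvise() calls.
 */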
static uint64_t
task_footprint(void)
{
	task_vm_info_data_t ti;
	kern_return_t kr;
	mach_msg_type_number_t count;

	count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__)
	T_QUIET;
	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
	    count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) */
	return ti.phys_footprint;
}

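/*
 * Walks a 1MB purgeable allocation through NONVOLATILE -> VOLATILE ->
 * EMPTY -> VOLATILE -> NONVOLATILE while half of it is mlock()ed,
 * sampling the physical footprint at each step.  Footprint mismatches
 * are only logged as warnings since memory pressure can legitimately
 * perturb the expected values.
 */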
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
    emptying, volatilizing purgeable vm", T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	mach_vm_address_t vm_addr;
	mach_vm_size_t vm_size;
	char *cp;
	int ret;
	vm_purgable_t state;
	uint64_t footprint[8];

	vm_addr = 0;
	vm_size = 1 * 1024 * 1024;
	T_LOG("--> allocate %llu bytes", vm_size);
	kr = mach_vm_allocate(mach_task_self(),
	    &vm_addr,
	    vm_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* footprint0 */
	footprint[0] = task_footprint();
	T_LOG("    footprint[0] = %llu", footprint[0]);

	T_LOG("--> access %llu bytes", vm_size);
	for (cp = (char *) vm_addr;
	    cp < (char *) (vm_addr + vm_size);
	    cp += vm_kernel_page_size) {
		*cp = 'x';
	}
	/* footprint1 == footprint0 + vm_size */
	footprint[1] = task_footprint();
	T_LOG("    footprint[1] = %llu", footprint[1]);
	if (footprint[1] != footprint[0] + vm_size) {
		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
	}

	T_LOG("--> wire %llu bytes", vm_size / 2);
	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

	/* footprint2 == footprint1 */
	footprint[2] = task_footprint();
	T_LOG("    footprint[2] = %llu", footprint[2]);
	if (footprint[2] != footprint[1]) {
		T_LOG("WARN: footprint[2] != footprint[1]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
	    vm_purgable_state[state]);
	/* footprint3 == footprint2 - (vm_size / 2) */
	footprint[3] = task_footprint();
	T_LOG("    footprint[3] = %llu", footprint[3]);
	if (footprint[3] != footprint[2] - (vm_size / 2)) {
		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
	}

	T_LOG("--> EMPTY");
	state = VM_PURGABLE_EMPTY;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
	if (state != VM_PURGABLE_VOLATILE &&
	    state != VM_PURGABLE_EMPTY) {
		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
		    vm_purgable_state[state]);
	}
	/* footprint4 == footprint3 */
	footprint[4] = task_footprint();
	T_LOG("    footprint[4] = %llu", footprint[4]);
	if (footprint[4] != footprint[3]) {
		T_LOG("WARN: footprint[4] != footprint[3]");
	}

	T_LOG("--> unwire %llu bytes", vm_size / 2);
	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
	/* footprint5 == footprint0 */
	footprint[5] = task_footprint();
	T_LOG("    footprint[5] = %llu", footprint[5]);
	if (footprint[5] != footprint[4] - (vm_size / 2)) {
		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
	}
	if (footprint[5] != footprint[0]) {
		T_LOG("WARN: footprint[5] != footprint[0]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint6 == footprint5 */
	/* footprint6 == footprint0 */
	footprint[6] = task_footprint();
	T_LOG("    footprint[6] = %llu", footprint[6]);
	if (footprint[6] != footprint[5]) {
		T_LOG("WARN: footprint[6] != footprint[5]");
	}
	if (footprint[6] != footprint[0]) {
		T_LOG("WARN: footprint[6] != footprint[0]");
	}

	T_LOG("--> NONVOLATILE");
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint7 == footprint6 */
	/* footprint7 == footprint0 */
	footprint[7] = task_footprint();
	T_LOG("    footprint[7] = %llu", footprint[7]);
	if (footprint[7] != footprint[6]) {
		T_LOG("WARN: footprint[7] != footprint[6]");
	}
	if (footprint[7] != footprint[0]) {
		T_LOG("WARN: footprint[7] != footprint[0]");
	}
}

static kern_return_t
get_reusable_size(uint64_t *reusable)
{
	task_vm_info_data_t     ti;
	mach_msg_type_number_t  ti_count = TASK_VM_INFO_COUNT;
	kern_return_t kr;

	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &ti_count);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "task_info()");
	*reusable = ti.reusable;
	return kr;
}

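/*
 * madvise_shared (rdar://2295713): remaps a malloc-tagged allocation so
 * its pages are shared between two mappings, then checks that
 * madvise(MADV_FREE_REUSABLE) on one mapping shrinks the footprint and
 * grows TASK_VM_INFO's "reusable" counter by the size of both mappings.
 */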
T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
    rethink needs madvise(MADV_FREE_HARDER)",
    T_META_RUN_CONCURRENTLY(false),
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	vm_address_t            vmaddr = 0, vmaddr2 = 0;
	vm_size_t               vmsize, vmsize1, vmsize2;
	kern_return_t           kr;
	char                    *cp;
	vm_prot_t               curprot, maxprot;
	int                     ret;
	int                     vmflags;
	uint64_t                footprint_before, footprint_after;
	uint64_t                reusable_before, reusable_after, reusable_expected;

	vmsize1 = 64 * 1024; /* 64KB to madvise() */
	vmsize2 = 32 * 1024; /* 32KB to mlock() */
	vmsize = vmsize1 + vmsize2;
	vmflags = VM_FLAGS_ANYWHERE;
	VM_SET_FLAGS_ALIAS(vmflags, VM_MEMORY_MALLOC);

	kr = get_reusable_size(&reusable_before);
	if (kr) {
		goto done;
	}

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    vmflags);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	kr = vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,           /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr,
	    FALSE,           /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (cp = (char *)(uintptr_t)vmaddr2;
	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
	    cp++) {
		T_QUIET;
		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
		    (void *)(uintptr_t)vmaddr,
		    (void *)(uintptr_t)vmaddr2,
		    (void *)cp,
		    (unsigned char)*cp);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
	cp = (char *)(uintptr_t)vmaddr;
	*cp = 'X';
	cp = (char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_LOG("Skipping madvise reusable tests because we're running under translation.");
		goto done;
	}
#endif /* defined(__x86_64__) || defined(__i386__) */

	ret = mlock((char *)(uintptr_t)(vmaddr2 + vmsize1),
	    vmsize2);
	T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "mlock()");

	footprint_before = task_footprint();

	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize1,
	    MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	footprint_after = task_footprint();
	T_ASSERT_EQ(footprint_after, footprint_before - 2 * vmsize1, NULL);

	kr = get_reusable_size(&reusable_after);
	if (kr) {
		goto done;
	}
	reusable_expected = 2ULL * vmsize1 + reusable_before;
	T_EXPECT_EQ(reusable_after, reusable_expected, "actual=%lld expected %lld",
	    reusable_after, reusable_expected);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
	if (vmaddr2 != 0) {
		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
		vmaddr2 = 0;
	}
}

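/*
 * madvise_purgeable_can_reuse (rdar://37476183): purgeable memory must
 * not become eligible for reuse, so madvise(MADV_CAN_REUSE) on a
 * VM_FLAGS_PURGABLE allocation is expected to fail with EINVAL.
 */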
T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
    rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
    [ purgeable_malloc became eligible for reuse ]",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.");
	}
#endif /* defined(__x86_64__) || defined(__i386__) */
	vm_address_t            vmaddr = 0;
	vm_size_t               vmsize;
	kern_return_t           kr;
	char                    *cp;
	int                     ret;

	vmsize = 10 * 1024 * 1024; /* 10MB */
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_FLAGS_PURGABLE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize,
	    MADV_CAN_REUSE);
	T_QUIET;
	T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be advised to reuse");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}

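/*
 * Scans [start, start + vmsize) in 64-bit strides.  Returns true if every
 * word is zero; otherwise returns false and reports the first non-zero
 * address through non_zero_addr.
 */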
static bool
validate_memory_is_zero(
	vm_address_t            start,
	vm_size_t               vmsize,
	vm_address_t           *non_zero_addr)
{
	for (vm_size_t sz = 0; sz < vmsize; sz += sizeof(uint64_t)) {
		vm_address_t addr = start + sz;

		if (*(uint64_t *)(addr) != 0) {
			*non_zero_addr = addr;
			return false;
		}
	}
	return true;
}

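/*
 * madvise_zero: checks that madvise(MADV_ZERO) zero-fills a range both
 * when its pages are resident (re-dirtied after MADV_FREE_REUSABLE) and
 * after they have been evicted with MADV_PAGEOUT, polling mincore() to
 * make sure the pageout completed before the second check.
 */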
T_DECL(madvise_zero, "test madvise zero", T_META_TAG_VM_PREFERRED)
{
	vm_address_t            vmaddr = 0;
	vm_size_t               vmsize = PAGE_SIZE * 3;
	vm_address_t            non_zero_addr = 0;
	kern_return_t           kr;
	int                     ret;
	unsigned char           vec;

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	memset((void *)vmaddr, 'A', vmsize);
	ret = madvise((void*)vmaddr, vmsize, MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_FREE_REUSABLE)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	memset((void *)vmaddr, 'B', PAGE_SIZE);
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
	    "madvise(%p, %lu, MADV_ZERO) returned non zero mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	memset((void *)vmaddr, 'C', PAGE_SIZE);
	ret = madvise((void*)vmaddr, vmsize, MADV_PAGEOUT);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_PAGEOUT)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* wait for the pages to be (asynchronously) compressed */
	T_QUIET; T_LOG("waiting for first page to be paged out");
	do {
		ret = mincore((void*)vmaddr, 1, (char *)&vec);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(1st)");
	} while (vec & MINCORE_INCORE);
	T_QUIET; T_LOG("waiting for last page to be paged out");
	do {
		ret = mincore((void*)(vmaddr + vmsize - 1), 1, (char *)&vec);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(last)");
	} while (vec & MINCORE_INCORE);

	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
	    "madvise(%p, %lu, MADV_ZERO) returned non zero mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}

#define DEST_PATTERN 0xFEDCBA98

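/*
 * map_read_overwrite (rdar://31075370): fills a source buffer with a
 * counting pattern and a destination with DEST_PATTERN, then uses
 * mach_vm_read_overwrite() to copy everything but the first and last
 * words.  The untouched destination edges must still hold DEST_PATTERN.
 */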
T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
    rdar://31075370",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	kern_return_t           kr;
	mach_vm_address_t       vmaddr1, vmaddr2;
	mach_vm_size_t          vmsize1, vmsize2;
	unsigned int            *ip;
	unsigned int            i;

	vmaddr1 = 0;
	vmsize1 = 4 * 4096;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (unsigned int *)(uintptr_t)vmaddr1;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = i;
	}

	vmaddr2 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr2,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (unsigned int *)(uintptr_t)vmaddr2;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = DEST_PATTERN;
	}

	vmsize2 = vmsize1 - 2 * (sizeof(*ip));
	kr = mach_vm_read_overwrite(mach_task_self(),
	    vmaddr1 + sizeof(*ip),
	    vmsize2,
	    vmaddr2 + sizeof(*ip),
	    &vmsize2);
	T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");

	ip = (unsigned int *)(uintptr_t)vmaddr2;
	for (i = 0; i < 1; i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
	for (; i < (vmsize1 - 2) / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], i);
	}
	for (; i < vmsize1 / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
}

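/*
 * copy_none_use_pmap (rdar://35610377): chains two copy-on-write remaps
 * of a purgeable (COPY_NONE) allocation; both vm_remap() calls are simply
 * expected to succeed.
 */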
T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
    objects - rdar://35610377",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	kern_return_t           kr;
	mach_vm_address_t       vmaddr1, vmaddr2, vmaddr3;
	mach_vm_size_t          vmsize;
	vm_prot_t               curprot, maxprot;

	vmsize = 32 * 1024 * 1024;

	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);

	vmaddr2 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,                /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    TRUE,                /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");

	vmaddr3 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr3,
	    vmsize,
	    0,                /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr2,
	    TRUE,                /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
}

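/*
 * purgable_deny (rdar://31990033): purgeable memory may not be converted
 * back to non-purgeable, so setting its state to VM_PURGABLE_DENY must
 * fail with KERN_INVALID_ARGUMENT.
 */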
T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
    non-purgeable - rdar://31990033",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	kern_return_t   kr;
	vm_address_t    vmaddr;
	vm_purgable_t   state;

	vmaddr = 0;
	kr = vm_allocate(mach_task_self(), &vmaddr, 1,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	state = VM_PURGABLE_DENY;
	kr = vm_purgable_control(mach_task_self(), vmaddr,
	    VM_PURGABLE_SET_STATE, &state);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
	    "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
	    kr, mach_error_string(kr));

	kr = vm_deallocate(mach_task_self(), vmaddr, 1);
	T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
}

#define VMSIZE 0x10000

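/*
 * vm_remap_zero (rdar://33114981): mach_vm_remap() must reject degenerate
 * sizes; both 0 and the almost-wrapping (mach_vm_size_t)-2 should return
 * KERN_INVALID_ARGUMENT.
 */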
T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	kern_return_t           kr;
	mach_vm_address_t       vmaddr1, vmaddr2;
	mach_vm_size_t          vmsize;
	vm_prot_t               curprot, maxprot;

	vmaddr1 = 0;
	vmsize = VMSIZE;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	vmaddr2 = 0;
	vmsize = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE,
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));

	vmaddr2 = 0;
	vmsize = (mach_vm_size_t)-2;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE,
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));
}

extern int __shared_region_check_np(uint64_t *);

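/*
 * nested_pmap_trigger (rdar://41481703): a nested pmap should only be
 * triggered from the kernel.  Creates a MAP_MEM_VM_SHARE memory entry for
 * the first page of the shared region and maps it read-only, expecting
 * both calls to succeed cleanly.
 */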
T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
    - rdar://problem/41481703",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	int                     ret;
	kern_return_t           kr;
	mach_vm_address_t       sr_start;
	mach_vm_size_t          vmsize;
	mach_vm_address_t       vmaddr;
	mach_port_t             mem_entry;

	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		int saved_errno;
		saved_errno = errno;

		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}

	vmsize = PAGE_SIZE;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize,
	    sr_start,
	    MAP_MEM_VM_SHARE | VM_PROT_READ,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);

	vmaddr = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,
	    FALSE,
	    VM_PROT_READ,
	    VM_PROT_READ,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
}

static const char *prot_str[] = { "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
static const char *share_mode_str[] = { "---", "COW", "PRIVATE", "EMPTY", "SHARED", "TRUESHARED", "PRIVATE_ALIASED", "SHARED_ALIASED", "LARGE_PAGE" };

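/*
 * shared_region_share_writable (rdar://74469953): sharing or remapping a
 * mapping of the shared region must never hand out write access to the
 * original.  Step 1 probes a read-only nested mapping, step 2 a writable
 * one; in each case vm_remap(), vm_remap_new() (when available) and
 * mach_make_memory_entry_64() must either fail, return a copy, or
 * trigger copy-on-write, leaving the region nested with its protections
 * intact.
 */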
T_DECL(shared_region_share_writable, "sharing a writable mapping of the shared region should not give write access to shared region - rdar://problem/74469953",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	int ret;
	uint64_t sr_start;
	kern_return_t kr;
	mach_vm_address_t address, tmp_address, remap_address;
	mach_vm_size_t size, tmp_size, remap_size;
	uint32_t depth;
	mach_msg_type_number_t count;
	vm_region_submap_info_data_64_t info;
	vm_prot_t cur_prot, max_prot;
	uint32_t before, after, remap;
	mach_port_t mem_entry;

	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		int saved_errno;
		saved_errno = errno;

		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}
	T_LOG("SHARED_REGION_BASE 0x%llx", SHARED_REGION_BASE);
	T_LOG("SHARED_REGION_SIZE 0x%llx", SHARED_REGION_SIZE);
	T_LOG("shared region starts at 0x%llx", sr_start);
	T_QUIET; T_ASSERT_GE(sr_start, SHARED_REGION_BASE,
	    "shared region starts below BASE");
	T_QUIET; T_ASSERT_LT(sr_start, SHARED_REGION_BASE + SHARED_REGION_SIZE,
	    "shared region starts above BASE+SIZE");

	/*
	 * Step 1 - check that one cannot get write access to a read-only
	 * mapping in the shared region.
	 */
	size = 0;
	for (address = SHARED_REGION_BASE;
	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
	    address += size) {
		size = 0;
		depth = 99;
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &address,
		    &size,
		    &depth,
		    (vm_region_recurse_info_t)&info,
		    &count);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
		if (kr == KERN_INVALID_ADDRESS) {
			T_SKIP("could not find read-only nested mapping");
			T_END;
		}
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
		    address, address + size, depth,
		    prot_str[info.protection],
		    prot_str[info.max_protection],
		    share_mode_str[info.share_mode],
		    info.object_id);
		if (depth > 0 &&
		    (info.protection == VM_PROT_READ) &&
		    (info.max_protection == VM_PROT_READ)) {
			/* nested and read-only: bingo! */
			break;
		}
	}
	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
		T_SKIP("could not find read-only nested mapping");
		T_END;
	}

	/* test vm_remap() of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	kr = mach_vm_remap(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
//	T_QUIET; T_ASSERT_EQ(cur_prot, VM_PROT_READ, "cur_prot is read-only");
//	T_QUIET; T_ASSERT_EQ(max_prot, VM_PROT_READ, "max_prot is read-only");
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	/* check that new mapping is read-only */
	tmp_address = remap_address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
// this would crash if actually read-only:
//	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
//	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap() did not bypass copy-on-write");
	}
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	T_PASS("vm_remap() read-only");

#if defined(VM_MEMORY_ROSETTA)
	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
		T_PASS("vm_remap_new() is not present");
		goto skip_vm_remap_new_ro;
	}
	/* test vm_remap_new() of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
	max_prot = VM_PROT_READ | VM_PROT_WRITE;
	kr = mach_vm_remap_new(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_vm_remap_new_ro;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap_new() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap_new() did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	T_PASS("vm_remap_new() read-only");
skip_vm_remap_new_ro:
#else /* defined(VM_MEMORY_ROSETTA) */
	/* pre-BigSur SDK: no vm_remap_new() */
	T_LOG("No vm_remap_new() to test");
#endif /* defined(VM_MEMORY_ROSETTA) */

	/* test mach_make_memory_entry_64(VM_SHARE) of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_mem_entry_vm_share_ro;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,              /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,              /* offset */
	    FALSE,              /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
	} else {
		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	/* check that new mapping is a copy */
	tmp_address = remap_address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
//	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	T_PASS("mem_entry(VM_SHARE) read-only");
skip_mem_entry_vm_share_ro:

	/* test mach_make_memory_entry_64() of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_EQ(kr, KERN_PROTECTION_FAILURE, "mach_make_memory_entry_64()");
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
//	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	if (depth > 0) {
		T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	}
	T_PASS("mem_entry() read-only");

	/* test mach_make_memory_entry_64(READ | WRITE | VM_PROT_IS_MASK) of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(READ | WRITE | IS_MASK)");
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,              /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,              /* offset */
	    FALSE,              /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_RIGHT, "vm_map(read/write)");
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,              /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,              /* offset */
	    FALSE,              /* copy */
	    VM_PROT_READ,
	    VM_PROT_READ,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map(read only)");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
//	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	if (depth > 0) {
		T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	}
	/* check that new mapping is a copy */
	tmp_address = remap_address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	T_PASS("mem_entry(READ | WRITE | IS_MASK) read-only");

1551 	/*
1552 	 * Step 2 - check that one can not share write access with a writable
1553 	 * mapping in the shared region.
1554 	 */
1555 	size = 0;
1556 	for (address = SHARED_REGION_BASE;
1557 	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1558 	    address += size) {
1559 		size = 0;
1560 		depth = 99;
1561 		count = VM_REGION_SUBMAP_INFO_COUNT_64;
1562 		kr = mach_vm_region_recurse(mach_task_self(),
1563 		    &address,
1564 		    &size,
1565 		    &depth,
1566 		    (vm_region_recurse_info_t)&info,
1567 		    &count);
1568 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1569 		if (kr == KERN_INVALID_ADDRESS) {
1570 			T_SKIP("could not find writable nested mapping");
1571 			T_END;
1572 		}
1573 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1574 		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1575 		    address, address + size, depth,
1576 		    prot_str[info.protection],
1577 		    prot_str[info.max_protection],
1578 		    share_mode_str[info.share_mode],
1579 		    info.object_id);
1580 		if (depth > 0 && (info.protection & VM_PROT_WRITE)) {
1581 			/* nested and writable: bingo! */
1582 			break;
1583 		}
1584 	}
1585 	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1586 		T_SKIP("could not find writable nested mapping");
1587 		T_END;
1588 	}
1589 
1590 	/* test vm_remap() of RW */
1591 	before = *(uint32_t *)(uintptr_t)address;
1592 	remap_address = 0;
1593 	remap_size = size;
1594 	kr = mach_vm_remap(mach_task_self(),
1595 	    &remap_address,
1596 	    remap_size,
1597 	    0,
1598 	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1599 	    mach_task_self(),
1600 	    address,
1601 	    FALSE,
1602 	    &cur_prot,
1603 	    &max_prot,
1604 	    VM_INHERIT_DEFAULT);
1605 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1606 	if (!(cur_prot & VM_PROT_WRITE)) {
1607 		T_LOG("vm_remap(): 0x%llx not writable %s/%s",
1608 		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
1609 		T_ASSERT_FAIL("vm_remap() remapping not writable");
1610 	}
1611 	remap = *(uint32_t *)(uintptr_t)remap_address;
1612 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1613 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1614 	after = *(uint32_t *)(uintptr_t)address;
1615 	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1616 	*(uint32_t *)(uintptr_t)remap_address = before;
1617 	if (before != after) {
1618 		T_FAIL("vm_remap() bypassed copy-on-write");
1619 	} else {
1620 		T_PASS("vm_remap() did not bypass copy-on-write");
1621 	}
1622 	/* check that region is still nested */
1623 	tmp_address = address;
1624 	tmp_size = 0;
1625 	depth = 99;
1626 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1627 	kr = mach_vm_region_recurse(mach_task_self(),
1628 	    &tmp_address,
1629 	    &tmp_size,
1630 	    &depth,
1631 	    (vm_region_recurse_info_t)&info,
1632 	    &count);
1633 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1634 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1635 	    tmp_address, tmp_address + tmp_size, depth,
1636 	    prot_str[info.protection],
1637 	    prot_str[info.max_protection],
1638 	    share_mode_str[info.share_mode],
1639 	    info.object_id);
1640 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1641 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1642 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1643 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1644 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1645 	/* cleanup */
1646 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1647 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1648 
1649 #if defined(VM_MEMORY_ROSETTA)
1650 	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1651 		T_PASS("vm_remap_new() is not present");
1652 		goto skip_vm_remap_new_rw;
1653 	}
1654 	/* test vm_remap_new() of RW */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
	max_prot = VM_PROT_READ | VM_PROT_WRITE;
	kr = mach_vm_remap_new(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_vm_remap_new_rw;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (!(cur_prot & VM_PROT_WRITE)) {
		T_LOG("vm_remap_new(): 0x%llx not writable %s/%s",
		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
		T_ASSERT_FAIL("vm_remap_new() remapping not writable");
	}
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap_new() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap_new() did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
skip_vm_remap_new_rw:
#else /* defined(VM_MEMORY_ROSETTA) */
	/* pre-BigSur SDK: no vm_remap_new() */
	T_LOG("No vm_remap_new() to test");
#endif /* defined(VM_MEMORY_ROSETTA) */

	/* test mach_make_memory_entry_64(VM_SHARE) of RW */
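	/*
	 * MAP_MEM_VM_SHARE asks for a memory entry that shares the caller's
	 * mapping rather than copying it.  Mapping that entry writable and
	 * writing through it still must not modify the nested original.
	 */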
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_mem_entry_vm_share_rw;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	T_QUIET; T_ASSERT_EQ(remap_size, size, "mem_entry(VM_SHARE) should cover whole mapping");
//	T_LOG("AFTER MAKE_MEM_ENTRY(VM_SHARE) 0x%llx...", address); fflush(stdout); fflush(stderr); getchar();
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,              /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,              /* offset */
	    FALSE,              /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
//	T_LOG("AFTER VM_MAP 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
//	T_LOG("AFTER WRITE 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
	} else {
		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	mach_port_deallocate(mach_task_self(), mem_entry);
skip_mem_entry_vm_share_rw:

	/* test mach_make_memory_entry_64() of RW */
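	/*
	 * Without MAP_MEM_VM_SHARE, the memory entry has copy semantics.
	 * Here a write through the new mapping is expected to propagate,
	 * but only because making the entry should have triggered
	 * copy-on-write and un-nested the original mapping (depth 0).
	 */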
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64()");
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,              /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,              /* offset */
	    FALSE,              /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	/* check that region is no longer nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	if (before != after) {
		if (depth == 0) {
			T_PASS("mem_entry() honored copy-on-write");
		} else {
			T_FAIL("mem_entry() did not trigger copy-on-write");
		}
	} else {
		T_FAIL("mem_entry() did not honor copy-on-write");
	}
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(depth, 0, "no longer nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	mach_port_deallocate(mach_task_self(), mem_entry);
}

T_DECL(copyoverwrite_submap_protection, "test copy-overwrite vm region submap \
    protection", T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	kern_return_t           kr;
	mach_vm_address_t       vmaddr;
	mach_vm_size_t          vmsize;
	natural_t               depth;
	vm_region_submap_short_info_data_64_t region_info;
	mach_msg_type_number_t  region_info_count;

	for (vmaddr = SHARED_REGION_BASE;
	    vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
	    vmaddr += vmsize) {
		depth = 99;
		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr,
		    &vmsize,
		    &depth,
		    (vm_region_info_t) &region_info,
		    &region_info_count);
		if (kr == KERN_INVALID_ADDRESS) {
			break;
		}
		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
		T_ASSERT_EQ(region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
		    "vm_region_recurse(0x%llx) count = %d expected %d",
		    vmaddr, region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);

		T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
		    vmaddr, depth, region_info.protection,
		    region_info.max_protection);
		if (depth == 0) {
			/* not a submap mapping: next mapping */
			continue;
		}
		if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
			break;
		}
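		/*
		 * Copy the submap mapping onto itself.  For non-writable
		 * shared-region mappings this must fail with a protection
		 * error; if it succeeds, the overwritten range must have
		 * been un-nested (depth 0) and must not be executable.
		 */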
		kr = mach_vm_copy(mach_task_self(),
		    vmaddr,
		    vmsize,
		    vmaddr);
		if (kr == KERN_PROTECTION_FAILURE ||
		    kr == KERN_INVALID_ADDRESS) {
			T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
			    vmaddr, vmsize, kr, mach_error_string(kr));
			continue;
		}
		T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
		    vmaddr, vmsize, region_info.protection);
		depth = 0;
		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr,
		    &vmsize,
		    &depth,
		    (vm_region_info_t) &region_info,
		    &region_info_count);
		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
		T_ASSERT_EQ(region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
		    "vm_region_recurse() count = %d expected %d",
		    region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);

		T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
		    vmaddr, depth);
		T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
		    0, "vm_region_recurse(0x%llx): prot 0x%x",
		    vmaddr, region_info.protection);
	}
}

T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
    the shared region triggers code-signing violations",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	uint32_t *addr, before, after;
	int retval;
	int saved_errno;
	kern_return_t kr;
	vm_address_t map_addr, remap_addr;
	vm_prot_t curprot, maxprot;

	addr = (uint32_t *)&printf;
#if __has_feature(ptrauth_calls)
	map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
#else /* __has_feature(ptrauth_calls) */
	map_addr = (vm_address_t)(uintptr_t)addr;
#endif /* __has_feature(ptrauth_calls) */
	remap_addr = 0;
	kr = vm_remap(mach_task_self(), &remap_addr, 4096,
	    0,           /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(), map_addr,
	    FALSE,           /* copy */
	    &curprot, &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
	    kr, mach_error_string(kr));
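	/*
	 * Wiring signed shared-region text must either be refused with
	 * EPERM or leave the text bytes unchanged; a modified copy made to
	 * satisfy the wire request would break code signing, which is what
	 * rdar://problem/16783546 guards against.
	 */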
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EPERM);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

	addr = (uint32_t *) &fprintf;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EPERM);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

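	/*
	 * Repeat the probe on the test's own text: testmain_wire_text is
	 * assumed to live in the main executable, outside the shared
	 * region, so the wire may succeed, but the text must still be
	 * unchanged.
	 */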
	addr = (uint32_t *) &testmain_wire_text;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire text error %d (%s)",
		    saved_errno, strerror(saved_errno));
	} else if (after != before) {
		T_ASSERT_FAIL("text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire text");
	}
}

T_DECL(remap_comm_page, "test remapping of the commpage - rdar://93177124",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	kern_return_t           kr;
	mach_vm_address_t       commpage_addr, remap_addr;
	mach_vm_size_t          vmsize;
	vm_prot_t               curprot, maxprot;

#if __arm__
	commpage_addr = 0xFFFF4000ULL;
#elif __arm64__
	commpage_addr = 0x0000000FFFFFC000ULL;
#elif __x86_64__
	commpage_addr = 0x00007FFFFFE00000ULL;
#else
	T_FAIL("unknown commpage address for this architecture");
#endif
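	/*
	 * The addresses above are the fixed per-architecture user commpage
	 * base addresses (see the _COMM_PAGE*_BASE_ADDRESS definitions in
	 * <machine/cpu_capabilities.h>); the commpage is mapped at the same
	 * address in every task.
	 */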

	T_LOG("Remapping commpage from 0x%llx", commpage_addr);
	vmsize = vm_kernel_page_size;
	remap_addr = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &remap_addr,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    commpage_addr,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	if (kr == KERN_INVALID_ADDRESS) {
		T_SKIP("No mapping found at 0x%llx\n", commpage_addr);
		return;
	}
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() of commpage from 0x%llx", commpage_addr);
}

/* rdar://132439059 */
T_DECL(mach_vm_remap_new_task_read_port,
    "Ensure shared, writable mappings cannot be created with a process's task read port using mach_vm_remap_new",
    T_META_TAG_VM_PREFERRED,
    T_META_RUN_CONCURRENTLY(true))
{
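	/*
	 * Flow: the parent allocates a page and forks.  The child confirms
	 * the page is mapped VM_PROT_DEFAULT/VM_PROT_ALL, then blocks on a
	 * pipe.  The parent remaps the child's page through a task *read*
	 * port; the resulting mapping must come back VM_PROT_NONE, so the
	 * read port cannot be laundered into a writable shared mapping.
	 */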
	mach_vm_address_t private_data = 0;
	pid_t pid = -1;
	int fds[2];
	uint32_t depth = 9999;
	mach_vm_size_t size = PAGE_SIZE;
	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
	vm_region_submap_info_data_64_t info;
	kern_return_t kr = KERN_FAILURE;
	int ret = -1;

	kr = mach_vm_allocate(mach_task_self(), &private_data, size, VM_FLAGS_ANYWHERE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_allocate");

	ret = pipe(fds);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pipe");

	pid = fork();
	T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "fork");

	if (pid == 0) {
		char data[2];
		ssize_t nbytes_read = -1;

		/* Close write end of the pipe */
		ret = close(fds[1]);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "child: close write end");

		/* Check that the permissions are VM_PROT_DEFAULT/VM_PROT_ALL */
		kr = mach_vm_region_recurse(mach_task_self(),
		    &private_data,
		    &size,
		    &depth,
		    (vm_region_recurse_info_t)&info,
		    &count);
		T_ASSERT_MACH_SUCCESS(kr, "child: mach_vm_region_recurse");
		T_EXPECT_EQ_INT(info.protection, VM_PROT_DEFAULT, "child: current protection is VM_PROT_DEFAULT");
		T_EXPECT_EQ_INT(info.max_protection, VM_PROT_ALL, "child: maximum protection is VM_PROT_ALL");

		/* The child tries to read data from the pipe (that never comes) */
		nbytes_read = read(fds[0], data, 2);
		T_QUIET; T_EXPECT_EQ_LONG(nbytes_read, 0L, "child: read 0 bytes");

		exit(0);
	} else {
		mach_port_t read_port = MACH_PORT_NULL;
		mach_vm_address_t remap_addr = 0;
		int status;

		/* Close read end of the pipe */
		ret = close(fds[0]);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "close read end");

		/* Get a read port */
		ret = task_read_for_pid(mach_task_self(), pid, &read_port);
		T_ASSERT_POSIX_SUCCESS(ret, "parent: task_read_for_pid");

		/* Make a shared mapping with the child's data */
		vm_prot_t cur_prot = VM_PROT_NONE;
		vm_prot_t max_prot = VM_PROT_NONE;
		kr = mach_vm_remap_new(
			mach_task_self(),
			&remap_addr,
			size,
			0, /* mask */
			VM_FLAGS_ANYWHERE,
			read_port,
			private_data,
			FALSE, /* copy */
			&cur_prot,
			&max_prot,
			VM_INHERIT_DEFAULT);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "parent: mach_vm_remap_new");

		/* Check that permissions of the remapped region are VM_PROT_NONE */
		kr = mach_vm_region_recurse(mach_task_self(),
		    &remap_addr,
		    &size,
		    &depth,
		    (vm_region_recurse_info_t)&info,
		    &count);
		T_ASSERT_MACH_SUCCESS(kr, "parent: mach_vm_region_recurse");
		T_EXPECT_EQ_INT(info.protection, VM_PROT_NONE, "parent: current protection is VM_PROT_NONE");
		T_EXPECT_EQ_INT(info.max_protection, VM_PROT_NONE, "parent: maximum protection is VM_PROT_NONE");

		/* Tell the child it is done and can exit. */
		ret = close(fds[1]);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "parent: close write end");

		/* Clean up the child */
		ret = waitpid(pid, &status, 0);
		T_EXPECT_EQ_INT(ret, pid, "waitpid: child was stopped or terminated");
	}
}