xref: /xnu-11215/tests/vm/vm_allocation.c (revision 8d741a5d)
1 /* Mach virtual memory unit tests
2  *
3  * The main goal of this code is to facilitate the construction,
4  * running, result logging and clean up of a test suite, taking care
5  * of all the scaffolding. A test suite is a sequence of very targeted
6  * unit tests, each running as a separate process to isolate its
7  * address space.
8  * A unit test is abstracted as a unit_test_t structure, consisting of
9  * a test function and a logging identifier. A test suite is a suite_t
10  * structure, consisting of a unit_test_t array and fixture set up and
11  * tear down functions.
12  * Test suites are created dynamically. Each of its unit tests runs in
13  * its own fork()d process, with the fixture set up and tear down
14  * running before and after each test. The parent process will log a
15  * pass result if the child exits normally, and a fail result in any
16  * other case (non-zero exit status, abnormal signal). The suite
17  * results are then aggregated and logged after the [SUMMARY] keyword,
18  * and finally the test suite is destroyed.
19  * The included test suites cover the Mach memory allocators,
20  * mach_vm_allocate() and mach_vm_map() with various options, and
21  * mach_vm_deallocate(), mach_vm_read(), mach_vm_write(),
22  * mach_vm_protect(), mach_vm_copy().
23  *
24  * Author: Renaud Dreyer ([email protected])
25  *
26  * Transformed to libdarwintest by Tristan Ye ([email protected]) */
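/*
 * Illustrative usage sketch (not part of the original suites; the
 * run_suite() macro, the UnitTests type and the fixture/test functions
 * named here are all defined later in this file):
 *
 *     UnitTests example_tests = {
 *         {"allocated memory is zero-filled", test_zero_filled},
 *     };
 *     run_suite(set_up_vm_variables_and_allocate, example_tests,
 *         deallocate_extra_page, "example suite");
 *     log_aggregated_results();
 */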
27 
28 #include <darwintest.h>
29 
30 #include <stdlib.h>
31 #include <ctype.h>
32 #include <inttypes.h>
33 #include <stdio.h>
34 #include <math.h>
35 #include <errno.h>
36 #include <signal.h>
37 #include <getopt.h>
38 #include <mach/mach.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_vm.h>
41 #include <sys/sysctl.h>
42 #include <time.h>
43 
44 T_GLOBAL_META(
45 	T_META_NAMESPACE("xnu.vm"),
46 	T_META_RADAR_COMPONENT_NAME("xnu"),
47 	T_META_RADAR_COMPONENT_VERSION("VM"));
48 
49 /**************************/
50 /**************************/
51 /* Unit Testing Framework */
52 /**************************/
53 /**************************/
54 
55 /*********************/
56 /* Private interface */
57 /*********************/
58 
59 static const char frameworkname[] = "vm_unitester";
60 
61 /* Type for test, fixture set up and fixture tear down functions. */
62 typedef void (*test_fn_t)();
63 
64 /* Unit test structure. */
65 typedef struct {
66 	const char * name;
67 	test_fn_t test;
68 } unit_test_t;
69 
70 /* Test suite structure. */
71 typedef struct {
72 	const char * name;
73 	int numoftests;
74 	test_fn_t set_up;
75 	unit_test_t * tests;
76 	test_fn_t tear_down;
77 } suite_t;
78 
79 int _quietness        = 0;
80 int _expected_signal  = 0;
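/* Semantics (see child_terminated_normally() below): 0 means the child
 * must exit cleanly, a positive value names the exact signal the child is
 * expected to die with, and -1 accepts either SIGBUS or SIGSEGV. */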
81 
82 struct {
83 	uintmax_t numoftests;
84 	uintmax_t passed_tests;
85 } results = {0, 0};
86 
87 #define logr(format, ...) \
88 	do { \
89 	        if (_quietness <= 1) { \
90 	                T_LOG(format, ## __VA_ARGS__); \
91 	        } \
92 	} while (0)
93 
94 #define logv(format, ...) \
95 	do { \
96 	        if (_quietness == 0) { \
97 	                T_LOG(format, ## __VA_ARGS__); \
98 	        } \
99 	} while (0)
100 
101 static suite_t *
102 create_suite(const char * name, int numoftests, test_fn_t set_up, unit_test_t * tests, test_fn_t tear_down)
103 {
104 	suite_t * suite = (suite_t *)malloc(sizeof(suite_t));
105 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(suite, "malloc()");
106 
107 	suite->name       = name;
108 	suite->numoftests = numoftests;
109 	suite->set_up     = set_up;
110 	suite->tests      = tests;
111 	suite->tear_down  = tear_down;
112 	return suite;
113 }
114 
115 static void
116 destroy_suite(suite_t * suite)
117 {
118 	free(suite);
119 }
120 
121 static void
122 log_suite_info(suite_t * suite)
123 {
124 	logr("[TEST] %s", suite->name);
125 	logr("Number of tests: %d\n", suite->numoftests);
126 }
127 
128 static void
129 log_suite_results(suite_t * suite, int passed_tests)
130 {
131 	results.numoftests += (uintmax_t)suite->numoftests;
132 	results.passed_tests += (uintmax_t)passed_tests;
133 }
134 
135 static void
136 log_test_info(unit_test_t * unit_test, unsigned test_num)
137 {
138 	logr("[BEGIN] #%04d: %s", test_num, unit_test->name);
139 }
140 
141 static void
142 log_test_result(unit_test_t * unit_test, boolean_t test_passed, unsigned test_num)
143 {
144 	logr("[%s] #%04d: %s\n", test_passed ? "PASS" : "FAIL", test_num, unit_test->name);
145 }
146 
147 /* Run a test with fixture set up and tear down around it; any time out
148  * is enforced by the test harness rather than here. */
149 static void
150 run_test(suite_t * suite, unit_test_t * unit_test, unsigned test_num)
151 {
152 	log_test_info(unit_test, test_num);
153 
154 	suite->set_up();
155 	unit_test->test();
156 	suite->tear_down();
157 }
158 
159 /* Check a child return status. */
160 static boolean_t
161 child_terminated_normally(int child_status)
162 {
163 	boolean_t normal_exit = FALSE;
164 
165 	if (WIFEXITED(child_status)) {
166 		int exit_status = WEXITSTATUS(child_status);
167 		if (exit_status) {
168 			T_LOG("Child process unexpectedly exited with code %d.",
169 			    exit_status);
170 		} else if (!_expected_signal) {
171 			normal_exit = TRUE;
172 		}
173 	} else if (WIFSIGNALED(child_status)) {
174 		int signal = WTERMSIG(child_status);
175 		if (signal == _expected_signal ||
176 		    (_expected_signal == -1 && (signal == SIGBUS || signal == SIGSEGV))) {
177 			if (_quietness <= 0) {
178 				T_LOG("Child process died with expected signal "
179 				    "%d.", signal);
180 			}
181 			normal_exit = TRUE;
182 		} else {
183 			T_LOG("Child process unexpectedly died with signal %d.",
184 			    signal);
185 		}
186 	} else {
187 		T_LOG("Child process unexpectedly neither exited nor died");
188 	}
189 
190 	return normal_exit;
191 }
192 
193 /* Run a test in its own process, and report the result. */
194 static boolean_t
195 child_test_passed(suite_t * suite, unit_test_t * unit_test)
196 {
197 	int test_status;
198 	static unsigned test_num = 0;
199 
200 	test_num++;
201 
202 	pid_t test_pid = fork();
203 	T_QUIET; T_ASSERT_POSIX_SUCCESS(test_pid, "fork()");
204 	if (!test_pid) {
205 		run_test(suite, unit_test, test_num);
206 		exit(0);
207 	}
208 	while (waitpid(test_pid, &test_status, 0) != test_pid) {
209 		continue;
210 	}
211 	boolean_t test_result = child_terminated_normally(test_status);
212 	log_test_result(unit_test, test_result, test_num);
213 	return test_result;
214 }
215 
216 /* Run each test in a suite, and report the results. */
217 static int
218 count_passed_suite_tests(suite_t * suite)
219 {
220 	int passed_tests = 0;
221 	int i;
222 
223 	for (i = 0; i < suite->numoftests; i++) {
224 		passed_tests += child_test_passed(suite, &(suite->tests[i]));
225 	}
226 	return passed_tests;
227 }
228 
229 /********************/
230 /* Public interface */
231 /********************/
232 
233 #define DEFAULT_QUIETNESS    0 /* verbose */
234 #define RESULT_ERR_QUIETNESS 1 /* result and error */
235 #define ERROR_ONLY_QUIETNESS 2 /* error only */
236 
237 #define run_suite(set_up, tests, tear_down, ...) \
238 	_run_suite((sizeof(tests) / sizeof(tests[0])), (set_up), (tests), (tear_down), __VA_ARGS__)
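/* Note: the tests argument must be an actual UnitTests array visible at the
 * macro expansion site (not a pointer), since sizeof() is used above to
 * derive the number of tests. */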
239 
240 typedef unit_test_t UnitTests[];
241 
242 void _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
243 __printflike(5, 6);
244 
245 void
246 _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
247 {
248 	va_list ap;
249 	char * name;
250 
251 	va_start(ap, format);
252 	T_QUIET; T_ASSERT_POSIX_SUCCESS(vasprintf(&name, format, ap), "vasprintf()");
253 	va_end(ap);
254 	suite_t * suite = create_suite(name, numoftests, set_up, tests, tear_down);
255 	log_suite_info(suite);
256 	log_suite_results(suite, count_passed_suite_tests(suite));
257 	free(name);
258 	destroy_suite(suite);
259 }
260 
261 /* Setters and getters for various test framework global
262  * variables. Should only be used outside of the test, set up and tear
263  * down functions. */
264 
265 /* Expected signal for a test, default is 0. */
266 void
267 set_expected_signal(int signal)
268 {
269 	_expected_signal = signal;
270 }
271 
272 int
273 get_expected_signal()
274 {
275 	return _expected_signal;
276 }
277 
278 /* Logging verbosity. */
279 void
280 set_quietness(int value)
281 {
282 	_quietness = value;
283 }
284 
285 int
286 get_quietness()
287 {
288 	return _quietness;
289 }
290 
291 /* For fixture set up and tear down functions, and units tests. */
292 void
293 do_nothing()
294 {
295 }
296 
297 void
298 log_aggregated_results()
299 {
300 	T_LOG("[SUMMARY] Aggregated Test Results\n");
301 	T_LOG("Total: %ju", results.numoftests);
302 	T_LOG("Passed: %ju", results.passed_tests);
303 	T_LOG("Failed: %ju\n", results.numoftests - results.passed_tests);
304 
305 	T_QUIET; T_ASSERT_EQ(results.passed_tests, results.numoftests,
306 	    "%ju passed of total %ju tests",
307 	    results.passed_tests, results.numoftests);
308 }
309 
310 /*******************************/
311 /*******************************/
312 /* Virtual memory unit testing */
313 /*******************************/
314 /*******************************/
315 
316 /* Test exit values:
317  * 0: pass
318  * 1: fail, generic unexpected failure
319  * 2: fail, unexpected Mach return value
320  * 3: fail, time out */
321 
322 #define DEFAULT_VM_SIZE ((mach_vm_size_t)(1024ULL * 4096ULL))
323 
324 #define POINTER(address) ((char *)(uintptr_t)(address))
325 #define MACH_VM_ADDRESS_T(address) (*((mach_vm_address_t *)(uintptr_t)(address)))
326 
327 static int vm_address_size = sizeof(mach_vm_address_t);
328 
329 static char *progname = "";
330 
331 /*************************/
332 /* xnu version functions */
333 /*************************/
334 
335 /* Find the xnu version string. */
336 char *
337 xnu_version_string()
338 {
339 	size_t length;
340 	int mib[2];
341 	mib[0] = CTL_KERN;
342 	mib[1] = KERN_VERSION;
343 
344 	T_QUIET;
345 	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 2, NULL, &length, NULL, 0), "sysctl()");
346 	char * version = (char *)malloc(length);
347 	T_QUIET;
348 	T_WITH_ERRNO;
349 	T_ASSERT_NOTNULL(version, "malloc()");
350 	T_QUIET;
351 	T_EXPECT_POSIX_SUCCESS(sysctl(mib, 2, version, &length, NULL, 0), "sysctl()");
352 	if (T_RESULT == T_RESULT_FAIL) {
353 		free(version);
354 		T_END;
355 	}
356 	char * xnu_string = strstr(version, "xnu-");
357 	free(version);
358 	T_QUIET;
359 	T_ASSERT_NOTNULL(xnu_string, "%s: error finding xnu version string.", progname);
360 	return xnu_string;
361 }
362 
363 /* Find the xnu major version number. */
364 unsigned int
365 xnu_major_version()
366 {
367 	char * endptr;
368 	char * xnu_substring = xnu_version_string() + 4;
369 
370 	errno                    = 0;
371 	unsigned int xnu_version = strtoul(xnu_substring, &endptr, 0);
372 	T_QUIET;
373 	T_ASSERT_TRUE((errno != ERANGE && endptr != xnu_substring),
374 	    "%s: error finding xnu major version number.", progname);
375 	return xnu_version;
376 }
377 
378 /*************************/
379 /* Mach assert functions */
380 /*************************/
381 
382 static inline void
383 assert_mach_return(kern_return_t kr, kern_return_t expected_kr, const char * mach_routine)
384 {
385 	T_QUIET; T_ASSERT_EQ(kr, expected_kr,
386 	    "%s unexpectedly returned: %s.\n"
387 	    "Should have returned: %s.",
388 	    mach_routine, mach_error_string(kr),
389 	    mach_error_string(expected_kr));
390 }
391 
392 /*******************************/
393 /* Arrays for test suite loops */
394 /*******************************/
395 
396 /* Memory allocators */
397 typedef kern_return_t (*allocate_fn_t)(vm_map_t, mach_vm_address_t *, mach_vm_size_t, int);
398 
399 
400 /*
401  * Remember any pre-reserved fixed address, which needs to be released prior to an allocation.
402  */
403 static mach_vm_address_t fixed_vm_address = 0x0;
404 static mach_vm_size_t fixed_vm_size = 0;
405 
406 /* forward decl */
407 void assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size);
408 
409 /*
410  * If trying to allocate at a fixed address, we need to do the delayed deallocate first.
411  */
412 static void
413 check_fixed_address(mach_vm_address_t *address, mach_vm_size_t size)
414 {
415 	if (fixed_vm_address != 0 &&
416 	    fixed_vm_address <= *address &&
417 	    *address + size <= fixed_vm_address + fixed_vm_size) {
418 		assert_deallocate_success(fixed_vm_address, fixed_vm_size);
419 		fixed_vm_address = 0;
420 		fixed_vm_size = 0;
421 	}
422 }
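/* Typical flow, as used by the fixtures below: get_fixed_address() reserves
 * a range with VM_FLAGS_ANYWHERE and records it in fixed_vm_address and
 * fixed_vm_size; the wrapper_* allocators then call check_fixed_address()
 * so that the reservation is released only just before the real
 * VM_FLAGS_FIXED allocation is attempted. */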
423 
424 kern_return_t
425 wrapper_mach_vm_allocate(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
426 {
427 	check_fixed_address(address, size);
428 	return mach_vm_allocate(map, address, size, flags);
429 }
430 
431 kern_return_t
432 wrapper_mach_vm_map(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
433 {
434 	check_fixed_address(address, size);
435 	return mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
436 	           VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
437 }
438 
439 /* Should have the same behavior as when mask is zero. */
440 kern_return_t
441 wrapper_mach_vm_map_4kB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
442 {
443 	check_fixed_address(address, size);
444 	return mach_vm_map(map, address, size, (mach_vm_offset_t)0xFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
445 	           VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
446 }
447 
448 kern_return_t
449 wrapper_mach_vm_map_2MB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
450 {
451 	check_fixed_address(address, size);
452 	return mach_vm_map(map, address, size, (mach_vm_offset_t)0x1FFFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
453 	           VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
454 }
455 
456 kern_return_t
457 memory_entry(mach_vm_size_t * size, mach_port_t *object_handle)
458 {
459 	mach_vm_size_t original_size = *size;
460 	kern_return_t kr;
461 
462 	kr = mach_make_memory_entry_64(mach_task_self(), size,
463 	    (memory_object_offset_t)0, (MAP_MEM_NAMED_CREATE | VM_PROT_ALL),
464 	    object_handle, 0);
465 	if (kr != KERN_SUCCESS) {
466 		return kr;
467 	}
468 	T_QUIET; T_ASSERT_EQ(*size, round_page(original_size),
469 	    "mach_make_memory_entry_64() unexpectedly returned a named "
470 	    "entry of size 0x%jx (%ju).\n"
471 	    "Should have returned a "
472 	    "named entry of size 0x%jx (%ju).",
473 	    (uintmax_t)*size, (uintmax_t)*size, (uintmax_t)original_size, (uintmax_t)original_size);
474 	return KERN_SUCCESS;
475 }
476 
477 kern_return_t
478 wrapper_mach_vm_map_named_entry(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
479 {
480 	mach_port_t object_handle = MACH_PORT_NULL;
481 	kern_return_t kr = memory_entry(&size, &object_handle);
482 
483 	if (kr != KERN_SUCCESS) {
484 		return kr;
485 	}
486 	check_fixed_address(address, size);
487 	kr = mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, object_handle, (memory_object_offset_t)0, FALSE,
488 	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
489 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_port_deallocate(mach_task_self(), object_handle), "mach_port_deallocate()");
490 	return kr;
491 }
492 
493 static struct {
494 	allocate_fn_t allocate;
495 	const char * description;
496 } allocators[] = {
497 	{wrapper_mach_vm_allocate, "mach_vm_allocate()"},
498 	{wrapper_mach_vm_map, "mach_vm_map() (zero mask)"},
499 	{wrapper_mach_vm_map_4kB,
500 	 "mach_vm_map() "
501 	 "(4 kB address alignment)"},
502 	{wrapper_mach_vm_map_2MB,
503 	 "mach_vm_map() "
504 	 "(2 MB address alignment)"},
505 	{wrapper_mach_vm_map_named_entry,
506 	 "mach_vm_map() (named "
507 	 "entry, zero mask)"},
508 };
509 static int numofallocators = sizeof(allocators) / sizeof(allocators[0]);
510 static int allocators_idx;
511 enum { MACH_VM_ALLOCATE, MACH_VM_MAP, MACH_VM_MAP_4kB, MACH_VM_MAP_2MB, MACH_VM_MAP_NAMED_ENTRY };
512 
513 /* VM size */
514 static struct {
515 	mach_vm_size_t size;
516 	const char * description;
517 } vm_sizes[] = {
518 	{DEFAULT_VM_SIZE, "default/input"},
519 	{0, "zero"},
520 	{4096ULL, "aligned"},
521 	{1ULL, "unaligned"},
522 	{4095ULL, "unaligned"},
523 	{4097ULL, "unaligned"},
524 };
525 static int numofsizes = sizeof(vm_sizes) / sizeof(vm_sizes[0]);
526 static int sizes_idx;
527 static int buffer_sizes_idx;
528 enum { DEFAULT_INPUT, ZERO_BYTES, ONE_PAGE, ONE_BYTE, ONE_PAGE_MINUS_ONE_BYTE, ONE_PAGE_AND_ONE_BYTE };
529 
530 /* Unspecified/fixed address */
531 static struct {
532 	int flag;
533 	const char * description;
534 } address_flags[] = {
535 	{VM_FLAGS_ANYWHERE, "unspecified"}, {VM_FLAGS_FIXED, "fixed"},
536 };
537 static int numofflags = sizeof(address_flags) / sizeof(address_flags[0]);
538 static int flags_idx;
539 enum { ANYWHERE, FIXED };
540 
541 /* Address alignment */
542 static struct {
543 	boolean_t alignment;
544 	const char * description;
545 } address_alignments[] = {
546 	{TRUE, " aligned"}, {FALSE, " unaligned"},
547 };
548 static int numofalignments = sizeof(address_alignments) / sizeof(*address_alignments);
549 static int alignments_idx;
550 enum { ALIGNED, UNALIGNED };
551 
552 /* Buffer offset */
553 static struct {
554 	int offset;
555 	const char * description;
556 } buffer_offsets[] = {
557 	{0, ""}, {1, ""}, {2, ""},
558 };
559 static int numofoffsets = sizeof(buffer_offsets) / sizeof(buffer_offsets[0]);
560 static int offsets_idx;
561 enum { ZERO, ONE, TWO };
562 
563 /* mach_vm_copy() post actions */
564 enum { VMCOPY_MODIFY_SRC, VMCOPY_MODIFY_DST, VMCOPY_MODIFY_SHARED_COPIED };
565 
566 static struct {
567 	int action;
568 	const char * description;
569 } vmcopy_actions[] = {
570 	{VMCOPY_MODIFY_SRC, "modify vm_copy() source"},
571 	{VMCOPY_MODIFY_DST, "modify vm_copy() destination"},
572 	{VMCOPY_MODIFY_SHARED_COPIED,
573 	 "modify vm_copy source's shared "
574 	 "or copied from/to region"},
575 };
576 static int numofvmcopyactions = sizeof(vmcopy_actions) / sizeof(vmcopy_actions[0]);
577 static int vmcopy_action_idx;
578 
579 /************************************/
580 /* Setters and getters for fixtures */
581 /************************************/
582 
583 /* Allocation memory range. */
584 static allocate_fn_t _allocator      = wrapper_mach_vm_allocate;
585 static mach_vm_size_t _vm_size       = DEFAULT_VM_SIZE;
586 static int _address_flag             = VM_FLAGS_ANYWHERE;
587 static boolean_t _address_alignment  = TRUE;
588 static mach_vm_address_t _vm_address = 0x0;
589 
590 /* Buffer for mach_vm_write(). */
591 static mach_vm_size_t _buffer_size       = DEFAULT_VM_SIZE;
592 static mach_vm_address_t _buffer_address = 0x0;
593 static int _buffer_offset                = 0;
594 
595 /* Post action for mach_vm_copy(). */
596 static int _vmcopy_post_action = VMCOPY_MODIFY_SRC;
597 
598 static void
599 set_allocator(allocate_fn_t allocate)
600 {
601 	_allocator = allocate;
602 }
603 
604 static allocate_fn_t
605 get_allocator()
606 {
607 	return _allocator;
608 }
609 
610 static void
611 set_vm_size(mach_vm_size_t size)
612 {
613 	_vm_size = size;
614 }
615 
616 static mach_vm_size_t
617 get_vm_size()
618 {
619 	return _vm_size;
620 }
621 
622 static void
623 set_address_flag(int flag)
624 {
625 	_address_flag = flag;
626 }
627 
628 static int
629 get_address_flag()
630 {
631 	return _address_flag;
632 }
633 
634 static void
635 set_address_alignment(boolean_t alignment)
636 {
637 	_address_alignment = alignment;
638 }
639 
640 static boolean_t
641 get_address_alignment()
642 {
643 	return _address_alignment;
644 }
645 
646 static void
647 set_vm_address(mach_vm_address_t address)
648 {
649 	_vm_address = address;
650 }
651 
652 static mach_vm_address_t
653 get_vm_address()
654 {
655 	return _vm_address;
656 }
657 
658 static void
659 set_buffer_size(mach_vm_size_t size)
660 {
661 	_buffer_size = size;
662 }
663 
664 static mach_vm_size_t
665 get_buffer_size()
666 {
667 	return _buffer_size;
668 }
669 
670 static void
671 set_buffer_address(mach_vm_address_t address)
672 {
673 	_buffer_address = address;
674 }
675 
676 static mach_vm_address_t
677 get_buffer_address()
678 {
679 	return _buffer_address;
680 }
681 
682 static void
683 set_buffer_offset(int offset)
684 {
685 	_buffer_offset = offset;
686 }
687 
688 static int
689 get_buffer_offset()
690 {
691 	return _buffer_offset;
692 }
693 
694 static void
695 set_vmcopy_post_action(int action)
696 {
697 	_vmcopy_post_action = action;
698 }
699 
700 static int
701 get_vmcopy_post_action()
702 {
703 	return _vmcopy_post_action;
704 }
705 
706 /*******************************/
707 /* Usage and option processing */
708 /*******************************/
709 static boolean_t flag_run_allocate_test = FALSE;
710 static boolean_t flag_run_deallocate_test = FALSE;
711 static boolean_t flag_run_read_test = FALSE;
712 static boolean_t flag_run_write_test = FALSE;
713 static boolean_t flag_run_protect_test = FALSE;
714 static boolean_t flag_run_copy_test = FALSE;
715 
716 #define VM_TEST_ALLOCATE   0x00000001
717 #define VM_TEST_DEALLOCATE 0x00000002
718 #define VM_TEST_READ       0x00000004
719 #define VM_TEST_WRITE      0x00000008
720 #define VM_TEST_PROTECT    0x00000010
721 #define VM_TEST_COPY       0x00000020
722 
723 typedef struct test_option {
724 	uint32_t        to_flags;
725 	int             to_quietness;
726 	mach_vm_size_t  to_vmsize;
727 } test_option_t;
728 
729 typedef struct test_info {
730 	char            *ti_name;
731 	boolean_t       *ti_flag;
732 } test_info_t;
733 
734 static test_option_t test_options;
735 
736 enum {ALLOCATE = 0, DEALLOCATE, READ, WRITE, PROTECT, COPY};
737 
738 static test_info_t test_info[] = {
739 	{"allocate", &flag_run_allocate_test},
740 	{"deallocate", &flag_run_deallocate_test},
741 	{"read", &flag_run_read_test},
742 	{"write", &flag_run_write_test},
743 	{"protect", &flag_run_protect_test},
744 	{"copy", &flag_run_copy_test},
745 	{NULL, NULL}
746 };
747 
748 static void
749 die_on_invalid_value(int condition, const char * value_string)
750 {
751 	T_QUIET;
752 	T_ASSERT_EQ(condition, 0, "%s: invalid value: %s.",
753 	    progname, value_string);
754 }
755 
756 static void
757 process_options(test_option_t options)
758 {
759 	test_info_t *tp;
760 
761 	setvbuf(stdout, NULL, _IONBF, 0);
762 
763 	set_vm_size(DEFAULT_VM_SIZE);
764 	set_quietness(DEFAULT_QUIETNESS);
765 
766 	if (NULL != getenv("LTERDOS")) {
767 		logr("LTERDOS=YES this is LeanTestEnvironment\nIncreasing quietness by 1.");
768 		set_quietness(get_quietness() + 1);
769 	} else {
770 		if (options.to_quietness > 0) {
771 			set_quietness(options.to_quietness);
772 		}
773 	}
774 
775 	if (options.to_vmsize != 0) {
776 		vm_sizes[0].size = options.to_vmsize;
777 	}
778 
779 	if (options.to_flags == 0) {
780 		for (tp = test_info; tp->ti_name != NULL; ++tp) {
781 			*tp->ti_flag = TRUE;
782 		}
783 	} else {
784 		if (options.to_flags & VM_TEST_ALLOCATE) {
785 			*(test_info[ALLOCATE].ti_flag) = TRUE;
786 		}
787 
788 		if (options.to_flags & VM_TEST_DEALLOCATE) {
789 			*(test_info[DEALLOCATE].ti_flag) = TRUE;
790 		}
791 
792 		if (options.to_flags & VM_TEST_READ) {
793 			*(test_info[READ].ti_flag) = TRUE;
794 		}
795 
796 		if (options.to_flags & VM_TEST_WRITE) {
797 			*(test_info[WRITE].ti_flag) = TRUE;
798 		}
799 
800 		if (options.to_flags & VM_TEST_PROTECT) {
801 			*(test_info[PROTECT].ti_flag) = TRUE;
802 		}
803 
804 		if (options.to_flags & VM_TEST_COPY) {
805 			*(test_info[COPY].ti_flag) = TRUE;
806 		}
807 	}
808 }
809 
810 /*****************/
811 /* Various tools */
812 /*****************/
813 
814 /* Find the allocator address alignment mask. */
815 mach_vm_address_t
816 get_mask()
817 {
818 	mach_vm_address_t mask;
819 
820 	if (get_allocator() == wrapper_mach_vm_map_2MB) {
821 		mask = (mach_vm_address_t)0x1FFFFF;
822 	} else {
823 		mask = vm_page_size - 1;
824 	}
825 	return mask;
826 }
827 
828 /* Find the size of the smallest aligned region containing a given
829  * memory range. */
830 mach_vm_size_t
831 aligned_size(mach_vm_address_t address, mach_vm_size_t size)
832 {
833 	return round_page(address - mach_vm_trunc_page(address) + size);
834 }
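/* Worked example (assuming a 4 kB page size): for address 0x1001 and size
 * 0x1000, the range straddles two pages, so aligned_size() returns 0x2000. */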
835 
836 /********************/
837 /* Assert functions */
838 /********************/
839 
840 /* Address is aligned on allocator boundary. */
841 static inline void
842 assert_aligned_address(mach_vm_address_t address)
843 {
844 	T_QUIET; T_ASSERT_EQ((address & get_mask()), 0,
845 	    "Address 0x%jx is unexpectedly "
846 	    "unaligned.",
847 	    (uintmax_t)address);
848 }
849 
850 /* Address is truncated to allocator boundary. */
851 static inline void
852 assert_trunc_address(mach_vm_address_t address, mach_vm_address_t trunc_address)
853 {
854 	T_QUIET; T_ASSERT_EQ(trunc_address, (address & ~get_mask()),
855 	    "Address "
856 	    "0x%jx is unexpectedly not truncated to address 0x%jx.",
857 	    (uintmax_t)address, (uintmax_t)trunc_address);
858 }
859 
860 static inline void
861 assert_address_value(mach_vm_address_t address, mach_vm_address_t marker)
862 {
863 	/* This assert runs so frequently that we check the condition directly
864 	 * ourselves instead of going through the libdarwintest macros, for efficiency.
865 	 */
866 	if (MACH_VM_ADDRESS_T(address) != marker) {
867 		T_ASSERT_FAIL("Address 0x%jx unexpectedly has value 0x%jx, "
868 		    "instead of 0x%jx.", (uintmax_t)address,
869 		    (uintmax_t)MACH_VM_ADDRESS_T(address), (uintmax_t)marker);
870 	}
871 }
872 
873 void
874 assert_allocate_return(mach_vm_address_t * address, mach_vm_size_t size, int address_flag, kern_return_t expected_kr)
875 {
876 	assert_mach_return(get_allocator()(mach_task_self(), address, size, address_flag), expected_kr, "Allocator");
877 }
878 
879 void
880 assert_allocate_success(mach_vm_address_t * address, mach_vm_size_t size, int address_flag)
881 {
882 	assert_allocate_return(address, size, address_flag, KERN_SUCCESS);
883 }
884 
885 void
886 assert_deallocate_return(mach_vm_address_t address, mach_vm_size_t size, kern_return_t expected_kr)
887 {
888 	assert_mach_return(mach_vm_deallocate(mach_task_self(), address, size), expected_kr, "mach_vm_deallocate()");
889 }
890 
891 void
892 assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size)
893 {
894 	assert_deallocate_return(address, size, KERN_SUCCESS);
895 }
896 
897 void
898 assert_read_return(mach_vm_address_t address,
899     mach_vm_size_t size,
900     vm_offset_t * data,
901     mach_msg_type_number_t * data_size,
902     kern_return_t expected_kr)
903 {
904 	assert_mach_return(mach_vm_read(mach_task_self(), address, size, data, data_size), expected_kr, "mach_vm_read()");
905 }
906 
907 void
908 assert_read_success(mach_vm_address_t address, mach_vm_size_t size, vm_offset_t * data, mach_msg_type_number_t * data_size)
909 {
910 	assert_read_return(address, size, data, data_size, KERN_SUCCESS);
911 	T_QUIET; T_ASSERT_EQ(*data_size, size,
912 	    "Returned buffer size 0x%jx "
913 	    "(%ju) is unexpectedly different from source size 0x%jx "
914 	    "(%ju).",
915 	    (uintmax_t)*data_size, (uintmax_t)*data_size, (uintmax_t)size, (uintmax_t)size);
916 }
917 
918 void
919 assert_write_return(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size, kern_return_t expected_kr)
920 {
921 	assert_mach_return(mach_vm_write(mach_task_self(), address, data, data_size), expected_kr, "mach_vm_write()");
922 }
923 
924 void
925 assert_write_success(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size)
926 {
927 	assert_write_return(address, data, data_size, KERN_SUCCESS);
928 }
929 
930 void
931 assert_allocate_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest, kern_return_t expected_kr)
932 {
933 	assert_allocate_success(dest, size, VM_FLAGS_ANYWHERE);
934 	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, *dest), expected_kr, "mach_vm_copy()");
935 }
936 void
937 assert_allocate_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest)
938 {
939 	assert_allocate_copy_return(source, size, dest, KERN_SUCCESS);
940 }
941 
942 void
943 assert_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest, kern_return_t expected_kr)
944 {
945 	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, dest), expected_kr, "mach_vm_copy()");
946 }
947 
948 void
949 assert_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest)
950 {
951 	assert_copy_return(source, size, dest, KERN_SUCCESS);
952 }
953 
954 /*******************/
955 /* Memory patterns */
956 /*******************/
957 
958 typedef boolean_t (*address_filter_t)(mach_vm_address_t);
959 typedef void (*address_action_t)(mach_vm_address_t, mach_vm_address_t);
960 
961 /* Map over a memory region pattern and its complement, through a
962  * (possibly reversed) boolean filter and a starting value. */
963 void
964 filter_addresses_do_else(address_filter_t filter,
965     boolean_t reversed,
966     mach_vm_address_t address,
967     mach_vm_size_t size,
968     address_action_t if_action,
969     address_action_t else_action,
970     mach_vm_address_t start_value)
971 {
972 	mach_vm_address_t i;
973 	for (i = 0; i + vm_address_size < size; i += vm_address_size) {
974 		if (filter(address + i) != reversed) {
975 			if_action(address + i, start_value + i);
976 		} else {
977 			else_action(address + i, start_value + i);
978 		}
979 	}
980 }
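/* Illustrative call: filter_addresses_do_else(checkerboard, FALSE, addr,
 * size, write_address, no_action, addr) stores the value (addr + i) into
 * every word for which checkerboard(addr + i) is TRUE and leaves the other
 * words untouched; passing reversed = TRUE flips which half of the pattern
 * is acted on. */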
981 
982 /* Various pattern actions. */
983 void
984 no_action(mach_vm_address_t i, mach_vm_address_t value)
985 {
986 }
987 
988 void
989 read_zero(mach_vm_address_t i, mach_vm_address_t value)
990 {
991 	assert_address_value(i, 0);
992 }
993 
994 void
995 verify_address(mach_vm_address_t i, mach_vm_address_t value)
996 {
997 	assert_address_value(i, value);
998 }
999 
1000 void
1001 write_address(mach_vm_address_t i, mach_vm_address_t value)
1002 {
1003 	MACH_VM_ADDRESS_T(i) = value;
1004 }
1005 
1006 /* Various patterns. */
1007 boolean_t
1008 empty(mach_vm_address_t i)
1009 {
1010 	return FALSE;
1011 }
1012 
1013 boolean_t
1014 checkerboard(mach_vm_address_t i)
1015 {
1016 	return !((i / vm_address_size) & 0x1);
1017 }
1018 
1019 boolean_t
1020 page_ends(mach_vm_address_t i)
1021 {
1022 	mach_vm_address_t residue = i % vm_page_size;
1023 
1024 	return residue == 0 || residue == vm_page_size - vm_address_size;
1025 }
1026 
1027 /*************************************/
1028 /* Global variables set up functions */
1029 /*************************************/
1030 
1031 void
1032 set_up_allocator()
1033 {
1034 	T_QUIET; T_ASSERT_TRUE(allocators_idx >= 0 && allocators_idx < numofallocators, "Invalid allocators[] index: %d.", allocators_idx);
1035 	set_allocator(allocators[allocators_idx].allocate);
1036 }
1037 
1038 /* Find a fixed allocatable address by retrieving the address
1039  * populated by mach_vm_allocate() with VM_FLAGS_ANYWHERE. */
1040 mach_vm_address_t
1041 get_fixed_address(mach_vm_size_t size)
1042 {
1043 	/* mach_vm_map() starts looking for an address at 0x0. */
1044 	mach_vm_address_t address = 0x0;
1045 
1046 	/*
1047 	 * The tests seem to have some funky off by one allocations. To avoid problems, we'll bump anything
1048 	 * non-zero to have at least an extra couple pages.
1049 	 */
1050 	if (size != 0) {
1051 		size = round_page(size + 2 * vm_page_size);
1052 	}
1053 
1054 	assert_allocate_success(&address, size, VM_FLAGS_ANYWHERE);
1055 
1056 	/*
1057 	 * Keep the memory allocated, otherwise the logv()/printf() activity sprinkled in these tests can
1058 	 * cause malloc() to use the desired range and tests will randomly fail. The allocate routines will
1059 	 * do the delayed vm_deallocate() to free the fixed memory just before allocation testing in the wrapper.
1060 	 */
1061 	T_QUIET; T_ASSERT_EQ(fixed_vm_address, 0, "previous fixed address not used");
1062 	T_QUIET; T_ASSERT_EQ(fixed_vm_size, 0, "previous fixed size not used");
1063 	fixed_vm_address = address;
1064 	fixed_vm_size = size;
1065 
1066 	assert_aligned_address(address);
1067 	return address;
1068 }
1069 
1070 /* If needed, find an address at which a region of the specified size
1071  * can be allocated. Otherwise, set the address to 0x0. */
1072 void
1073 set_up_vm_address(mach_vm_size_t size)
1074 {
1075 	T_QUIET; T_ASSERT_TRUE(flags_idx >= 0 && flags_idx < numofflags, "Invalid address_flags[] index: %d.", flags_idx);
1076 	T_QUIET; T_ASSERT_TRUE(alignments_idx >= 0 && alignments_idx < numofalignments, "Invalid address_alignments[] index: %d.", alignments_idx);
1077 	set_address_flag(address_flags[flags_idx].flag);
1078 	set_address_alignment(address_alignments[alignments_idx].alignment);
1079 
1080 	if (!(get_address_flag() & VM_FLAGS_ANYWHERE)) {
1081 		boolean_t aligned = get_address_alignment();
1082 		logv(
1083 			"Looking for fixed %saligned address for allocation "
1084 			"of 0x%jx (%ju) byte%s...",
1085 			aligned ? "" : "un", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1086 		mach_vm_address_t address = get_fixed_address(size);
1087 		if (!aligned) {
1088 			address++;
1089 		}
1090 		set_vm_address(address);
1091 		logv("Found %saligned fixed address 0x%jx.", aligned ? "" : "un", (uintmax_t)address);
1092 	} else {
1093 		/* mach_vm_map() with VM_FLAGS_ANYWHERE starts looking for
1094 		 *  an address at the one supplied and goes up, without
1095 		 *  wrapping around. */
1096 		set_vm_address(0x0);
1097 	}
1098 }
1099 
1100 void
1101 set_up_vm_size()
1102 {
1103 	T_QUIET; T_ASSERT_TRUE(sizes_idx >= 0 && sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", sizes_idx);
1104 	set_vm_size(vm_sizes[sizes_idx].size);
1105 }
1106 
1107 void
1108 set_up_buffer_size()
1109 {
1110 	T_QUIET; T_ASSERT_TRUE(buffer_sizes_idx >= 0 && buffer_sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", buffer_sizes_idx);
1111 	set_buffer_size(vm_sizes[buffer_sizes_idx].size);
1112 }
1113 
1114 void
1115 set_up_buffer_offset()
1116 {
1117 	T_QUIET; T_ASSERT_TRUE(offsets_idx >= 0 && offsets_idx < numofoffsets, "Invalid buffer_offsets[] index: %d.", offsets_idx);
1118 	set_buffer_offset(buffer_offsets[offsets_idx].offset);
1119 }
1120 
1121 void
1122 set_up_vmcopy_action()
1123 {
1124 	T_QUIET; T_ASSERT_TRUE(vmcopy_action_idx >= 0 && vmcopy_action_idx < numofvmcopyactions, "Invalid vmcopy_actions[] index: %d.",
1125 	    vmcopy_action_idx);
1126 	set_vmcopy_post_action(vmcopy_actions[vmcopy_action_idx].action);
1127 }
1128 
1129 void
1130 set_up_allocator_and_vm_size()
1131 {
1132 	set_up_allocator();
1133 	set_up_vm_size();
1134 }
1135 
1136 void
1137 set_up_vm_variables()
1138 {
1139 	set_up_vm_size();
1140 	set_up_vm_address(get_vm_size());
1141 }
1142 
1143 void
1144 set_up_allocator_and_vm_variables()
1145 {
1146 	set_up_allocator();
1147 	set_up_vm_variables();
1148 }
1149 
1150 void
1151 set_up_buffer_variables()
1152 {
1153 	set_up_buffer_size();
1154 	set_up_buffer_offset();
1155 }
1156 
1157 void
1158 set_up_copy_shared_mode_variables()
1159 {
1160 	set_up_vmcopy_action();
1161 }
1162 
1163 /*******************************/
1164 /* Allocation set up functions */
1165 /*******************************/
1166 
1167 /* Allocate VM region of given size. */
1168 void
1169 allocate(mach_vm_size_t size)
1170 {
1171 	mach_vm_address_t address = get_vm_address();
1172 	int flag                  = get_address_flag();
1173 
1174 	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1175 	if (!(flag & VM_FLAGS_ANYWHERE)) {
1176 		logv(" at address 0x%jx", (uintmax_t)address);
1177 	}
1178 	logv("...");
1179 	assert_allocate_success(&address, size, flag);
1180 	logv(
1181 		"Memory of rounded size 0x%jx (%ju) allocated at "
1182 		"address 0x%jx.",
1183 		(uintmax_t)round_page(size), (uintmax_t)round_page(size), (uintmax_t)address);
1184 	/* Fixed allocation address is truncated to the allocator
1185 	 *  boundary. */
1186 	if (!(flag & VM_FLAGS_ANYWHERE)) {
1187 		mach_vm_address_t old_address = get_vm_address();
1188 		assert_trunc_address(old_address, address);
1189 		logv(
1190 			"Address 0x%jx is correctly truncated to allocated "
1191 			"address 0x%jx.",
1192 			(uintmax_t)old_address, (uintmax_t)address);
1193 	}
1194 	set_vm_address(address);
1195 }
1196 
1197 void
1198 allocate_buffer(mach_vm_size_t buffer_size)
1199 {
1200 	mach_vm_address_t data = 0x0;
1201 
1202 	logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)buffer_size, (uintmax_t)buffer_size, (buffer_size == 1) ? "" : "s");
1203 	assert_allocate_success(&data, buffer_size, VM_FLAGS_ANYWHERE);
1204 	logv(
1205 		"Memory of rounded size 0x%jx (%ju) allocated at "
1206 		"address 0x%jx.",
1207 		(uintmax_t)round_page(buffer_size), (uintmax_t)round_page(buffer_size), (uintmax_t)data);
1208 	data += get_buffer_offset();
1209 	T_QUIET; T_ASSERT_EQ((vm_offset_t)data, data,
1210 	    "Address 0x%jx "
1211 	    "unexpectedly overflows to 0x%jx when cast as "
1212 	    "vm_offset_t type.",
1213 	    (uintmax_t)data, (uintmax_t)(vm_offset_t)data);
1214 	set_buffer_address(data);
1215 }
1216 
1217 /****************************************************/
1218 /* Global variables and allocation set up functions */
1219 /****************************************************/
1220 
1221 void
1222 set_up_vm_variables_and_allocate()
1223 {
1224 	set_up_vm_variables();
1225 	allocate(get_vm_size());
1226 }
1227 
1228 void
1229 set_up_allocator_and_vm_variables_and_allocate()
1230 {
1231 	set_up_allocator();
1232 	set_up_vm_variables_and_allocate();
1233 }
1234 
1235 void
1236 set_up_vm_variables_and_allocate_extra_page()
1237 {
1238 	set_up_vm_size();
1239 	/* Increment the size to ensure we get an extra allocated page
1240 	 *  for unaligned start addresses. */
1241 	mach_vm_size_t allocation_size = get_vm_size() + 1;
1242 	set_up_vm_address(allocation_size);
1243 
1244 	allocate(allocation_size);
1245 	/* In the fixed unaligned address case, restore the returned
1246 	*  (truncated) allocation address to its unaligned value. */
1247 	if (!get_address_alignment()) {
1248 		set_vm_address(get_vm_address() + 1);
1249 	}
1250 }
1251 
1252 void
1253 set_up_buffer_variables_and_allocate_extra_page()
1254 {
1255 	set_up_buffer_variables();
1256 	/* Increment the size to ensure we get an extra allocated page
1257 	 *  for unaligned start addresses. */
1258 	allocate_buffer(get_buffer_size() + get_buffer_offset());
1259 }
1260 
1261 /* Allocate some destination and buffer memory for subsequent
1262  * writing, including extra pages for non-aligned start addresses. */
1263 void
1264 set_up_vm_and_buffer_variables_allocate_for_writing()
1265 {
1266 	set_up_vm_variables_and_allocate_extra_page();
1267 	set_up_buffer_variables_and_allocate_extra_page();
1268 }
1269 
1270 /* Allocate some destination and source regions for subsequent
1271  * copying, including extra pages for non-aligned start addresses. */
1272 void
1273 set_up_vm_and_buffer_variables_allocate_for_copying()
1274 {
1275 	set_up_vm_and_buffer_variables_allocate_for_writing();
1276 }
1277 
1278 /************************************/
1279 /* Deallocation tear down functions */
1280 /************************************/
1281 
1282 void
1283 deallocate_range(mach_vm_address_t address, mach_vm_size_t size)
1284 {
1285 	logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1286 	    (uintmax_t)address);
1287 	assert_deallocate_success(address, size);
1288 }
1289 
1290 void
1291 deallocate()
1292 {
1293 	deallocate_range(get_vm_address(), get_vm_size());
1294 }
1295 
1296 /* Deallocate source memory, including the extra page for unaligned
1297  * start addresses. */
1298 void
1299 deallocate_extra_page()
1300 {
1301 	/* Set the address and size to their original allocation
1302 	 *  values. */
1303 	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
1304 }
1305 
1306 /* Deallocate buffer and destination memory for mach_vm_write(),
1307  * including the extra page for unaligned start addresses. */
1308 void
1309 deallocate_vm_and_buffer()
1310 {
1311 	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
1312 	deallocate_range(mach_vm_trunc_page(get_buffer_address()), get_buffer_size() + get_buffer_offset());
1313 }
1314 
1315 /***********************************/
1316 /* mach_vm_read() set up functions */
1317 /***********************************/
1318 
1319 /* Read the source memory into a buffer, deallocate the source, set
1320  * the global address and size from the buffer's. */
1321 void
1322 read_deallocate()
1323 {
1324 	mach_vm_size_t size       = get_vm_size();
1325 	mach_vm_address_t address = get_vm_address();
1326 	vm_offset_t read_address;
1327 	mach_msg_type_number_t read_size;
1328 
1329 	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1330 	    (uintmax_t)address);
1331 	assert_read_success(address, size, &read_address, &read_size);
1332 	logv(
1333 		"Memory of size 0x%jx (%ju) read into buffer of "
1334 		"address 0x%jx.",
1335 		(uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)read_address);
1336 	/* Deallocate the originally allocated memory, including the
1337 	 *  extra allocated page in
1338 	 *  set_up_vm_variables_and_allocate_extra_page(). */
1339 	deallocate_range(mach_vm_trunc_page(address), size + 1);
1340 
1341 	/* Promoting to mach_vm types after checking for overflow, and
1342 	 *  setting the global address from the buffer's. */
1343 	T_QUIET; T_ASSERT_EQ((mach_vm_address_t)read_address, read_address,
1344 	    "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1345 	    "as mach_vm_address_t type.",
1346 	    (uintmax_t)read_address, (uintmax_t)(mach_vm_address_t)read_address);
1347 	T_QUIET; T_ASSERT_EQ((mach_vm_size_t)read_size, read_size,
1348 	    "Size 0x%jx (%ju) unexpectedly overflows to 0x%jx (%ju) "
1349 	    "when cast as mach_vm_size_t type.",
1350 	    (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)(mach_vm_size_t)read_size, (uintmax_t)(mach_vm_size_t)read_size);
1351 	set_vm_address((mach_vm_address_t)read_address);
1352 	set_vm_size((mach_vm_size_t)read_size);
1353 }
1354 
1355 /* Allocate some source memory, read it into a buffer, deallocate the
1356  * source, set the global address and size from the buffer's. */
1357 void
1358 set_up_vm_variables_allocate_read_deallocate()
1359 {
1360 	set_up_vm_variables_and_allocate_extra_page();
1361 	read_deallocate();
1362 }
1363 
1364 /************************************/
1365 /* mach_vm_write() set up functions */
1366 /************************************/
1367 
1368 /* Write the buffer into the destination memory. */
1369 void
1370 write_buffer()
1371 {
1372 	mach_vm_address_t address          = get_vm_address();
1373 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
1374 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
1375 
1376 	logv(
1377 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
1378 		"memory at address 0x%jx...",
1379 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
1380 	assert_write_success(address, data, buffer_size);
1381 	logv("Buffer written.");
1382 }
1383 
1384 /* Allocate some destination and buffer memory, and write the buffer
1385  * into the destination memory. */
1386 void
1387 set_up_vm_and_buffer_variables_allocate_write()
1388 {
1389 	set_up_vm_and_buffer_variables_allocate_for_writing();
1390 	write_buffer();
1391 }
1392 
1393 /***********************************/
1394 /* mach_vm_copy() set up functions */
1395 /***********************************/
1396 
1397 void
1398 copy_deallocate(void)
1399 {
1400 	mach_vm_size_t size      = get_vm_size();
1401 	mach_vm_address_t source = get_vm_address();
1402 	mach_vm_address_t dest   = 0;
1403 
1404 	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1405 	    (uintmax_t)source);
1406 	assert_allocate_copy_success(source, size, &dest);
1407 	logv(
1408 		"Memory of size 0x%jx (%ju) copied into region of "
1409 		"address 0x%jx.",
1410 		(uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1411 	/* Deallocate the originally allocated memory, including the
1412 	 *  extra allocated page in
1413 	 *  set_up_vm_variables_and_allocate_extra_page(). */
1414 	deallocate_range(mach_vm_trunc_page(source), size + 1);
1415 	/* Check that the destination address does not overflow when cast to
1416 	 *  vm_offset_t, then set the global address and size from the destination's. */
1417 	T_QUIET; T_ASSERT_EQ((vm_offset_t)dest, dest,
1418 	    "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1419 	    "as vm_offset_t type.",
1420 	    (uintmax_t)dest, (uintmax_t)(vm_offset_t)dest);
1421 	set_vm_address(dest);
1422 	set_vm_size(size);
1423 }
1424 
1425 /* Copy the source region into the destination region. */
1426 void
1427 copy_region()
1428 {
1429 	mach_vm_address_t source    = get_vm_address();
1430 	mach_vm_address_t dest      = get_buffer_address();
1431 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
1432 
1433 	logv(
1434 		"Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
1435 		"memory at address 0x%jx...",
1436 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1437 	assert_copy_success(source, size, dest);
1438 	logv("Region copied.");
1439 }
1440 
1441 /* Allocate some source memory, copy it to another region, deallocate the
1442 * source, set the global address and size from the destination region. */
1443 void
1444 set_up_vm_variables_allocate_copy_deallocate()
1445 {
1446 	set_up_vm_variables_and_allocate_extra_page();
1447 	copy_deallocate();
1448 }
1449 
1450 /* Allocate some destination and source memory, and copy the source
1451  * into the destination memory. */
1452 void
1453 set_up_source_and_dest_variables_allocate_copy()
1454 {
1455 	set_up_vm_and_buffer_variables_allocate_for_copying();
1456 	copy_region();
1457 }
1458 
1459 /**************************************/
1460 /* mach_vm_protect() set up functions */
1461 /**************************************/
1462 
1463 void
1464 set_up_vm_variables_allocate_protect(vm_prot_t protection, const char * protection_name)
1465 {
1466 	set_up_vm_variables_and_allocate_extra_page();
1467 	mach_vm_size_t size       = get_vm_size();
1468 	mach_vm_address_t address = get_vm_address();
1469 
1470 	logv(
1471 		"Setting %s-protection on 0x%jx (%ju) byte%s at address "
1472 		"0x%jx...",
1473 		protection_name, (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
1474 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, protection), "mach_vm_protect()");
1475 	logv("Region %s-protected.", protection_name);
1476 }
1477 
1478 void
1479 set_up_vm_variables_allocate_readprotect()
1480 {
1481 	set_up_vm_variables_allocate_protect(VM_PROT_WRITE, "read");
1482 }
1483 
1484 void
1485 set_up_vm_variables_allocate_writeprotect()
1486 {
1487 	set_up_vm_variables_allocate_protect(VM_PROT_READ, "write");
1488 }
1489 
1490 /*****************/
1491 /* Address tests */
1492 /*****************/
1493 
1494 /* Allocated address is nonzero iff size is nonzero. */
1495 void
1496 test_nonzero_address_iff_nonzero_size()
1497 {
1498 	mach_vm_address_t address = get_vm_address();
1499 	mach_vm_size_t size       = get_vm_size();
1500 
1501 	T_QUIET; T_ASSERT_TRUE((address && size) || (!address && !size), "Address 0x%jx is unexpectedly %szero.", (uintmax_t)address,
1502 	    address ? "non" : "");
1503 	logv("Address 0x%jx is %szero as expected.", (uintmax_t)address, size ? "non" : "");
1504 }
1505 
1506 /* Allocated address is aligned. */
1507 void
1508 test_aligned_address()
1509 {
1510 	mach_vm_address_t address = get_vm_address();
1511 
1512 	assert_aligned_address(address);
1513 	logv("Address 0x%jx is aligned.", (uintmax_t)address);
1514 }
1515 
1516 /************************/
1517 /* Read and write tests */
1518 /************************/
1519 
1520 void
1521 verify_pattern(
1522 	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
1523 {
1524 	logv(
1525 		"Verifying %s pattern on region of address 0x%jx "
1526 		"and size 0x%jx (%ju)...",
1527 		pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1528 	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
1529 	logv("Pattern verified.");
1530 }
1531 
1532 void
1533 write_pattern(
1534 	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
1535 {
1536 	logv(
1537 		"Writing %s pattern on region of address 0x%jx "
1538 		"and size 0x%jx (%ju)...",
1539 		pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1540 	filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
1541 	logv("Pattern written.");
1542 }
1543 
1544 void
1545 write_and_verify_pattern(
1546 	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
1547 {
1548 	logv(
1549 		"Writing and verifying %s pattern on region of "
1550 		"address 0x%jx and size 0x%jx (%ju)...",
1551 		pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1552 	filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
1553 	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
1554 	logv("Pattern written and verified.");
1555 }
1556 
1557 /* Verify that the smallest aligned region containing the
1558  * given range is zero-filled. */
1559 void
1560 test_zero_filled()
1561 {
1562 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), aligned_size(get_vm_address(), get_vm_size()),
1563 	    "zero-filled");
1564 }
1565 
1566 void
1567 test_write_address_filled()
1568 {
1569 	write_and_verify_pattern(empty, TRUE, get_vm_address(), round_page(get_vm_size()), "address-filled");
1570 }
1571 
1572 void
1573 test_write_checkerboard()
1574 {
1575 	write_and_verify_pattern(checkerboard, FALSE, get_vm_address(), round_page(get_vm_size()), "checkerboard");
1576 }
1577 
1578 void
1579 test_write_reverse_checkerboard()
1580 {
1581 	write_and_verify_pattern(checkerboard, TRUE, get_vm_address(), round_page(get_vm_size()), "reverse checkerboard");
1582 }
1583 
1584 void
1585 test_write_page_ends()
1586 {
1587 	write_and_verify_pattern(page_ends, FALSE, get_vm_address(), round_page(get_vm_size()), "page ends");
1588 }
1589 
1590 void
test_write_page_interiors()1591 test_write_page_interiors()
1592 {
1593 	write_and_verify_pattern(page_ends, TRUE, get_vm_address(), round_page(get_vm_size()), "page interiors");
1594 }
1595 
1596 /*********************************/
1597 /* Allocation error return tests */
1598 /*********************************/
1599 
1600 /* Reallocating a page in the smallest aligned region containing the
1601  * given allocated range fails. */
1602 void
test_reallocate_pages()1603 test_reallocate_pages()
1604 {
1605 	allocate_fn_t allocator   = get_allocator();
1606 	vm_map_t this_task        = mach_task_self();
1607 	mach_vm_address_t address = mach_vm_trunc_page(get_vm_address());
1608 	mach_vm_size_t size       = aligned_size(get_vm_address(), get_vm_size());
1609 	mach_vm_address_t i;
1610 	kern_return_t kr;
1611 
1612 	logv(
1613 		"Reallocating pages in allocated region of address 0x%jx "
1614 		"and size 0x%jx (%ju)...",
1615 		(uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1616 	for (i = address; i < address + size; i += vm_page_size) {
1617 		kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
1618 		T_QUIET; T_ASSERT_EQ(kr, KERN_NO_SPACE,
1619 		    "Allocator "
1620 		    "at address 0x%jx unexpectedly returned: %s.\n"
1621 		    "Should have returned: %s.",
1622 		    (uintmax_t)i, mach_error_string(kr), mach_error_string(KERN_NO_SPACE));
1623 	}
1624 	logv("Returned expected error at each page: %s.", mach_error_string(KERN_NO_SPACE));
1625 }
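/*
 * The failure exercised above can be reproduced with two raw calls; a
 * minimal sketch (illustrative only), with `page` being a freshly
 * allocated page:
 *
 *     mach_vm_address_t page = 0;
 *     kern_return_t kr;
 *     kr = mach_vm_allocate(mach_task_self(), &page, vm_page_size, VM_FLAGS_ANYWHERE);
 *     // Re-requesting the same page as a fixed allocation collides with
 *     // the existing entry and returns KERN_NO_SPACE.
 *     kr = mach_vm_allocate(mach_task_self(), &page, vm_page_size, VM_FLAGS_FIXED);
 */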
1626 
1627 /* Allocating in VM_MAP_NULL fails. */
1628 void
test_allocate_in_null_map()1629 test_allocate_in_null_map()
1630 {
1631 	mach_vm_address_t address = get_vm_address();
1632 	mach_vm_size_t size       = get_vm_size();
1633 	int flag                  = get_address_flag();
1634 
1635 	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1636 	if (!(flag & VM_FLAGS_ANYWHERE)) {
1637 		logv(" at address 0x%jx", (uintmax_t)address);
1638 	}
1639 	logv(" in NULL VM map...");
1640 	assert_mach_return(get_allocator()(VM_MAP_NULL, &address, size, flag), MACH_SEND_INVALID_DEST, "Allocator");
1641 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
1642 }
1643 
1644 /* Allocating with non-user flags fails. */
1645 void
test_allocate_with_kernel_flags()1646 test_allocate_with_kernel_flags()
1647 {
1648 	allocate_fn_t allocator   = get_allocator();
1649 	vm_map_t this_task        = mach_task_self();
1650 	mach_vm_address_t address = get_vm_address();
1651 	mach_vm_size_t size       = get_vm_size();
1652 	int flag                  = get_address_flag();
1653 	int bad_flag, i;
1654 	kern_return_t kr;
1655 	int valid_flags = VM_FLAGS_USER_ALLOCATE | VM_FLAGS_USER_MAP | VM_FLAGS_USER_REMAP | VM_FLAGS_ALIAS_MASK;
1656 
1657 	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1658 	if (!(flag & VM_FLAGS_ANYWHERE)) {
1659 		logv(" at address 0x%jx", (uintmax_t)address);
1660 	}
1661 	logv(" with various invalid flags...");
1662 	for (i = 0; i < sizeof(int) * 8; i++) {
1663 		int test_flag = 1 << i;
1664 
1665 		/* Skip user valid flags */
1666 		if (valid_flags & test_flag) {
1667 			continue;
1668 		}
1669 
1670 		bad_flag = test_flag | flag;
1671 		kr = allocator(this_task, &address, size, bad_flag);
1672 		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1673 		    "Allocator "
1674 		    "with invalid flag 0x%x unexpectedly returned: %s.\n"
1675 		    "Should have returned: %s.",
1676 		    bad_flag, mach_error_string(kr), mach_error_string(KERN_INVALID_ARGUMENT));
1677 	}
1678 	logv("Returned expected error with each invalid flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1679 }
1680 
1681 /* Allocating super-page with incompatible flags fails. */
1682 void
test_allocate_superpage_with_incompatible_flags()1683 test_allocate_superpage_with_incompatible_flags()
1684 {
1685 	allocate_fn_t allocator   = get_allocator();
1686 	vm_map_t this_task        = mach_task_self();
1687 	mach_vm_address_t address = get_vm_address();
1688 	mach_vm_size_t size       = get_vm_size();
1689 	int flag                  = get_address_flag();
1690 	int bad_flag, i;
1691 	kern_return_t kr;
1692 	int incompatible_flags = VM_FLAGS_PURGABLE | VM_FLAGS_TPRO;
1693 
1694 	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1695 	if (!(flag & VM_FLAGS_ANYWHERE)) {
1696 		logv(" at address 0x%jx", (uintmax_t)address);
1697 	}
1698 	logv(" with various incompatible flags...");
1699 	for (i = 0; i < sizeof(int) * 8; i++) {
1700 		int test_flag = 1 << i;
1701 
1702 		/* Skip compatible flags */
1703 		if (!(incompatible_flags & test_flag)) {
1704 			continue;
1705 		}
1706 
1707 		bad_flag = test_flag | flag | VM_FLAGS_SUPERPAGE_SIZE_ANY;
1708 		kr = allocator(this_task, &address, size, bad_flag);
1709 		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1710 		    "Allocator "
1711 		    "with invalid flag 0x%x unexpectedly returned: %s.\n"
1712 		    "Should have returned: %s.",
1713 		    bad_flag, mach_error_string(kr), mach_error_string(KERN_INVALID_ARGUMENT));
1714 	}
1715 	logv("Returned expected error with each invalid flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1716 }
1717 
1718 /*****************************/
1719 /* mach_vm_map() error tests */
1720 /*****************************/
1721 
1722 /* mach_vm_map() fails with invalid protection or inheritance
1723  *  arguments. */
1724 void
test_mach_vm_map_protection_inheritance_error()1725 test_mach_vm_map_protection_inheritance_error()
1726 {
1727 	kern_return_t kr;
1728 	vm_map_t my_task          = mach_task_self();
1729 	mach_vm_address_t address = get_vm_address();
1730 	mach_vm_size_t size       = get_vm_size();
1731 	vm_map_offset_t mask = (get_allocator() == wrapper_mach_vm_map || get_allocator() == wrapper_mach_vm_map_named_entry)
1732 	    ? (mach_vm_offset_t)0
1733 	    : (mach_vm_offset_t)get_mask();
1734 	int flag                    = get_address_flag();
1735 	mach_port_t object_handle   = MACH_PORT_NULL;
1736 	vm_prot_t cur_protections[] = {VM_PROT_DEFAULT, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
1737 	vm_prot_t max_protections[] = {VM_PROT_ALL, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
1738 	vm_inherit_t inheritances[] = {VM_INHERIT_DEFAULT, VM_INHERIT_LAST_VALID + 1, UINT_MAX};
1739 	int i, j, k;
1740 
1741 	if (get_allocator() == wrapper_mach_vm_map_named_entry) {
1742 		assert_mach_success(memory_entry(&size, &object_handle), "mach_make_memory_entry_64()");
1743 	}
1744 	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1745 	if (!(flag & VM_FLAGS_ANYWHERE)) {
1746 		logv(" at address 0x%jx", (uintmax_t)address);
1747 	}
1748 	logv(
1749 		" with various invalid protection/inheritance "
1750 		"arguments...");
1751 
1752 	for (i = 0; i < 4; i++) {
1753 		for (j = 0; j < 4; j++) {
1754 			for (k = 0; k < 3; k++) {
1755 				/* Skip the case with all valid arguments. */
1756 				if (i == (j == (k == 0))) {
1757 					continue;
1758 				}
1759 				kr = mach_vm_map(my_task, &address, size, mask, flag, object_handle, (memory_object_offset_t)0, FALSE,
1760 				    cur_protections[i], max_protections[j], inheritances[k]);
1761 				T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1762 				    "mach_vm_map() "
1763 				    "with cur_protection 0x%x, max_protection 0x%x, "
1764 				    "inheritance 0x%x unexpectedly returned: %s.\n"
1765 				    "Should have returned: %s.",
1766 				    cur_protections[i], max_protections[j], inheritances[k], mach_error_string(kr),
1767 				    mach_error_string(KERN_INVALID_ARGUMENT));
1768 			}
1769 		}
1770 	}
1771 	logv("Returned expected error in each case: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1772 }
1773 
1774 /* mach_vm_map() with unspecified address fails if the starting
1775  *  address overflows when rounded up to a boundary value. */
1776 void
test_mach_vm_map_large_mask_overflow_error()1777 test_mach_vm_map_large_mask_overflow_error()
1778 {
1779 	mach_vm_address_t address = 0x1;
1780 	mach_vm_size_t size       = get_vm_size();
1781 	mach_vm_offset_t mask     = (mach_vm_offset_t)UINTMAX_MAX;
1782 	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
1783 	 *  address, see 8003930. */
1784 	kern_return_t kr_expected = size ? KERN_NO_SPACE : KERN_INVALID_ARGUMENT;
1785 
1786 	logv(
1787 		"Allocating 0x%jx (%ju) byte%s at an unspecified address "
1788 		"starting at 0x%jx with mask 0x%jx...",
1789 		(uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address, (uintmax_t)mask);
1790 	assert_mach_return(mach_vm_map(mach_task_self(), &address, size, mask, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
1791 	    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT),
1792 	    kr_expected, "mach_vm_map()");
1793 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
1794 }
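/*
 * Presumably (the mask semantics here are inferred from the call sites
 * in this file, not from the kernel sources), the mask acts as an
 * alignment mask: the chosen start address is rounded up until none of
 * the masked bits are set. With mask == UINTMAX_MAX the only admissible
 * address is 0x0, so rounding the 0x1 hint up to the next boundary
 * wraps past the end of the address space:
 *
 *     // roughly: start = (start + mask) & ~mask; -> wraps to 0x0,
 *     // which is treated as overflow, hence KERN_NO_SPACE for any
 *     // nonzero size.
 */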
1795 
1796 /************************/
1797 /* Size edge case tests */
1798 /************************/
1799 
1800 void
allocate_edge_size(mach_vm_address_t * address,mach_vm_size_t size,kern_return_t expected_kr)1801 allocate_edge_size(mach_vm_address_t * address, mach_vm_size_t size, kern_return_t expected_kr)
1802 {
1803 	logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
1804 	assert_allocate_return(address, size, VM_FLAGS_ANYWHERE, expected_kr);
1805 	logv("Returned expected value: %s.", mach_error_string(expected_kr));
1806 }
1807 
1808 void
test_allocate_zero_size()1809 test_allocate_zero_size()
1810 {
1811 	mach_vm_address_t address = 0x0;
1812 	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
1813 	 *  address, see 8003930. Other allocators succeed. */
1814 	kern_return_t kr_expected = (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;
1815 
1816 	allocate_edge_size(&address, 0, kr_expected);
1817 	if (kr_expected == KERN_SUCCESS) {
1818 		deallocate_range(address, 0);
1819 	}
1820 }
1821 
1822 /* Testing the allocation of the largest size that does not overflow
1823  * when rounded up to a page-aligned value. */
1824 void
test_allocate_invalid_large_size()1825 test_allocate_invalid_large_size()
1826 {
1827 	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
1828 	if (get_allocator() != wrapper_mach_vm_map_named_entry) {
1829 		mach_vm_address_t address = 0x0;
1830 		allocate_edge_size(&address, size, KERN_NO_SPACE);
1831 	} else {
1832 		/* Named entries cannot currently be bigger than 4 GB
1833 		 *  - 4 KB. */
1834 		mach_port_t object_handle = MACH_PORT_NULL;
1835 		logv("Creating named entry of 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
1836 		assert_mach_return(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)0,
1837 		    (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
1838 		    KERN_FAILURE, "mach_make_memory_entry_64()");
1839 		logv("Returned expected error: %s.", mach_error_string(KERN_FAILURE));
1840 	}
1841 }
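/*
 * Worked arithmetic for the size used above: S = UINTMAX_MAX -
 * vm_page_size + 1 = 2^64 - page_size is already page aligned, so
 * rounding it up leaves it unchanged; any larger size rounds up to
 * 2^64, which wraps to 0. A quick way to see the boundary
 * (illustrative only):
 *
 *     mach_vm_size_t s = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
 *     // (s & (vm_page_size - 1)) == 0          -- already aligned
 *     // (mach_vm_size_t)(s + vm_page_size) == 0 -- one more page wraps
 */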
1842 
1843 /* A UINTMAX_MAX VM size will overflow to 0 when rounded up to a
1844  * page-aligned value. */
1845 void
test_allocate_overflowing_size()1846 test_allocate_overflowing_size()
1847 {
1848 	mach_vm_address_t address = 0x0;
1849 
1850 	allocate_edge_size(&address, (mach_vm_size_t)UINTMAX_MAX, KERN_INVALID_ARGUMENT);
1851 }
1852 
1853 /****************************/
1854 /* Address allocation tests */
1855 /****************************/
1856 
1857 /* Allocation at address zero fails iff size is nonzero. */
1858 void
test_allocate_at_zero()1859 test_allocate_at_zero()
1860 {
1861 	mach_vm_address_t address = 0x0;
1862 	mach_vm_size_t size       = get_vm_size();
1863 
1864 	kern_return_t kr_expected =
1865 	    size ? KERN_INVALID_ADDRESS : (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;
1866 
1867 	logv("Allocating 0x%jx (%ju) byte%s at address 0x0...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1868 	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
1869 	logv("Returned expected value: %s.", mach_error_string(kr_expected));
1870 	if (kr_expected == KERN_SUCCESS) {
1871 		T_QUIET; T_ASSERT_EQ(address, 0,
1872 		    "Address 0x%jx is unexpectedly "
1873 		    "nonzero.\n",
1874 		    (uintmax_t)address);
1875 		logv("Allocated address 0x%jx is zero.", (uintmax_t)address);
1876 		deallocate_range(address, size);
1877 	}
1878 }
1879 
1880 /* Allocation at page-aligned but 2 MB boundary-unaligned address
1881  *  fails with KERN_NO_SPACE. */
1882 void
test_allocate_2MB_boundary_unaligned_page_aligned_address()1883 test_allocate_2MB_boundary_unaligned_page_aligned_address()
1884 {
1885 	mach_vm_size_t size = get_vm_size();
1886 
1887 	mach_vm_address_t address = get_fixed_address(size + vm_page_size) + vm_page_size;
1888 	logv(
1889 		"Found 2 MB boundary-unaligned, page aligned address "
1890 		"0x%jx.",
1891 		(uintmax_t)address);
1892 
1893 	/* mach_vm_allocate() cannot allocate 0 bytes, and fails with a
1894 	 *  fixed boundary-unaligned truncated address. */
1895 	kern_return_t kr_expected = (!size && get_allocator() != wrapper_mach_vm_allocate)
1896 	    ? KERN_INVALID_ARGUMENT
1897 	    : (get_allocator() == wrapper_mach_vm_map_2MB) ? KERN_NO_SPACE : KERN_SUCCESS;
1898 	logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1899 	    (uintmax_t)address);
1900 	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
1901 	logv("Returned expected value: %s.", mach_error_string(kr_expected));
1902 	if (kr_expected == KERN_SUCCESS) {
1903 		deallocate_range(address, size);
1904 	}
1905 }
1906 
1907 /* With VM_FLAGS_ANYWHERE set, mach_vm_allocate() starts looking for
1908  *  an allocation address at 0x0, while mach_vm_map() starts at the
1909  *  supplied address and does not wrap around. See 8016663. */
1910 void
test_allocate_page_with_highest_address_hint()1911 test_allocate_page_with_highest_address_hint()
1912 {
1913 	/* Highest valid page-aligned address. */
1914 	mach_vm_address_t address = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
1915 
1916 	logv(
1917 		"Allocating one page with unspecified address, but hint at "
1918 		"0x%jx...",
1919 		(uintmax_t)address);
1920 	if (get_allocator() == wrapper_mach_vm_allocate) {
1921 		/* mach_vm_allocate() starts from 0x0 and succeeds. */
1922 		assert_allocate_success(&address, vm_page_size, VM_FLAGS_ANYWHERE);
1923 		logv("Memory allocated at address 0x%jx.", (uintmax_t)address);
1924 		assert_aligned_address(address);
1925 		deallocate_range(address, vm_page_size);
1926 	} else {
1927 		/* mach_vm_map() starts from the supplied address, and fails
1928 		 *  with KERN_NO_SPACE, see 8016663. */
1929 		assert_allocate_return(&address, vm_page_size, VM_FLAGS_ANYWHERE, KERN_NO_SPACE);
1930 		logv("Returned expected error: %s.", mach_error_string(KERN_NO_SPACE));
1931 	}
1932 }
1933 
1934 /* Allocators find an allocation address with a first fit strategy. */
1935 void
test_allocate_first_fit_pages()1936 test_allocate_first_fit_pages()
1937 {
1938 	allocate_fn_t allocator    = get_allocator();
1939 	mach_vm_address_t address1 = 0x0;
1940 	mach_vm_address_t i;
1941 	kern_return_t kr;
1942 	vm_map_t this_task = mach_task_self();
1943 
1944 	logv(
1945 		"Looking for first fit address for allocating one "
1946 		"page...");
1947 	assert_allocate_success(&address1, vm_page_size, VM_FLAGS_ANYWHERE);
1948 	logv("Found address 0x%jx.", (uintmax_t)address1);
1949 	assert_aligned_address(address1);
1950 	mach_vm_address_t address2 = address1;
1951 	logv(
1952 		"Looking for next higher first fit address for allocating "
1953 		"one page...");
1954 	assert_allocate_success(&address2, vm_page_size, VM_FLAGS_ANYWHERE);
1955 	logv("Found address 0x%jx.", (uintmax_t)address2);
1956 	assert_aligned_address(address2);
1957 	T_QUIET; T_ASSERT_GT(address2, address1,
1958 	    "Second address 0x%jx is "
1959 	    "unexpectedly not higher than first address 0x%jx.",
1960 	    (uintmax_t)address2, (uintmax_t)address1);
1961 
1962 	logv("Allocating pages between 0x%jx and 0x%jx...", (uintmax_t)address1, (uintmax_t)address2);
1963 	for (i = address1; i <= address2; i += vm_page_size) {
1964 		kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
1965 		T_QUIET; T_ASSERT_NE(kr, KERN_SUCCESS,
1966 		    "Allocator at address 0x%jx "
1967 		    "unexpectedly succeeded.",
1968 		    (uintmax_t)i);
1969 	}
1970 	logv("Expectedly returned error at each page.");
1971 	deallocate_range(address1, vm_page_size);
1972 	deallocate_range(address2, vm_page_size);
1973 }
1974 
1975 /*******************************/
1976 /* Deallocation segfault tests */
1977 /*******************************/
1978 
1979 /* mach_vm_deallocate() deallocates the smallest aligned region
1980  * (integral number of pages) containing the given range. */
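/*
 * A sketch of the rounding implied above (illustrative only): the
 * deallocated span is the page-aligned hull of [address, address + size),
 *
 *     mach_vm_address_t start = mach_vm_trunc_page(address);
 *     mach_vm_size_t len      = round_page(address + size) - start;
 *     kern_return_t kr = mach_vm_deallocate(mach_task_self(), start, len);
 *
 * which is why touching any byte of those pages after deallocate()
 * below must fault.
 */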
1981 
1982 /* Addresses in deallocated range are inaccessible. */
1983 void
access_deallocated_range_address(mach_vm_address_t address,const char * position)1984 access_deallocated_range_address(mach_vm_address_t address, const char * position)
1985 {
1986 	logv("Will deallocate and read from %s 0x%jx of deallocated range...", position, (uintmax_t)address);
1987 	deallocate();
1988 	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
1989 	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx.\n"
1990 	    "Should have died with signal SIGSEGV.",
1991 	    (uintmax_t)bad_value, (uintmax_t)address);
1992 }
1993 
1994 /* Start of deallocated range is inaccessible. */
1995 void
test_access_deallocated_range_start()1996 test_access_deallocated_range_start()
1997 {
1998 	access_deallocated_range_address(get_vm_address(), "start");
1999 }
2000 
2001 /* Middle of deallocated range is inaccessible. */
2002 void
test_access_deallocated_range_middle()2003 test_access_deallocated_range_middle()
2004 {
2005 	access_deallocated_range_address(get_vm_address() + (round_page(get_vm_size()) >> 1), "middle");
2006 }
2007 
2008 /* End of deallocated range is inaccessible. */
2009 void
test_access_deallocated_range_end()2010 test_access_deallocated_range_end()
2011 {
2012 	access_deallocated_range_address(round_page(get_vm_size()) - vm_address_size + get_vm_address(), "end");
2013 }
2014 
2015 /* Deallocating almost the whole address space causes a SIGSEGV or SIGBUS. We
2016  * deallocate the largest valid aligned size to avoid overflowing when
2017  * rounding up. */
2018 void
test_deallocate_suicide()2019 test_deallocate_suicide()
2020 {
2021 	mach_vm_address_t address = 0x0;
2022 	mach_vm_size_t size       = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
2023 
2024 	logv("Deallocating 0x%jx (%ju) bytes at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (uintmax_t)address);
2025 	kern_return_t kr = mach_vm_deallocate(mach_task_self(), address, size);
2026 	T_ASSERT_FAIL("mach_vm_deallocate() with address 0x%jx and "
2027 	    "size 0x%jx (%ju) unexpectedly returned: %s.\n"
2028 	    "Should have died with signal SIGSEGV or SIGBUS.",
2029 	    (uintmax_t)address, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr));
2030 }
2031 
2032 /***************************************/
2033 /* Deallocation and reallocation tests */
2034 /***************************************/
2035 
2036 /* Deallocating memory twice succeeds. */
2037 void
test_deallocate_twice()2038 test_deallocate_twice()
2039 {
2040 	deallocate();
2041 	deallocate();
2042 }
2043 
2044 /* Deallocated and reallocated memory is zero-filled. Deallocated
2045  * memory is inaccessible since it can be reallocated. */
2046 void
test_write_pattern_deallocate_reallocate_zero_filled()2047 test_write_pattern_deallocate_reallocate_zero_filled()
2048 {
2049 	mach_vm_address_t address = get_vm_address();
2050 	mach_vm_size_t size       = get_vm_size();
2051 
2052 	write_pattern(page_ends, FALSE, address, size, "page ends");
2053 	logv("Deallocating, then allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2054 	    (uintmax_t)address);
2055 	deallocate();
2056 	assert_allocate_success(&address, size, VM_FLAGS_FIXED);
2057 	logv("Memory allocated.");
2058 	verify_pattern(empty, FALSE, address, size, "zero-filled");
2059 	deallocate();
2060 }
2061 
2062 /********************************/
2063 /* Deallocation edge case tests */
2064 /********************************/
2065 
2066 /* Zero size deallocation always succeeds. */
2067 void
test_deallocate_zero_size_ranges()2068 test_deallocate_zero_size_ranges()
2069 {
2070 	int i;
2071 	kern_return_t kr;
2072 	vm_map_t this_task            = mach_task_self();
2073 	mach_vm_address_t addresses[] = {0x0,
2074 		                         0x1,
2075 		                         vm_page_size - 1,
2076 		                         vm_page_size,
2077 		                         vm_page_size + 1,
2078 		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2079 		                         (mach_vm_address_t)UINT_MAX,
2080 		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2081 		                         (mach_vm_address_t)UINTMAX_MAX};
2082 	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2083 
2084 	logv("Deallocating 0x0 (0) bytes at various addresses...");
2085 	for (i = 0; i < numofaddresses; i++) {
2086 		kr = mach_vm_deallocate(this_task, addresses[i], 0);
2087 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate() at "
2088 		    "address 0x%jx unexpectedly failed: %s.",
2089 		    (uintmax_t)addresses[i], mach_error_string(kr));
2090 	}
2091 	logv("Deallocations successful.");
2092 }
2093 
2094 /* Deallocation succeeds if the end of the range rounds to 0x0. */
2095 void
test_deallocate_rounded_zero_end_ranges()2096 test_deallocate_rounded_zero_end_ranges()
2097 {
2098 	int i;
2099 	kern_return_t kr;
2100 	vm_map_t this_task = mach_task_self();
2101 	struct {
2102 		mach_vm_address_t address;
2103 		mach_vm_size_t size;
2104 	} ranges[] = {
2105 		{0x0, (mach_vm_size_t)UINTMAX_MAX},
2106 		{0x0, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 2},
2107 		{0x1, (mach_vm_size_t)UINTMAX_MAX - 1},
2108 		{0x1, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
2109 		{0x2, (mach_vm_size_t)UINTMAX_MAX - 2},
2110 		{0x2, (mach_vm_size_t)UINTMAX_MAX - vm_page_size},
2111 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size - 1},
2112 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, 1},
2113 		{(mach_vm_address_t)UINTMAX_MAX - 1, 1},
2114 	};
2115 	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
2116 
2117 	logv(
2118 		"Deallocating various memory ranges whose end rounds to "
2119 		"0x0...");
2120 	for (i = 0; i < numofranges; i++) {
2121 		kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
2122 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
2123 		    "mach_vm_deallocate() with address 0x%jx and size "
2124 		    "0x%jx (%ju) unexpectedly returned: %s.\n"
2125 		    "Should have succeeded.",
2126 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr));
2127 	}
2128 	logv("Deallocations successful.");
2129 }
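/*
 * Worked example for the first entry above (illustrative only):
 * address 0x0 with size UINTMAX_MAX truncates to start 0x0, and the end
 * 0x0 + UINTMAX_MAX rounds up to 2^64, which wraps to 0x0; the rounded
 * span is therefore empty and the call trivially succeeds:
 *
 *     kr = mach_vm_deallocate(mach_task_self(), 0x0, (mach_vm_size_t)UINTMAX_MAX);
 *     // kr == KERN_SUCCESS
 */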
2130 
2131 /* Deallocating a range wrapped around the address space fails. */
2132 void
test_deallocate_wrapped_around_ranges()2133 test_deallocate_wrapped_around_ranges()
2134 {
2135 	int i;
2136 	kern_return_t kr;
2137 	vm_map_t this_task = mach_task_self();
2138 	struct {
2139 		mach_vm_address_t address;
2140 		mach_vm_size_t size;
2141 	} ranges[] = {
2142 		{0x1, (mach_vm_size_t)UINTMAX_MAX},
2143 		{vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
2144 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
2145 		{(mach_vm_address_t)UINTMAX_MAX, 1},
2146 	};
2147 	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
2148 
2149 	logv(
2150 		"Deallocating various memory ranges wrapping around the "
2151 		"address space...");
2152 	for (i = 0; i < numofranges; i++) {
2153 		kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
2154 		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
2155 		    "mach_vm_deallocate() with address 0x%jx and size "
2156 		    "0x%jx (%ju) unexpectedly returned: %s.\n"
2157 		    "Should have returned: %s.",
2158 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
2159 		    mach_error_string(KERN_INVALID_ARGUMENT));
2160 	}
2161 	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
2162 }
2163 
2164 /* Deallocating in VM_MAP_NULL fails. */
2165 void
test_deallocate_in_null_map()2166 test_deallocate_in_null_map()
2167 {
2168 	mach_vm_address_t address = get_vm_address();
2169 	mach_vm_size_t size       = get_vm_size();
2170 	int flag                  = get_address_flag();
2171 
2172 	logv("Deallocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
2173 	if (!(flag & VM_FLAGS_ANYWHERE)) {
2174 		logv(" at address 0x%jx", (uintmax_t)address);
2175 	}
2176 	logv(" in NULL VM map...");
2177 	assert_mach_return(mach_vm_deallocate(VM_MAP_NULL, address, size), MACH_SEND_INVALID_DEST, "mach_vm_deallocate()");
2178 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2179 }
2180 
2181 /*****************************/
2182 /* mach_vm_read() main tests */
2183 /*****************************/
2184 
2185 /* Memory read with a size of less than a page has an aligned starting
2186  * address. Otherwise, the destination buffer's starting address has
2187  * the same boundary offset as the source region's. */
2188 void
test_read_address_offset()2189 test_read_address_offset()
2190 {
2191 	mach_vm_address_t address = get_vm_address();
2192 	mach_vm_size_t size       = get_vm_size();
2193 
2194 	if (size < vm_page_size * 2 || get_address_alignment()) {
2195 		assert_aligned_address(address);
2196 		logv("Buffer address 0x%jx is aligned as expected.", (uintmax_t)address);
2197 	} else {
2198 		T_QUIET; T_ASSERT_EQ(((address - 1) & (vm_page_size - 1)), 0,
2199 		    "Buffer "
2200 		    "address 0x%jx does not have the expected boundary "
2201 		    "offset of 1.",
2202 		    (uintmax_t)address);
2203 		logv(
2204 			"Buffer address 0x%jx has the expected boundary "
2205 			"offset of 1.",
2206 			(uintmax_t)address);
2207 	}
2208 }
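/*
 * The property checked above, restated with a raw call (illustrative
 * only; `src` and `len` stand for the test fixture's address and size):
 * for a large enough read, the out-of-line buffer returned by
 * mach_vm_read() preserves the source's offset within its page, while
 * sufficiently small reads come back page aligned:
 *
 *     vm_offset_t buf;
 *     mach_msg_type_number_t cnt;
 *     kr = mach_vm_read(mach_task_self(), src, len, &buf, &cnt);
 *     // (buf & (vm_page_size - 1)) == (src & (vm_page_size - 1))
 */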
2209 
2210 /* Reading from VM_MAP_NULL fails. */
2211 void
test_read_null_map()2212 test_read_null_map()
2213 {
2214 	mach_vm_address_t address = get_vm_address();
2215 	mach_vm_size_t size       = get_vm_size();
2216 	vm_offset_t read_address;
2217 	mach_msg_type_number_t read_size;
2218 
2219 	logv(
2220 		"Reading 0x%jx (%ju) byte%s at address 0x%jx in NULL VM "
2221 		"map...",
2222 		(uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
2223 	assert_mach_return(mach_vm_read(VM_MAP_NULL, address, size, &read_address, &read_size), MACH_SEND_INVALID_DEST,
2224 	    "mach_vm_read()");
2225 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2226 }
2227 
2228 /* Reading partially deallocated memory fails. */
2229 void
test_read_partially_deallocated_range()2230 test_read_partially_deallocated_range()
2231 {
2232 	mach_vm_address_t address   = get_vm_address();
2233 	mach_vm_size_t size         = get_vm_size();
2234 	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
2235 	vm_offset_t read_address;
2236 	mach_msg_type_number_t read_size;
2237 
2238 	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2239 	assert_deallocate_success(mid_point, vm_page_size);
2240 	logv("Page deallocated.");
2241 
2242 	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2243 	    (uintmax_t)address);
2244 	assert_read_return(address, size, &read_address, &read_size, KERN_INVALID_ADDRESS);
2245 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2246 }
2247 
2248 /* Reading partially read-protected memory fails. */
2249 void
test_read_partially_unreadable_range()2250 test_read_partially_unreadable_range()
2251 {
2252 	mach_vm_address_t address   = get_vm_address();
2253 	mach_vm_size_t size         = get_vm_size();
2254 	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
2255 	vm_offset_t read_address;
2256 	mach_msg_type_number_t read_size;
2257 
2258 	/*  For sizes < msg_ool_size_small, vm_map_copyin_common() uses
2259 	 *  vm_map_copyin_kernel_buffer() to read in the memory,
2260 	 *  returning different errors, see 8182239. */
2261 	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2262 
2263 	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2264 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
2265 	logv("Page read-protected.");
2266 
2267 	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2268 	    (uintmax_t)address);
2269 	assert_read_return(address, size, &read_address, &read_size, kr_expected);
2270 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
2271 }
2272 
2273 /**********************************/
2274 /* mach_vm_read() edge case tests */
2275 /**********************************/
2276 
2277 void
read_edge_size(mach_vm_size_t size,kern_return_t expected_kr)2278 read_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
2279 {
2280 	int i;
2281 	kern_return_t kr;
2282 	vm_map_t this_task            = mach_task_self();
2283 	mach_vm_address_t addresses[] = {vm_page_size - 1,
2284 		                         vm_page_size,
2285 		                         vm_page_size + 1,
2286 		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2287 		                         (mach_vm_address_t)UINT_MAX,
2288 		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2289 		                         (mach_vm_address_t)UINTMAX_MAX};
2290 	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2291 	vm_offset_t read_address;
2292 	mach_msg_type_number_t read_size;
2293 
2294 	logv("Reading 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
2295 	for (i = 0; i < numofaddresses; i++) {
2296 		kr = mach_vm_read(this_task, addresses[i], size, &read_address, &read_size);
2297 		T_QUIET; T_ASSERT_EQ(kr, expected_kr,
2298 		    "mach_vm_read() at "
2299 		    "address 0x%jx unexpectedly returned: %s.\n"
2300 		    "Should have returned: %s.",
2301 		    (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
2302 	}
2303 	logv(
2304 		"mach_vm_read() returned expected value in each case: "
2305 		"%s.",
2306 		mach_error_string(expected_kr));
2307 }
2308 
2309 /* Reading 0 bytes always succeeds. */
2310 void
test_read_zero_size()2311 test_read_zero_size()
2312 {
2313 	read_edge_size(0, KERN_SUCCESS);
2314 }
2315 
2316 /* Reading 4GB or higher always fails. */
2317 void
test_read_invalid_large_size()2318 test_read_invalid_large_size()
2319 {
2320 	read_edge_size((mach_vm_size_t)UINT_MAX + 1, KERN_INVALID_ARGUMENT);
2321 }
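/*
 * Presumably the 4 GB cutoff exists because the byte count comes back
 * through a 32-bit mach_msg_type_number_t, so a request that cannot be
 * represented there is rejected up front (illustrative only; `addr` is
 * any address):
 *
 *     vm_offset_t buf;
 *     mach_msg_type_number_t cnt;   // 32-bit out-parameter
 *     kr = mach_vm_read(mach_task_self(), addr, (mach_vm_size_t)UINT_MAX + 1, &buf, &cnt);
 *     // kr == KERN_INVALID_ARGUMENT regardless of addr
 */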
2322 
2323 /* Reading a range wrapped around the address space fails. */
2324 void
test_read_wrapped_around_ranges()2325 test_read_wrapped_around_ranges()
2326 {
2327 	int i;
2328 	kern_return_t kr;
2329 	vm_map_t this_task = mach_task_self();
2330 	struct {
2331 		mach_vm_address_t address;
2332 		mach_vm_size_t size;
2333 	} ranges[] = {
2334 		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
2335 		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
2336 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
2337 		{(mach_vm_address_t)UINTMAX_MAX, 1},
2338 	};
2339 	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
2340 	vm_offset_t read_address;
2341 	mach_msg_type_number_t read_size;
2342 
2343 	logv(
2344 		"Reading various memory ranges wrapping around the "
2345 		"address space...");
2346 	for (i = 0; i < numofranges; i++) {
2347 		kr = mach_vm_read(this_task, ranges[i].address, ranges[i].size, &read_address, &read_size);
2348 		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
2349 		    "mach_vm_read() at address 0x%jx with size "
2350 		    "0x%jx (%ju) unexpectedly returned: %s.\n"
2351 		    "Should have returned: %s.",
2352 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
2353 		    mach_error_string(KERN_INVALID_ADDRESS));
2354 	}
2355 	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2356 }
2357 
2358 /********************************/
2359 /* mach_vm_read() pattern tests */
2360 /********************************/
2361 
2362 /* Write a pattern on pre-allocated memory, read into a buffer and
2363  * verify the pattern on the buffer. */
2364 void
write_read_verify_pattern(address_filter_t filter,boolean_t reversed,const char * pattern_name)2365 write_read_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
2366 {
2367 	mach_vm_address_t address = get_vm_address();
2368 
2369 	write_pattern(filter, reversed, address, get_vm_size(), pattern_name);
2370 	read_deallocate();
2371 	/* Getting the address and size of the read buffer. */
2372 	mach_vm_address_t read_address = get_vm_address();
2373 	mach_vm_size_t read_size = get_vm_size();
2374 	logv(
2375 		"Verifying %s pattern on buffer of "
2376 		"address 0x%jx and size 0x%jx (%ju)...",
2377 		pattern_name, (uintmax_t)read_address, (uintmax_t)read_size, (uintmax_t)read_size);
2378 	filter_addresses_do_else(filter, reversed, read_address, read_size, verify_address, read_zero, address);
2379 	logv("Pattern verified on destination buffer.");
2380 }
2381 
2382 void
test_read_address_filled()2383 test_read_address_filled()
2384 {
2385 	write_read_verify_pattern(empty, TRUE, "address-filled");
2386 }
2387 
2388 void
test_read_checkerboard()2389 test_read_checkerboard()
2390 {
2391 	write_read_verify_pattern(checkerboard, FALSE, "checkerboard");
2392 }
2393 
2394 void
test_read_reverse_checkerboard()2395 test_read_reverse_checkerboard()
2396 {
2397 	write_read_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
2398 }
2399 
2400 /***********************************/
2401 /* mach_vm_write() edge case tests */
2402 /***********************************/
2403 
2404 /* Writing in VM_MAP_NULL fails. */
2405 void
test_write_null_map()2406 test_write_null_map()
2407 {
2408 	mach_vm_address_t address          = get_vm_address();
2409 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2410 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2411 
2412 	logv(
2413 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2414 		"memory at address 0x%jx in NULL VM MAP...",
2415 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2416 	assert_mach_return(mach_vm_write(VM_MAP_NULL, address, data, buffer_size), MACH_SEND_INVALID_DEST, "mach_vm_write()");
2417 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2418 }
2419 
2420 /* Writing 0 bytes always succeeds. */
2421 void
test_write_zero_size()2422 test_write_zero_size()
2423 {
2424 	set_buffer_size(0);
2425 	write_buffer();
2426 }
2427 
2428 /*****************************************/
2429 /* mach_vm_write() inaccessibility tests */
2430 /*****************************************/
2431 
2432 /* Writing a partially deallocated buffer fails. */
2433 void
test_write_partially_deallocated_buffer()2434 test_write_partially_deallocated_buffer()
2435 {
2436 	mach_vm_address_t address          = get_vm_address();
2437 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2438 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2439 	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);
2440 
2441 	logv(
2442 		"Deallocating a mid-range buffer page at address "
2443 		"0x%jx...",
2444 		(uintmax_t)buffer_mid_point);
2445 	assert_deallocate_success(buffer_mid_point, vm_page_size);
2446 	logv("Page deallocated.");
2447 
2448 	logv(
2449 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2450 		"memory at address 0x%jx...",
2451 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2452 	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
2453 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
2454 }
2455 
2456 /* Writing a partially read-protected buffer fails. */
2457 void
test_write_partially_unreadable_buffer()2458 test_write_partially_unreadable_buffer()
2459 {
2460 	mach_vm_address_t address          = get_vm_address();
2461 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2462 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2463 	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);
2464 
2465 	logv(
2466 		"Read-protecting a mid-range buffer page at address "
2467 		"0x%jx...",
2468 		(uintmax_t)buffer_mid_point);
2469 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), buffer_mid_point, vm_page_size, FALSE, VM_PROT_WRITE),
2470 	    "mach_vm_protect()");
2471 	logv("Page read-protected.");
2472 
2473 	logv(
2474 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2475 		"memory at address 0x%jx...",
2476 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2477 	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
2478 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
2479 }
2480 
2481 /* Writing on partially deallocated memory fails. */
2482 void
test_write_on_partially_deallocated_range()2483 test_write_on_partially_deallocated_range()
2484 {
2485 	mach_vm_address_t address          = get_vm_address();
2486 	mach_vm_address_t start            = mach_vm_trunc_page(address);
2487 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2488 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2489 
2490 	logv(
2491 		"Deallocating the first destination page at address "
2492 		"0x%jx...",
2493 		(uintmax_t)start);
2494 	assert_deallocate_success(start, vm_page_size);
2495 	logv("Page deallocated.");
2496 
2497 	logv(
2498 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2499 		"memory at address 0x%jx...",
2500 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2501 	assert_write_return(address, data, buffer_size, KERN_INVALID_ADDRESS);
2502 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2503 }
2504 
2505 /* Writing on partially unwritable memory fails. */
2506 void
test_write_on_partially_unwritable_range()2507 test_write_on_partially_unwritable_range()
2508 {
2509 	mach_vm_address_t address          = get_vm_address();
2510 	mach_vm_address_t start            = mach_vm_trunc_page(address);
2511 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2512 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2513 
2514 	/*  For sizes < msg_ool_size_small,
2515 	 *  vm_map_copy_overwrite_nested() uses
2516 	 *  vm_map_copyout_kernel_buffer() to read in the memory,
2517 	 *  returning different errors, see 8217123. */
2518 	kern_return_t kr_expected = (buffer_size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2519 
2520 	logv(
2521 		"Write-protecting the first destination page at address "
2522 		"0x%jx...",
2523 		(uintmax_t)start);
2524 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2525 	logv("Page write-protected.");
2526 
2527 	logv(
2528 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2529 		"memory at address 0x%jx...",
2530 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2531 	assert_write_return(address, data, buffer_size, kr_expected);
2532 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
2533 }
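/*
 * Taken together, the four tests above show two distinct failure
 * surfaces for mach_vm_write(): a defective *source buffer* surfaces as
 * the IPC error MACH_SEND_INVALID_MEMORY (presumably caught while the
 * out-of-line data is prepared; that attribution is an inference from
 * the error codes, not from the implementation), while a defective
 * *destination* is caught by the VM layer and surfaces as
 * KERN_INVALID_ADDRESS or KERN_PROTECTION_FAILURE depending on the copy
 * path taken for the given size. Roughly:
 *
 *     mach_vm_write(task, dest, bad_buf, len);   // MACH_SEND_INVALID_MEMORY
 *     mach_vm_write(task, bad_dest, buf, len);   // KERN_INVALID_ADDRESS /
 *                                                // KERN_PROTECTION_FAILURE
 */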
2534 
2535 /*********************************/
2536 /* mach_vm_write() pattern tests */
2537 /*********************************/
2538 
2539 /* Verify that a zero-filled buffer and destination memory are still
2540  * zero-filled after writing. */
2541 void
test_zero_filled_write()2542 test_zero_filled_write()
2543 {
2544 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
2545 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
2546 	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
2547 }
2548 
2549 /* Write a pattern on a buffer, write the buffer into some destination
2550  * memory, and verify the pattern on both buffer and destination. */
2551 void
pattern_write(address_filter_t filter,boolean_t reversed,const char * pattern_name)2552 pattern_write(address_filter_t filter, boolean_t reversed, const char * pattern_name)
2553 {
2554 	mach_vm_address_t address        = get_vm_address();
2555 	mach_vm_size_t size              = get_vm_size();
2556 	mach_vm_address_t buffer_address = get_buffer_address();
2557 	mach_vm_size_t buffer_size       = get_buffer_size();
2558 
2559 	write_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2560 	write_buffer();
2561 	verify_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2562 	logv(
2563 		"Verifying %s pattern on destination of "
2564 		"address 0x%jx and size 0x%jx (%ju)...",
2565 		pattern_name, (uintmax_t)address, (uintmax_t)buffer_size, (uintmax_t)size);
2566 	filter_addresses_do_else(filter, reversed, address, buffer_size, verify_address, read_zero, buffer_address);
2567 	logv("Pattern verified on destination.");
2568 }
2569 
2570 void
test_address_filled_write()2571 test_address_filled_write()
2572 {
2573 	pattern_write(empty, TRUE, "address-filled");
2574 }
2575 
2576 void
test_checkerboard_write()2577 test_checkerboard_write()
2578 {
2579 	pattern_write(checkerboard, FALSE, "checkerboard");
2580 }
2581 
2582 void
test_reverse_checkerboard_write()2583 test_reverse_checkerboard_write()
2584 {
2585 	pattern_write(checkerboard, TRUE, "reverse checkerboard");
2586 }
2587 
2588 /**********************************/
2589 /* mach_vm_copy() edge case tests */
2590 /**********************************/
2591 
2592 /* Copying in VM_MAP_NULL fails. */
2593 void
test_copy_null_map()2594 test_copy_null_map()
2595 {
2596 	mach_vm_address_t source    = get_vm_address();
2597 	mach_vm_address_t dest      = get_buffer_address();
2598 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2599 
2600 	logv(
2601 		"Copying buffer of address 0x%jx and size 0x%jx (%ju), on "
2602 		"memory at address 0x%jx in NULL VM MAP...",
2603 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2604 	assert_mach_return(mach_vm_copy(VM_MAP_NULL, source, size, dest), MACH_SEND_INVALID_DEST, "mach_vm_copy()");
2605 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2606 }
2607 
2608 void
copy_edge_size(mach_vm_size_t size,kern_return_t expected_kr)2609 copy_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
2610 {
2611 	int i;
2612 	kern_return_t kr;
2613 	vm_map_t this_task            = mach_task_self();
2614 	mach_vm_address_t addresses[] = {0x0,
2615 		                         0x1,
2616 		                         vm_page_size - 1,
2617 		                         vm_page_size,
2618 		                         vm_page_size + 1,
2619 		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2620 		                         (mach_vm_address_t)UINT_MAX,
2621 		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2622 		                         (mach_vm_address_t)UINTMAX_MAX};
2623 	int numofaddresses     = sizeof(addresses) / sizeof(addresses[0]);
2624 	mach_vm_address_t dest = 0;
2625 
2626 	logv("Allocating 0x1000 (4096) bytes...");
2627 	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);
2628 	logv("Copying 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
2629 	for (i = 0; i < numofaddresses; i++) {
2630 		kr = mach_vm_copy(this_task, addresses[i], size, dest);
2631 		T_QUIET; T_ASSERT_EQ(kr, expected_kr,
2632 		    "mach_vm_copy() at "
2633 		    "address 0x%jx unexpectedly returned: %s.\n"
2634 		    "Should have returned: %s.",
2635 		    (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
2636 	}
2637 	logv(
2638 		"mach_vm_copy() returned expected value in each case: "
2639 		"%s.",
2640 		mach_error_string(expected_kr));
2641 
2642 	deallocate_range(dest, 4096);
2643 }
2644 
2645 /* Copying 0 bytes always succeeds. */
2646 void
test_copy_zero_size()2647 test_copy_zero_size()
2648 {
2649 	copy_edge_size(0, KERN_SUCCESS);
2650 }
2651 
2652 /* Copying a nearly 4 GB size always fails. */
2653 void
test_copy_invalid_large_size()2654 test_copy_invalid_large_size()
2655 {
2656 	copy_edge_size((mach_vm_size_t)UINT_MAX - 1, KERN_INVALID_ADDRESS);
2657 }
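/*
 * Unlike mach_vm_read(), whose result count is a 32-bit
 * mach_msg_type_number_t, mach_vm_copy() takes a full 64-bit
 * mach_vm_size_t, so an almost-4 GB size is not rejected as an
 * argument; it fails address validation instead (assumption based on
 * the prototypes; illustrative only):
 *
 *     kr = mach_vm_copy(mach_task_self(), addr, (mach_vm_size_t)UINT_MAX - 1, dest);
 *     // kr == KERN_INVALID_ADDRESS for the unmapped source ranges
 *     // exercised above
 */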
2658 
2659 /* Copying a range wrapped around the address space fails. */
2660 void
test_copy_wrapped_around_ranges()2661 test_copy_wrapped_around_ranges()
2662 {
2663 	int i;
2664 	kern_return_t kr;
2665 	vm_map_t this_task = mach_task_self();
2666 	struct {
2667 		mach_vm_address_t address;
2668 		mach_vm_size_t size;
2669 	} ranges[] = {
2670 		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
2671 		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
2672 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
2673 		{(mach_vm_address_t)UINTMAX_MAX, 1},
2674 	};
2675 	int numofranges        = sizeof(ranges) / sizeof(ranges[0]);
2676 	mach_vm_address_t dest = 0;
2677 
2678 	logv("Allocating 0x1000 (4096) bytes...");
2679 	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);
2680 
2681 	logv(
2682 		"Copying various memory ranges wrapping around the "
2683 		"address space...");
2684 	for (i = 0; i < numofranges; i++) {
2685 		kr = mach_vm_copy(this_task, ranges[i].address, ranges[i].size, dest);
2686 		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
2687 		    "mach_vm_copy() at address 0x%jx with size "
2688 		    "0x%jx (%ju) unexpectedly returned: %s.\n"
2689 		    "Should have returned: %s.",
2690 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
2691 		    mach_error_string(KERN_INVALID_ADDRESS));
2692 	}
2693 	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2694 
2695 	deallocate_range(dest, 4096);
2696 }
2697 
2698 /********************************/
2699 /* mach_vm_copy() pattern tests */
2700 /********************************/
2701 
2702 /* Write a pattern on pre-allocated region, copy into another region
2703  * and verify the pattern in the region. */
2704 void
write_copy_verify_pattern(address_filter_t filter,boolean_t reversed,const char * pattern_name)2705 write_copy_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
2706 {
2707 	mach_vm_address_t source = get_vm_address();
2708 	mach_vm_size_t src_size = get_vm_size();
2709 	write_pattern(filter, reversed, source, src_size, pattern_name);
2710 	/* Getting the address and size of the dest region */
2711 	mach_vm_address_t dest  = get_buffer_address();
2712 	mach_vm_size_t dst_size = get_buffer_size();
2713 
2714 	logv(
2715 		"Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
2716 		"memory at address 0x%jx...",
2717 		(uintmax_t)source, (uintmax_t)dst_size, (uintmax_t)dst_size, (uintmax_t)dest);
2718 	assert_copy_success(source, dst_size, dest);
2719 	logv(
2720 		"Verifying %s pattern in region of "
2721 		"address 0x%jx and size 0x%jx (%ju)...",
2722 		pattern_name, (uintmax_t)dest, (uintmax_t)dst_size, (uintmax_t)dst_size);
2723 	filter_addresses_do_else(filter, reversed, dest, dst_size, verify_address, read_zero, source);
2724 	logv("Pattern verified on destination region.");
2725 }
2726 
2727 void
test_copy_address_filled()2728 test_copy_address_filled()
2729 {
2730 	write_copy_verify_pattern(empty, TRUE, "address-filled");
2731 }
2732 
2733 void
test_copy_checkerboard()2734 test_copy_checkerboard()
2735 {
2736 	write_copy_verify_pattern(checkerboard, FALSE, "checkerboard");
2737 }
2738 
2739 void
test_copy_reverse_checkerboard()2740 test_copy_reverse_checkerboard()
2741 {
2742 	write_copy_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
2743 }
2744 
2745 /* Verify that zero-filled source and destination memory are still
2746  * zero-filled after copying. */
2747 void
test_zero_filled_copy_dest()2748 test_zero_filled_copy_dest()
2749 {
2750 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
2751 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
2752 	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
2753 }
2754 
2755 /****************************************/
2756 /* mach_vm_copy() inaccessibility tests */
2757 /****************************************/
2758 
2759 /* Copying partially deallocated memory fails. */
2760 void
test_copy_partially_deallocated_range()2761 test_copy_partially_deallocated_range()
2762 {
2763 	mach_vm_address_t source    = get_vm_address();
2764 	mach_vm_size_t size         = get_vm_size();
2765 	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
2766 	mach_vm_address_t dest      = 0;
2767 
2768 	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2769 	assert_deallocate_success(mid_point, vm_page_size);
2770 	logv("Page deallocated.");
2771 
2772 	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2773 	    (uintmax_t)source);
2774 
2775 	assert_allocate_copy_return(source, size, &dest, KERN_INVALID_ADDRESS);
2776 
2777 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2778 
2779 	deallocate_range(dest, size);
2780 }
2781 
2782 /* Copying partially read-protected memory fails. */
2783 void
test_copy_partially_unreadable_range()2784 test_copy_partially_unreadable_range()
2785 {
2786 	mach_vm_address_t source    = get_vm_address();
2787 	mach_vm_size_t size         = get_vm_size();
2788 	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
2789 	mach_vm_address_t dest      = 0;
2790 
2791 	/*  For sizes < 1 page, vm_map_copyin_common() uses
2792 	 *  vm_map_copyin_kernel_buffer() to read in the memory,
2793 	 *  returning different errors, see 8182239. */
2794 	kern_return_t kr_expected = (size < vm_page_size) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2795 
2796 	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2797 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
2798 	logv("Page read-protected.");
2799 
2800 	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2801 	    (uintmax_t)source);
2802 	assert_allocate_copy_return(source, size, &dest, kr_expected);
2803 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
2804 
2805 	deallocate_range(dest, size);
2806 }
2807 
2808 /* Copying to a partially deallocated region fails. */
2809 void
test_copy_dest_partially_deallocated_region()2810 test_copy_dest_partially_deallocated_region()
2811 {
2812 	mach_vm_address_t dest             = get_vm_address();
2813 	mach_vm_address_t source           = get_buffer_address();
2814 	mach_msg_type_number_t size        = (mach_msg_type_number_t)get_buffer_size();
2815 	mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2816 #if __MAC_OS_X_VERSION_MIN_REQUIRED > 1080
2817 	logv(
2818 		"Deallocating a mid-range source page at address "
2819 		"0x%jx...",
2820 		(uintmax_t)source_mid_point);
2821 	assert_deallocate_success(source_mid_point, vm_page_size);
2822 	logv("Page deallocated.");
2823 
2824 	logv(
2825 		"Copying region of address 0x%jx and size 0x%jx (%ju), on "
2826 		"memory at address 0x%jx...",
2827 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2828 	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2829 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2830 #else
2831 	logv(
2832 		"Bypassing partially deallocated region test "
2833 		"(See <rdar://problem/12190999>)");
2834 #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED > 1080 */
2835 }
2836 
2837 /* Copying from a partially deallocated region fails. */
2838 void
test_copy_source_partially_deallocated_region()2839 test_copy_source_partially_deallocated_region()
2840 {
2841 	mach_vm_address_t source           = get_vm_address();
2842 	mach_vm_address_t dest             = get_buffer_address();
2843 	mach_msg_type_number_t size        = (mach_msg_type_number_t)get_buffer_size();
2844 	mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
2845 
2846 	logv(
2847 		"Deallocating a mid-range source page at address "
2848 		"0x%jx...",
2849 		(uintmax_t)source_mid_point);
2850 	assert_deallocate_success(source_mid_point, vm_page_size);
2851 	logv("Page deallocated.");
2852 
2853 	logv(
2854 		"Copying region of address 0x%jx and size 0x%jx (%ju), on "
2855 		"memory at address 0x%jx...",
2856 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2857 	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2858 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2859 }
2860 
2861 /* Copying from a partially read-protected region fails. */
2862 void
test_copy_source_partially_unreadable_region()2863 test_copy_source_partially_unreadable_region()
2864 {
2865 	mach_vm_address_t source    = get_vm_address();
2866 	mach_vm_address_t dest      = get_buffer_address();
2867 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2868 	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
2869 	kern_return_t kr            = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2870 
2871 	logv(
2872 		"Read-protecting a mid-range buffer page at address "
2873 		"0x%jx...",
2874 		(uintmax_t)mid_point);
2875 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
2876 	logv("Page read-protected.");
2877 
2878 	logv(
2879 		"Copying region at address 0x%jx and size 0x%jx (%ju), on "
2880 		"memory at address 0x%jx...",
2881 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2882 
2883 	assert_copy_return(source, size, dest, kr);
2884 	logv("Returned expected error: %s.", mach_error_string(kr));
2885 }
2886 
2887 /* Copying to a partially write-protected region fails. */
2888 void
test_copy_dest_partially_unwriteable_region()2889 test_copy_dest_partially_unwriteable_region()
2890 {
2891 	kern_return_t kr;
2892 	mach_vm_address_t dest      = get_vm_address();
2893 	mach_vm_address_t source    = get_buffer_address();
2894 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2895 	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2896 
2897 #if __MAC_OS_X_VERSION_MIN_REQUIRED > 1080
2898 	logv(
2899 		"Write-protecting a mid-range destination page at address "
2900 		"0x%jx...",
2901 		(uintmax_t)mid_point);
2902 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2903 	logv("Page write-protected.");
2904 	logv(
2905 		"Copying region at address 0x%jx and size 0x%jx (%ju), on "
2906 		"memory at address 0x%jx...",
2907 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2908 	if (size >= vm_page_size) {
2909 		kr = KERN_PROTECTION_FAILURE;
2910 	} else {
2911 		kr = KERN_INVALID_ADDRESS;
2912 	}
2913 	assert_copy_return(source, size, dest, kr);
2914 	logv("Returned expected error: %s.", mach_error_string(kr));
2915 #else
2916 	logv(
2917 		"Bypassing partially unwriteable region test "
2918 		"(See <rdar://problem/12190999>)");
2919 #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED > 1080 */
2920 }
2921 
2922 /* Copying on partially deallocated memory fails. */
2923 void
2924 test_copy_source_on_partially_deallocated_range()
2925 {
2926 	mach_vm_address_t source    = get_vm_address();
2927 	mach_vm_address_t dest      = get_buffer_address();
2928 	mach_vm_address_t start     = mach_vm_trunc_page(source);
2929 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2930 
2931 	logv(
2932 		"Deallocating the first source page at address "
2933 		"0x%jx...",
2934 		(uintmax_t)start);
2935 	assert_deallocate_success(start, vm_page_size);
2936 	logv("Page deallocated.");
2937 
2938 	logv(
2939 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2940 		"memory at address 0x%jx...",
2941 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2942 	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2943 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2944 }
2945 
2946 /* Copying on partially deallocated memory fails. */
2947 void
2948 test_copy_dest_on_partially_deallocated_range()
2949 {
2950 	mach_vm_address_t source    = get_vm_address();
2951 	mach_vm_address_t dest      = get_buffer_address();
2952 	mach_vm_address_t start     = mach_vm_trunc_page(dest);
2953 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2954 
2955 	logv(
2956 		"Deallocating the first destination page at address "
2957 		"0x%jx...",
2958 		(uintmax_t)start);
2959 	assert_deallocate_success(start, vm_page_size);
2960 	logv("Page deallocated.");
2961 
2962 	logv(
2963 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2964 		"memory at address 0x%jx...",
2965 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2966 	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2967 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2968 }
2969 
2970 /* Copying on partially unwritable memory fails. */
2971 void
2972 test_copy_dest_on_partially_unwritable_range()
2973 {
2974 	mach_vm_address_t source    = get_vm_address();
2975 	mach_vm_address_t dest      = get_buffer_address();
2976 	mach_vm_address_t start     = mach_vm_trunc_page(dest);
2977 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2978 
2979 	/*  For sizes < msg_ool_size_small,
2980 	 *  vm_map_copy_overwrite_nested() uses
2981 	 *  vm_map_copyout_kernel_buffer() to read in the memory,
2982 	 *  returning different errors, see 8217123. */
2983 	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2984 
2985 	logv(
2986 		"Write-protecting the first destination page at address "
2987 		"0x%jx...",
2988 		(uintmax_t)start);
2989 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2990 	logv("Page write-protected.");
2991 
2992 	logv(
2993 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2994 		"memory at address 0x%jx...",
2995 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2996 	assert_copy_return(source, size, dest, kr_expected);
2997 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
2998 }
2999 
3000 /* Copying on partially unreadable memory fails. */
3001 void
3002 test_copy_source_on_partially_unreadable_range()
3003 {
3004 	mach_vm_address_t source    = get_vm_address();
3005 	mach_vm_address_t dest      = get_buffer_address();
3006 	mach_vm_address_t start     = mach_vm_trunc_page(source);
3007 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
3008 
3009 	/*  For sizes < msg_ool_size_small,
3010 	 *  vm_map_copy_overwrite_nested() uses
3011 	 *  vm_map_copyout_kernel_buffer() to read in the memory,
3012 	 *  returning different errors, see 8217123. */
3013 	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
3014 
3015 	logv(
3016 		"Read-protecting the first destination page at address "
3017 		"0x%jx...",
3018 		(uintmax_t)start);
3019 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
3020 	logv("Page read-protected.");
3021 
3022 	logv(
3023 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
3024 		"memory at address 0x%jx...",
3025 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
3026 	assert_copy_return(source, size, dest, kr_expected);
3027 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
3028 }
3029 
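/* Editor's note: the sketch below is illustrative only and is not wired into
 * any suite. It restates the expected-error rule used by the protection-related
 * copy tests above: per the comments referencing 8217123, copies smaller than
 * msg_ool_size_small take the kernel-buffer path and report
 * KERN_INVALID_ADDRESS, while larger copies hit the protection check. The
 * tests approximate that threshold as two pages; the helper name is
 * hypothetical. */
static inline kern_return_t __attribute__((unused))
example_expected_partial_copy_error(mach_msg_type_number_t size)
{
	return (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
}
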
3030 /********************************/
3031 /* mach_vm_protect() main tests */
3032 /********************************/
3033 
3034 void
3035 test_zero_filled_extended()
3036 {
3037 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
3038 }
3039 
3040 /* Allocated region is still zero-filled after read-protecting it and
3041  * then restoring read-access. */
3042 void
3043 test_zero_filled_readprotect()
3044 {
3045 	mach_vm_address_t address = get_vm_address();
3046 	mach_vm_size_t size       = get_vm_size();
3047 
3048 	logv("Setting read access on 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size,
3049 	    (size == 1) ? "" : "s", (uintmax_t)address);
3050 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, VM_PROT_DEFAULT), "mach_vm_protect()");
3051 	logv("Region has read access.");
3052 	test_zero_filled_extended();
3053 }
3054 
3055 void
3056 verify_protection(vm_prot_t protection, const char * protection_name)
3057 {
3058 	mach_vm_address_t address    = get_vm_address();
3059 	mach_vm_size_t size          = get_vm_size();
3060 	mach_vm_size_t original_size = size;
3061 	vm_region_basic_info_data_64_t info;
3062 	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
3063 	mach_port_t unused;
3064 
3065 	logv(
3066 		"Verifying %s-protection on region of address 0x%jx and "
3067 		"size 0x%jx (%ju) with mach_vm_region()...",
3068 		protection_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
3069 	T_QUIET; T_ASSERT_MACH_SUCCESS(
3070 		mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused),
3071 		"mach_vm_region()");
3072 	if (original_size) {
3073 		T_QUIET; T_ASSERT_EQ((info.protection & protection), 0,
3074 		    "Region "
3075 		    "is unexpectedly %s-unprotected.",
3076 		    protection_name);
3077 		logv("Region is %s-protected as expected.", protection_name);
3078 	} else {
3079 		T_QUIET; T_ASSERT_NE(info.protection & protection, 0,
3080 		    "Region is "
3081 		    "unexpectedly %s-protected.",
3082 		    protection_name);
3083 		logv("Region is %s-unprotected as expected.", protection_name);
3084 	}
3085 }
3086 
3087 void
3088 test_verify_readprotection()
3089 {
3090 	verify_protection(VM_PROT_READ, "read");
3091 }
3092 
3093 void
3094 test_verify_writeprotection()
3095 {
3096 	verify_protection(VM_PROT_WRITE, "write");
3097 }
3098 
3099 /******************************/
3100 /* Protection bus error tests */
3101 /******************************/
3102 
3103 /* mach_vm_protect() affects the smallest aligned region (integral
3104  * number of pages) containing the given range. */
3105 
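/* Editor's note: illustrative sketch only, not wired into any suite. It shows
 * the page-aligned span that a protection change on [address, address + size)
 * actually covers, computed the same way the boundary tests below pick their
 * probe addresses. The helper name is hypothetical. */
static inline mach_vm_size_t __attribute__((unused))
example_protected_span(mach_vm_address_t address, mach_vm_size_t size)
{
	mach_vm_address_t first = mach_vm_trunc_page(address);
	mach_vm_address_t end   = round_page(address + size);

	return end - first;
}
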
3106 /* Addresses in read-protected range are inaccessible. */
3107 void
3108 access_readprotected_range_address(mach_vm_address_t address, const char * position)
3109 {
3110 	logv("Reading from %s 0x%jx of read-protected range...", position, (uintmax_t)address);
3111 	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
3112 	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx."
3113 	    "Should have died with signal SIGBUS.",
3114 	    (uintmax_t)bad_value, (uintmax_t)address);
3115 }
3116 
3117 /* Start of read-protected range is inaccessible. */
3118 void
3119 test_access_readprotected_range_start()
3120 {
3121 	access_readprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
3122 }
3123 
3124 /* Middle of read-protected range is inaccessible. */
3125 void
3126 test_access_readprotected_range_middle()
3127 {
3128 	mach_vm_address_t address = get_vm_address();
3129 	access_readprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
3130 }
3131 
3132 /* End of read-protected range is inaccessible. */
3133 void
3134 test_access_readprotected_range_end()
3135 {
3136 	access_readprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
3137 }
3138 
3139 /* Addresses in write-protected range are unwritable. */
3140 void
3141 write_writeprotected_range_address(mach_vm_address_t address, const char * position)
3142 {
3143 	logv("Writing on %s 0x%jx of write-protected range...", position, (uintmax_t)address);
3144 	MACH_VM_ADDRESS_T(address) = 0x0;
3145 	T_ASSERT_FAIL("Unexpectedly wrote value 0x0 value at address 0x%jx."
3146 	    "Should have died with signal SIGBUS.",
3147 	    (uintmax_t)address);
3148 }
3149 
3150 /* Start of write-protected range is unwritable. */
3151 void
3152 test_write_writeprotected_range_start()
3153 {
3154 	write_writeprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
3155 }
3156 
3157 /* Middle of write-protected range is unwritable. */
3158 void
3159 test_write_writeprotected_range_middle()
3160 {
3161 	mach_vm_address_t address = get_vm_address();
3162 	write_writeprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
3163 }
3164 
3165 /* End of write-protected range is unwritable. */
3166 void
3167 test_write_writeprotected_range_end()
3168 {
3169 	write_writeprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
3170 }
3171 
3172 /*************************************/
3173 /* mach_vm_protect() edge case tests */
3174 /*************************************/
3175 
3176 void
3177 protect_zero_size(vm_prot_t protection, const char * protection_name)
3178 {
3179 	int i;
3180 	kern_return_t kr;
3181 	vm_map_t this_task            = mach_task_self();
3182 	mach_vm_address_t addresses[] = {0x0,
3183 		                         0x1,
3184 		                         vm_page_size - 1,
3185 		                         vm_page_size,
3186 		                         vm_page_size + 1,
3187 		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
3188 		                         (mach_vm_address_t)UINT_MAX,
3189 		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
3190 		                         (mach_vm_address_t)UINTMAX_MAX};
3191 	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
3192 
3193 	logv("%s-protecting 0x0 (0) bytes at various addresses...", protection_name);
3194 	for (i = 0; i < numofaddresses; i++) {
3195 		kr = mach_vm_protect(this_task, addresses[i], 0, FALSE, protection);
3196 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
3197 		    "mach_vm_protect() at "
3198 		    "address 0x%jx unexpectedly failed: %s.",
3199 		    (uintmax_t)addresses[i], mach_error_string(kr));
3200 	}
3201 	logv("Protection successful.");
3202 }
3203 
3204 void
3205 test_readprotect_zero_size()
3206 {
3207 	protect_zero_size(VM_PROT_READ, "Read");
3208 }
3209 
3210 void
3211 test_writeprotect_zero_size()
3212 {
3213 	protect_zero_size(VM_PROT_WRITE, "Write");
3214 }
3215 
3216 /* Protecting a range wrapped around the address space fails. */
3217 void
3218 protect_wrapped_around_ranges(vm_prot_t protection, const char * protection_name)
3219 {
3220 	int i;
3221 	kern_return_t kr;
3222 	vm_map_t this_task = mach_task_self();
3223 	struct {
3224 		mach_vm_address_t address;
3225 		mach_vm_size_t size;
3226 	} ranges[] = {
3227 		{0x1, (mach_vm_size_t)UINTMAX_MAX},
3228 		{vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
3229 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
3230 		{(mach_vm_address_t)UINTMAX_MAX, 1},
3231 	};
3232 	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
3233 
3234 	logv(
3235 		"%s-protecting various memory ranges wrapping around the "
3236 		"address space...",
3237 		protection_name);
3238 	for (i = 0; i < numofranges; i++) {
3239 		kr = mach_vm_protect(this_task, ranges[i].address, ranges[i].size, FALSE, protection);
3240 		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
3241 		    "mach_vm_protect() with address 0x%jx and size "
3242 		    "0x%jx (%ju) unexpectedly returned: %s.\n"
3243 		    "Should have returned: %s.",
3244 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
3245 		    mach_error_string(KERN_INVALID_ARGUMENT));
3246 	}
3247 	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
3248 }
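
/* Editor's note: illustrative sketch only, not wired into any suite. A range
 * [address, address + size) wraps around the 64-bit address space exactly when
 * computing its end overflows, which holds for every entry in the ranges[]
 * table above. The helper name is hypothetical. */
static inline boolean_t __attribute__((unused))
example_range_wraps_around(mach_vm_address_t address, mach_vm_size_t size)
{
	return (boolean_t)(size > (mach_vm_size_t)UINTMAX_MAX - address);
}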
3249 
3250 void
3251 test_readprotect_wrapped_around_ranges()
3252 {
3253 	protect_wrapped_around_ranges(VM_PROT_READ, "Read");
3254 }
3255 
3256 void
3257 test_writeprotect_wrapped_around_ranges()
3258 {
3259 	protect_wrapped_around_ranges(VM_PROT_WRITE, "Write");
3260 }
3261 
3262 /*******************/
3263 /* vm_copy() tests */
3264 /*******************/
3265 
3266 /* Verify the address space is being shared. */
3267 void
3268 assert_share_mode(mach_vm_address_t address, unsigned share_mode, const char * share_mode_name)
3269 {
3270 	mach_vm_size_t size = get_vm_size();
3271 	vm_region_extended_info_data_t info;
3272 	mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
3273 	mach_port_t unused;
3274 
3275 /*
3276  * XXX Fails on UVM kernel.  See <rdar://problem/12164664>
3277  */
3278 #if notyet /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
3279 	logv(
3280 		"Verifying %s share mode on region of address 0x%jx and "
3281 		"size 0x%jx (%ju)...",
3282 		share_mode_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
3283 	T_QUIET; T_ASSERT_MACH_SUCCESS(
3284 		mach_vm_region(mach_task_self(), &address, &size, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&info, &count, &unused),
3285 		"mach_vm_region()");
3286 	T_QUIET; T_ASSERT_EQ(info.share_mode, share_mode,
3287 	    "Region's share mode "
3288 	    " unexpectedly is not %s but %d.",
3289 	    share_mode_name, info.share_mode);
3290 	logv("Region has a share mode of %s as expected.", share_mode_name);
3291 #else
3292 	logv("Bypassing share_mode verification (See <rdar://problem/12164664>)");
3293 #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
3294 }
3295 
3296 /* Do the vm_copy() and verify its success. */
3297 void
3298 assert_vmcopy_success(vm_address_t src, vm_address_t dst, const char * source_name)
3299 {
3300 	kern_return_t kr;
3301 	mach_vm_size_t size = get_vm_size();
3302 
3303 	logv("Copying (using mach_vm_copy()) from a %s source...", source_name);
3304 	kr = mach_vm_copy(mach_task_self(), src, size, dst);
3305 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
3306 	    "mach_vm_copy() with the source address "
3307 	    "0x%jx, designation address 0x%jx, and size 0x%jx (%ju) unexpectly "
3308 	    "returned %s.\n  Should have returned: %s.",
3309 	    (uintmax_t)src, (uintmax_t)dst, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr),
3310 	    mach_error_string(KERN_SUCCESS));
3311 	logv("Copy (mach_vm_copy()) was successful as expected.");
3312 }
3313 
3314 void
3315 write_region(mach_vm_address_t address, mach_vm_size_t start)
3316 {
3317 	mach_vm_size_t size = get_vm_size();
3318 
3319 	filter_addresses_do_else(empty, FALSE, address, size, write_address, write_address, start);
3320 }
3321 
3322 void
3323 verify_region(mach_vm_address_t address, mach_vm_address_t start)
3324 {
3325 	mach_vm_size_t size = get_vm_size();
3326 
3327 	filter_addresses_do_else(empty, FALSE, address, size, verify_address, verify_address, start);
3328 }
3329 
3330 /* Perform the post vm_copy() action and verify its results. */
3331 void
3332 modify_one_and_verify_all_regions(vm_address_t src, vm_address_t dst, vm_address_t shared_copied, boolean_t shared)
3333 {
3334 	mach_vm_size_t size = get_vm_size();
3335 	int action          = get_vmcopy_post_action();
3336 
3337 	/* Do the post vm_copy() action. */
3338 	switch (action) {
3339 	case VMCOPY_MODIFY_SRC:
3340 		logv("Modifying: source%s...", shared ? " (shared with other region)" : "");
3341 		write_region(src, 1);
3342 		break;
3343 
3344 	case VMCOPY_MODIFY_DST:
3345 		logv("Modifying: destination...");
3346 		write_region(dst, 1);
3347 		break;
3348 
3349 	case VMCOPY_MODIFY_SHARED_COPIED:
3350 		/* If no shared_copied then no need to verify (nothing changed). */
3351 		if (!shared_copied) {
3352 			return;
3353 		}
3354 		logv("Modifying: shared/copied%s...", shared ? " (shared with source region)" : "");
3355 		write_region(shared_copied, 1);
3356 		break;
3357 
3358 	default:
3359 		T_ASSERT_FAIL("Unknown post vm_copy() action (%d)", action);
3360 	}
3361 	logv("Modification was successful as expected.");
3362 
3363 	/* Verify all the regions with what is expected. */
3364 	logv("Verifying: source... ");
3365 	verify_region(src, (VMCOPY_MODIFY_SRC == action || (shared && VMCOPY_MODIFY_SHARED_COPIED == action)) ? 1 : 0);
3366 	logv("destination... ");
3367 	verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3368 	if (shared_copied) {
3369 		logv("shared/copied... ");
3370 		verify_region(shared_copied, (VMCOPY_MODIFY_SHARED_COPIED == action || (shared && VMCOPY_MODIFY_SRC == action)) ? 1 : 0);
3371 	}
3372 	logv("Verification was successful as expected.");
3373 }
3374 
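/* The vm_copy() tests below exercise mach_vm_copy() against sources in each
 * share mode the suite can construct: a fresh allocation (SM_EMPTY), a
 * VM_INHERIT_SHARE region seen from a fork()ed child (SM_SHARED), regions that
 * were the source or destination of a previous mach_vm_copy() (SM_COW), a
 * mapping of a memory entry (SM_TRUESHARED), and a mach_vm_remap() alias
 * (SM_PRIVATE_ALIASED). */
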
3375 /* Test source being a simple fresh region. */
3376 void
3377 test_vmcopy_fresh_source()
3378 {
3379 	mach_vm_size_t size = get_vm_size();
3380 	mach_vm_address_t src, dst;
3381 
3382 	if (get_vmcopy_post_action() == VMCOPY_MODIFY_SHARED_COPIED) {
3383 		/* No shared/copied region to modify so just return. */
3384 		logv("No shared/copied region as expected.");
3385 		return;
3386 	}
3387 
3388 	assert_allocate_success(&src, size, TRUE);
3389 
3390 	assert_share_mode(src, SM_EMPTY, "SM_EMPTY");
3391 
3392 	write_region(src, 0);
3393 
3394 	assert_allocate_success(&dst, size, TRUE);
3395 
3396 	assert_vmcopy_success(src, dst, "freshly allocated");
3397 
3398 	modify_one_and_verify_all_regions(src, dst, 0, FALSE);
3399 
3400 	assert_deallocate_success(src, size);
3401 	assert_deallocate_success(dst, size);
3402 }
3403 
3404 /* Test source copied from a shared region. */
3405 void
3406 test_vmcopy_shared_source()
3407 {
3408 	mach_vm_size_t size = get_vm_size();
3409 	mach_vm_address_t src, dst, shared;
3410 	int action = get_vmcopy_post_action();
3411 	int pid, status;
3412 
3413 	assert_allocate_success(&src, size, TRUE);
3414 
3415 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_inherit(mach_task_self(), src, size, VM_INHERIT_SHARE), "mach_vm_inherit()");
3416 
3417 	write_region(src, 0);
3418 
3419 	pid = fork();
3420 	if (pid == 0) {
3421 		/* Verify that the child's 'src' is shared with the
3422 		 *  parent's src */
3423 		assert_share_mode(src, SM_SHARED, "SM_SHARED");
3424 		assert_allocate_success(&dst, size, TRUE);
3425 		assert_vmcopy_success(src, dst, "shared");
3426 		if (VMCOPY_MODIFY_SHARED_COPIED == action) {
3427 			logv("Modifying: shared...");
3428 			write_region(src, 1);
3429 			logv("Modification was successsful as expected.");
3430 			logv("Verifying: source... ");
3431 			verify_region(src, 1);
3432 			logv("destination...");
3433 			verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3434 			logv("Verification was successful as expected.");
3435 		} else {
3436 			modify_one_and_verify_all_regions(src, dst, 0, TRUE);
3437 		}
3438 		assert_deallocate_success(dst, size);
3439 		exit(0);
3440 	} else if (pid > 0) {
3441 		/* In the parent, src becomes the shared region. */
3442 		shared = src;
3443 		wait(&status);
3444 		if (WEXITSTATUS(status) != 0) {
3445 			exit(status);
3446 		}
3447 		/* verify shared (shared with child's src) */
3448 		logv("Verifying: shared...");
3449 		verify_region(shared, (VMCOPY_MODIFY_SHARED_COPIED == action || VMCOPY_MODIFY_SRC == action) ? 1 : 0);
3450 		logv("Verification was successful as expected.");
3451 	} else {
3452 		T_WITH_ERRNO; T_ASSERT_FAIL("fork failed");
3453 	}
3454 
3455 	assert_deallocate_success(src, size);
3456 }
3457 
3458 /* Test source copied from another mapping. */
3459 void
3460 test_vmcopy_copied_from_source()
3461 {
3462 	mach_vm_size_t size = get_vm_size();
3463 	mach_vm_address_t src, dst, copied;
3464 
3465 	assert_allocate_success(&copied, size, TRUE);
3466 	write_region(copied, 0);
3467 
3468 	assert_allocate_success(&src, size, TRUE);
3469 
3470 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), copied, size, src), "mach_vm_copy()");
3471 
3472 	assert_share_mode(src, SM_COW, "SM_COW");
3473 
3474 	assert_allocate_success(&dst, size, TRUE);
3475 
3476 	assert_vmcopy_success(src, dst, "copied from");
3477 
3478 	modify_one_and_verify_all_regions(src, dst, copied, FALSE);
3479 
3480 	assert_deallocate_success(src, size);
3481 	assert_deallocate_success(dst, size);
3482 	assert_deallocate_success(copied, size);
3483 }
3484 
3485 /* Test source copied to another mapping. */
3486 void
3487 test_vmcopy_copied_to_source()
3488 {
3489 	mach_vm_size_t size = get_vm_size();
3490 	mach_vm_address_t src, dst, copied;
3491 
3492 	assert_allocate_success(&src, size, TRUE);
3493 	write_region(src, 0);
3494 
3495 	assert_allocate_success(&copied, size, TRUE);
3496 
3497 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), src, size, copied), "mach_vm_copy()");
3498 
3499 	assert_share_mode(src, SM_COW, "SM_COW");
3500 
3501 	assert_allocate_success(&dst, size, TRUE);
3502 
3503 	assert_vmcopy_success(src, dst, "copied to");
3504 
3505 	modify_one_and_verify_all_regions(src, dst, copied, FALSE);
3506 
3507 	assert_deallocate_success(src, size);
3508 	assert_deallocate_success(dst, size);
3509 	assert_deallocate_success(copied, size);
3510 }
3511 
3512 /* Test copying from a true-shared source. */
3513 void
3514 test_vmcopy_trueshared_source()
3515 {
3516 	mach_vm_size_t size   = get_vm_size();
3517 	mach_vm_address_t src = 0x0, dst, shared;
3518 	vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
3519 	vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
3520 	mem_entry_name_port_t mem_obj;
3521 
3522 	assert_allocate_success(&shared, size, TRUE);
3523 	write_region(shared, 0);
3524 
3525 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)shared, cur_protect, &mem_obj,
3526 	    (mem_entry_name_port_t)NULL),
3527 	    "mach_make_memory_entry_64()");
3528 	T_QUIET; T_ASSERT_MACH_SUCCESS(
3529 		mach_vm_map(mach_task_self(), &src, size, 0, TRUE, mem_obj, 0, FALSE, cur_protect, max_protect, VM_INHERIT_NONE),
3530 		"mach_vm_map()");
3531 
3532 	assert_share_mode(src, SM_TRUESHARED, "SM_TRUESHARED");
3533 
3534 	assert_allocate_success(&dst, size, TRUE);
3535 
3536 	assert_vmcopy_success(src, dst, "true shared");
3537 
3538 	modify_one_and_verify_all_regions(src, dst, shared, TRUE);
3539 
3540 	assert_deallocate_success(src, size);
3541 	assert_deallocate_success(dst, size);
3542 	assert_deallocate_success(shared, size);
3543 }
3544 
3545 /* Test copying from a private aliased source. */
3546 void
3547 test_vmcopy_private_aliased_source()
3548 {
3549 	mach_vm_size_t size   = get_vm_size();
3550 	mach_vm_address_t src = 0x0, dst, shared;
3551 	vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
3552 	vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
3553 
3554 	assert_allocate_success(&shared, size, TRUE);
3555 	write_region(shared, 0);
3556 
3557 	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_remap(mach_task_self(), &src, size, 0, TRUE, mach_task_self(), shared, FALSE, &cur_protect,
3558 	    &max_protect, VM_INHERIT_NONE),
3559 	    "mach_vm_remap()");
3560 
3561 	assert_share_mode(src, SM_PRIVATE_ALIASED, "SM_PRIVATE_ALIASED");
3562 
3563 	assert_allocate_success(&dst, size, TRUE);
3564 
3565 	assert_vmcopy_success(src, dst, "true shared");
3566 
3567 	modify_one_and_verify_all_regions(src, dst, shared, TRUE);
3568 
3569 	assert_deallocate_success(src, size);
3570 	assert_deallocate_success(dst, size);
3571 	assert_deallocate_success(shared, size);
3572 }
3573 
3574 /*************/
3575 /* VM Suites */
3576 /*************/
3577 
3578 void
3579 run_allocate_test_suites()
3580 {
3581 	/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
3582 	 * error finding xnu major version number. */
3583 	/* unsigned int xnu_version = xnu_major_version(); */
3584 
3585 	UnitTests allocate_main_tests = {
3586 		{"Allocated address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
3587 		{"Allocated address is page-aligned", test_aligned_address},
3588 		{"Allocated memory is zero-filled", test_zero_filled},
3589 		{"Write and verify address-filled pattern", test_write_address_filled},
3590 		{"Write and verify checkerboard pattern", test_write_checkerboard},
3591 		{"Write and verify reverse checkerboard pattern", test_write_reverse_checkerboard},
3592 		{"Write and verify page ends pattern", test_write_page_ends},
3593 		{"Write and verify page interiors pattern", test_write_page_interiors},
3594 		{"Reallocate allocated pages", test_reallocate_pages},
3595 	};
3596 	UnitTests allocate_address_error_tests = {
3597 		{"Allocate at address zero", test_allocate_at_zero},
3598 		{"Allocate at a 2 MB boundary-unaligned, page-aligned "
3599 		 "address",
3600 		 test_allocate_2MB_boundary_unaligned_page_aligned_address},
3601 	};
3602 	UnitTests allocate_argument_error_tests = {
3603 		{"Allocate in NULL VM map", test_allocate_in_null_map},
3604 		{"Allocate with kernel flags", test_allocate_with_kernel_flags},
3605 		{"Allocate super-page with incompatible flags", test_allocate_superpage_with_incompatible_flags},
3606 	};
3607 	UnitTests allocate_fixed_size_tests = {
3608 		{"Allocate zero size", test_allocate_zero_size},
3609 		{"Allocate overflowing size", test_allocate_overflowing_size},
3610 		{"Allocate a page with highest address hint", test_allocate_page_with_highest_address_hint},
3611 		{"Allocate two pages and verify first fit strategy", test_allocate_first_fit_pages},
3612 	};
3613 	UnitTests allocate_invalid_large_size_test = {
3614 		{"Allocate invalid large size", test_allocate_invalid_large_size},
3615 	};
3616 	UnitTests mach_vm_map_protection_inheritance_error_test = {
3617 		{"mach_vm_map() with invalid protection/inheritance "
3618 		 "arguments",
3619 		 test_mach_vm_map_protection_inheritance_error},
3620 	};
3621 	UnitTests mach_vm_map_large_mask_overflow_error_test = {
3622 		{"mach_vm_map() with large address mask", test_mach_vm_map_large_mask_overflow_error},
3623 	};
3624 
3625 	/* Run the test suites with various allocators and VM sizes, and
3626 	 *  unspecified or fixed (page-aligned or page-unaligned),
3627 	 *  addresses. */
3628 	for (allocators_idx = 0; allocators_idx < numofallocators; allocators_idx++) {
3629 		for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3630 			for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3631 				for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3632 					/* An allocated address will be page-aligned. */
3633 					/* Only run the zero size mach_vm_map() error tests in the
3634 					 *  unspecified address case, since we won't be able to retrieve a
3635 					 *  fixed address for allocation. See 8003930. */
3636 					if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED) ||
3637 					    (allocators_idx != MACH_VM_ALLOCATE && sizes_idx == ZERO_BYTES && flags_idx == FIXED)) {
3638 						continue;
3639 					}
3640 					run_suite(set_up_allocator_and_vm_variables, allocate_argument_error_tests, do_nothing,
3641 					    "%s argument error tests, %s%s address, "
3642 					    "%s size: 0x%jx (%ju)",
3643 					    allocators[allocators_idx].description, address_flags[flags_idx].description,
3644 					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3645 					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3646 					    (uintmax_t)vm_sizes[sizes_idx].size);
3647 					/* mach_vm_map() only protection and inheritance error
3648 					 *  tests. */
3649 					if (allocators_idx != MACH_VM_ALLOCATE) {
3650 						run_suite(set_up_allocator_and_vm_variables, mach_vm_map_protection_inheritance_error_test, do_nothing,
3651 						    "%s protection and inheritance "
3652 						    "error test, %s%s address, %s size: 0x%jx "
3653 						    "(%ju)",
3654 						    allocators[allocators_idx].description, address_flags[flags_idx].description,
3655 						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3656 						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3657 						    (uintmax_t)vm_sizes[sizes_idx].size);
3658 					}
3659 					/* mach_vm_map() cannot allocate 0 bytes, see 8003930. */
3660 					if (allocators_idx == MACH_VM_ALLOCATE || sizes_idx != ZERO_BYTES) {
3661 						run_suite(set_up_allocator_and_vm_variables_and_allocate, allocate_main_tests, deallocate,
3662 						    "%s main "
3663 						    "allocation tests, %s%s address, %s size: 0x%jx "
3664 						    "(%ju)",
3665 						    allocators[allocators_idx].description, address_flags[flags_idx].description,
3666 						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3667 						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3668 						    (uintmax_t)vm_sizes[sizes_idx].size);
3669 					}
3670 				}
3671 			}
3672 			run_suite(set_up_allocator_and_vm_size, allocate_address_error_tests, do_nothing,
3673 			    "%s address "
3674 			    "error allocation tests, %s size: 0x%jx (%ju)",
3675 			    allocators[allocators_idx].description, vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3676 			    (uintmax_t)vm_sizes[sizes_idx].size);
3677 		}
3678 		run_suite(set_up_allocator, allocate_fixed_size_tests, do_nothing, "%s fixed size allocation tests",
3679 		    allocators[allocators_idx].description);
3680 		/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
3681 		 * error finding xnu major version number. */
3682 		/* mach_vm_map() with a named entry triggers a panic with this test
3683 		 *  unless under xnu-1598 or later, see 8048580. */
3684 		/* if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY
3685 		|| xnu_version >= 1598) { */
3686 		if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY) {
3687 			run_suite(set_up_allocator, allocate_invalid_large_size_test, do_nothing, "%s invalid large size allocation test",
3688 			    allocators[allocators_idx].description);
3689 		}
3690 	}
3691 	/* mach_vm_map() only large mask overflow tests. */
3692 	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3693 		run_suite(set_up_vm_size, mach_vm_map_large_mask_overflow_error_test, do_nothing,
3694 		    "mach_vm_map() large mask overflow "
3695 		    "error test, size: 0x%jx (%ju)",
3696 		    (uintmax_t)vm_sizes[sizes_idx].size, (uintmax_t)vm_sizes[sizes_idx].size);
3697 	}
3698 }
3699 
3700 void
3701 run_deallocate_test_suites()
3702 {
3703 	UnitTests access_deallocated_memory_tests = {
3704 		{"Read start of deallocated range", test_access_deallocated_range_start},
3705 		{"Read middle of deallocated range", test_access_deallocated_range_middle},
3706 		{"Read end of deallocated range", test_access_deallocated_range_end},
3707 	};
3708 	UnitTests deallocate_reallocate_tests = {
3709 		{"Deallocate twice", test_deallocate_twice},
3710 		{"Write pattern, deallocate, reallocate (deallocated "
3711 		 "memory is inaccessible), and verify memory is "
3712 		 "zero-filled",
3713 		 test_write_pattern_deallocate_reallocate_zero_filled},
3714 	};
3715 	UnitTests deallocate_null_map_test = {
3716 		{"Deallocate in NULL VM map", test_deallocate_in_null_map},
3717 	};
3718 	UnitTests deallocate_edge_case_tests = {
3719 		{"Deallocate zero size ranges", test_deallocate_zero_size_ranges},
3720 		{"Deallocate memory ranges whose end rounds to 0x0", test_deallocate_rounded_zero_end_ranges},
3721 		{"Deallocate wrapped around memory ranges", test_deallocate_wrapped_around_ranges},
3722 	};
3723 	UnitTests deallocate_suicide_test = {
3724 		{"Deallocate whole address space", test_deallocate_suicide},
3725 	};
3726 
3727 	/* All allocations done with mach_vm_allocate(). */
3728 	set_allocator(wrapper_mach_vm_allocate);
3729 
3730 	/* Run the test suites with various VM sizes, and unspecified or
3731 	 *  fixed (page-aligned or page-unaligned), addresses. */
3732 	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3733 		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3734 			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3735 				/* An allocated address will be page-aligned. */
3736 				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
3737 					continue;
3738 				}
3739 				/* Accessing deallocated memory should cause a segmentation
3740 				 *  fault. */
3741 				/* Nothing gets deallocated if size is zero. */
3742 				if (sizes_idx != ZERO_BYTES) {
3743 					set_expected_signal(SIGSEGV);
3744 					run_suite(set_up_vm_variables_and_allocate, access_deallocated_memory_tests, do_nothing,
3745 					    "Deallocated memory access tests, "
3746 					    "%s%s address, %s size: 0x%jx (%ju)",
3747 					    address_flags[flags_idx].description,
3748 					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3749 					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3750 					    (uintmax_t)vm_sizes[sizes_idx].size);
3751 					set_expected_signal(0);
3752 				}
3753 				run_suite(set_up_vm_variables_and_allocate, deallocate_reallocate_tests, do_nothing,
3754 				    "Deallocation and reallocation tests, %s%s "
3755 				    "address, %s size: 0x%jx (%ju)",
3756 				    address_flags[flags_idx].description,
3757 				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3758 				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3759 				    (uintmax_t)vm_sizes[sizes_idx].size);
3760 				run_suite(set_up_vm_variables, deallocate_null_map_test, do_nothing,
3761 				    "mach_vm_deallocate() null map test, "
3762 				    "%s%s address, %s size: 0x%jx (%ju)",
3763 				    address_flags[flags_idx].description,
3764 				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3765 				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3766 				    (uintmax_t)vm_sizes[sizes_idx].size);
3767 			}
3768 		}
3769 	}
3770 	run_suite(do_nothing, deallocate_edge_case_tests, do_nothing, "Edge case deallocation tests");
3771 
3772 	set_expected_signal(-1);        /* SIGSEGV or SIGBUS */
3773 	run_suite(do_nothing, deallocate_suicide_test, do_nothing, "Whole address space deallocation test");
3774 	set_expected_signal(0);
3775 }
3776 
3777 void
3778 run_read_test_suites()
3779 {
3780 	UnitTests read_main_tests = {
3781 		{"Read address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
3782 		{"Read address has the correct boundary offset", test_read_address_offset},
3783 		{"Reallocate read pages", test_reallocate_pages},
3784 		{"Read and verify zero-filled memory", test_zero_filled},
3785 	};
3786 	UnitTests read_pattern_tests = {
3787 		{"Read address-filled pattern", test_read_address_filled},
3788 		{"Read checkerboard pattern", test_read_checkerboard},
3789 		{"Read reverse checkerboard pattern", test_read_reverse_checkerboard},
3790 	};
3791 	UnitTests read_null_map_test = {
3792 		{"Read from NULL VM map", test_read_null_map},
3793 	};
3794 	UnitTests read_edge_case_tests = {
3795 		{"Read zero size", test_read_zero_size},
3796 		{"Read invalid large size", test_read_invalid_large_size},
3797 		{"Read wrapped around memory ranges", test_read_wrapped_around_ranges},
3798 	};
3799 	UnitTests read_inaccessible_tests = {
3800 		{"Read partially decallocated memory", test_read_partially_deallocated_range},
3801 		{"Read partially read-protected memory", test_read_partially_unreadable_range},
3802 	};
3803 
3804 	/* All allocations done with mach_vm_allocate(). */
3805 	set_allocator(wrapper_mach_vm_allocate);
3806 
3807 	/* Run the test suites with various VM sizes, and unspecified or
3808 	 *  fixed (page-aligned or page-unaligned) addresses. */
3809 	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3810 		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3811 			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3812 				/* An allocated address will be page-aligned. */
3813 				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
3814 					continue;
3815 				}
3816 				run_suite(set_up_vm_variables_allocate_read_deallocate, read_main_tests, deallocate,
3817 				    "mach_vm_read() "
3818 				    "main tests, %s%s address, %s size: 0x%jx (%ju)",
3819 				    address_flags[flags_idx].description,
3820 				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3821 				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3822 				    (uintmax_t)vm_sizes[sizes_idx].size);
3823 				run_suite(set_up_vm_variables_and_allocate_extra_page, read_pattern_tests, deallocate,
3824 				    "mach_vm_read() pattern tests, %s%s address, %s "
3825 				    "size: 0x%jx (%ju)",
3826 				    address_flags[flags_idx].description,
3827 				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3828 				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3829 				    (uintmax_t)vm_sizes[sizes_idx].size);
3830 				run_suite(set_up_vm_variables_and_allocate_extra_page, read_null_map_test, deallocate_extra_page,
3831 				    "mach_vm_read() null map test, "
3832 				    "%s%s address, %s size: 0x%jx (%ju)",
3833 				    address_flags[flags_idx].description,
3834 				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3835 				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3836 				    (uintmax_t)vm_sizes[sizes_idx].size);
3837 				/* A zero size range is always accessible. */
3838 				if (sizes_idx != ZERO_BYTES) {
3839 					run_suite(set_up_vm_variables_and_allocate_extra_page, read_inaccessible_tests, deallocate_extra_page,
3840 					    "mach_vm_read() inaccessibility tests, %s%s "
3841 					    "address, %s size: 0x%jx (%ju)",
3842 					    address_flags[flags_idx].description,
3843 					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3844 					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3845 					    (uintmax_t)vm_sizes[sizes_idx].size);
3846 				}
3847 			}
3848 		}
3849 	}
3850 	run_suite(do_nothing, read_edge_case_tests, do_nothing, "mach_vm_read() fixed size tests");
3851 }
3852 
3853 void
3854 run_write_test_suites()
3855 {
3856 	UnitTests write_main_tests = {
3857 		{"Write and verify zero-filled memory", test_zero_filled_write},
3858 	};
3859 	UnitTests write_pattern_tests = {
3860 		{"Write address-filled pattern", test_address_filled_write},
3861 		{"Write checkerboard pattern", test_checkerboard_write},
3862 		{"Write reverse checkerboard pattern", test_reverse_checkerboard_write},
3863 	};
3864 	UnitTests write_edge_case_tests = {
3865 		{"Write into NULL VM map", test_write_null_map}, {"Write zero size", test_write_zero_size},
3866 	};
3867 	UnitTests write_inaccessible_tests = {
3868 		{"Write partially decallocated buffer", test_write_partially_deallocated_buffer},
3869 		{"Write partially read-protected buffer", test_write_partially_unreadable_buffer},
3870 		{"Write on partially deallocated range", test_write_on_partially_deallocated_range},
3871 		{"Write on partially write-protected range", test_write_on_partially_unwritable_range},
3872 	};
3873 
3874 	/* All allocations done with mach_vm_allocate(). */
3875 	set_allocator(wrapper_mach_vm_allocate);
3876 
3877 	/* Run the test suites with various destination sizes and
3878 	 *  unspecified or fixed (page-aligned or page-unaligned)
3879 	 *  addresses, and various buffer sizes and boundary offsets. */
3880 	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3881 		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3882 			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3883 				for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
3884 					for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
3885 						/* An allocated address will be page-aligned. */
3886 						if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
3887 							continue;
3888 						}
3889 						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_edge_case_tests,
3890 						    deallocate_vm_and_buffer,
3891 						    "mach_vm_write() edge case tests, %s%s address, %s "
3892 						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3893 						    "buffer boundary offset: %d",
3894 						    address_flags[flags_idx].description,
3895 						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3896 						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3897 						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
3898 						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
3899 						    buffer_offsets[offsets_idx].offset);
3900 						/* A zero size buffer is always accessible. */
3901 						if (buffer_sizes_idx != ZERO_BYTES) {
3902 							run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_inaccessible_tests,
3903 							    deallocate_vm_and_buffer,
3904 							    "mach_vm_write() inaccessibility tests, "
3905 							    "%s%s address, %s size: 0x%jx (%ju), buffer "
3906 							    "%s size: 0x%jx (%ju), buffer boundary "
3907 							    "offset: %d",
3908 							    address_flags[flags_idx].description,
3909 							    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3910 							    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3911 							    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
3912 							    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
3913 							    buffer_offsets[offsets_idx].offset);
3914 						}
3915 						/* The buffer cannot be larger than the destination. */
3916 						if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
3917 							continue;
3918 						}
3919 						run_suite(set_up_vm_and_buffer_variables_allocate_write, write_main_tests, deallocate_vm_and_buffer,
3920 						    "mach_vm_write() main tests, %s%s address, %s "
3921 						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3922 						    "buffer boundary offset: %d",
3923 						    address_flags[flags_idx].description,
3924 						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3925 						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3926 						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
3927 						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
3928 						    buffer_offsets[offsets_idx].offset);
3929 						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_pattern_tests,
3930 						    deallocate_vm_and_buffer,
3931 						    "mach_vm_write() pattern tests, %s%s address, %s "
3932 						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3933 						    "buffer boundary offset: %d",
3934 						    address_flags[flags_idx].description,
3935 						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3936 						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3937 						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
3938 						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
3939 						    buffer_offsets[offsets_idx].offset);
3940 					}
3941 				}
3942 			}
3943 		}
3944 	}
3945 }
3946 
3947 void
3948 run_protect_test_suites()
3949 {
3950 	UnitTests readprotection_main_tests = {
3951 		{"Read-protect, read-allow and verify zero-filled memory", test_zero_filled_readprotect},
3952 		{"Verify that region is read-protected iff size is "
3953 		 "nonzero",
3954 		 test_verify_readprotection},
3955 	};
3956 	UnitTests access_readprotected_memory_tests = {
3957 		{"Read start of read-protected range", test_access_readprotected_range_start},
3958 		{"Read middle of read-protected range", test_access_readprotected_range_middle},
3959 		{"Read end of read-protected range", test_access_readprotected_range_end},
3960 	};
3961 	UnitTests writeprotection_main_tests = {
3962 		{"Write-protect and verify zero-filled memory", test_zero_filled_extended},
3963 		{"Verify that region is write-protected iff size is "
3964 		 "nonzero",
3965 		 test_verify_writeprotection},
3966 	};
3967 	UnitTests write_writeprotected_memory_tests = {
3968 		{"Write at start of write-protected range", test_write_writeprotected_range_start},
3969 		{"Write in middle of write-protected range", test_write_writeprotected_range_middle},
3970 		{"Write at end of write-protected range", test_write_writeprotected_range_end},
3971 	};
3972 	UnitTests protect_edge_case_tests = {
3973 		{"Read-protect zero size ranges", test_readprotect_zero_size},
3974 		{"Write-protect zero size ranges", test_writeprotect_zero_size},
3975 		{"Read-protect wrapped around memory ranges", test_readprotect_wrapped_around_ranges},
3976 		{"Write-protect wrapped around memory ranges", test_writeprotect_wrapped_around_ranges},
3977 	};
3978 
3979 	/* All allocations done with mach_vm_allocate(). */
3980 	set_allocator(wrapper_mach_vm_allocate);
3981 
3982 	/* Run the test suites with various VM sizes, and unspecified or
3983 	 *  fixed (page-aligned or page-unaligned), addresses. */
3984 	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3985 		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3986 			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3987 				/* An allocated address will be page-aligned. */
3988 				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
3989 					continue;
3990 				}
3991 				run_suite(set_up_vm_variables_allocate_readprotect, readprotection_main_tests, deallocate_extra_page,
3992 				    "Main read-protection tests, %s%s address, %s "
3993 				    "size: 0x%jx (%ju)",
3994 				    address_flags[flags_idx].description,
3995 				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3996 				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3997 				    (uintmax_t)vm_sizes[sizes_idx].size);
3998 				run_suite(set_up_vm_variables_allocate_writeprotect, writeprotection_main_tests, deallocate_extra_page,
3999 				    "Main write-protection tests, %s%s address, %s "
4000 				    "size: 0x%jx (%ju)",
4001 				    address_flags[flags_idx].description,
4002 				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4003 				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4004 				    (uintmax_t)vm_sizes[sizes_idx].size);
4005 				/* Nothing gets protected if size is zero. */
4006 				if (sizes_idx != ZERO_BYTES) {
4007 					set_expected_signal(SIGBUS);
4008 					/* Accessing read-protected memory should cause a bus
4009 					 *  error. */
4010 					run_suite(set_up_vm_variables_allocate_readprotect, access_readprotected_memory_tests, deallocate_extra_page,
4011 					    "Read-protected memory access tests, %s%s "
4012 					    "address, %s size: 0x%jx (%ju)",
4013 					    address_flags[flags_idx].description,
4014 					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4015 					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4016 					    (uintmax_t)vm_sizes[sizes_idx].size);
4017 					/* Writing on write-protected memory should cause a bus
4018 					 *  error. */
4019 					run_suite(set_up_vm_variables_allocate_writeprotect, write_writeprotected_memory_tests, deallocate_extra_page,
4020 					    "Write-protected memory writing tests, %s%s "
4021 					    "address, %s size: 0x%jx (%ju)",
4022 					    address_flags[flags_idx].description,
4023 					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4024 					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4025 					    (uintmax_t)vm_sizes[sizes_idx].size);
4026 					set_expected_signal(0);
4027 				}
4028 			}
4029 		}
4030 	}
4031 	run_suite(do_nothing, protect_edge_case_tests, do_nothing, "Edge case protection tests");
4032 }
4033 
4034 void
4035 run_copy_test_suites()
4036 {
4037 	/* Copy tests */
4038 	UnitTests copy_main_tests = {
4039 		{"Copy and verify zero-filled memory", test_zero_filled_copy_dest},
4040 	};
4041 	UnitTests copy_pattern_tests = {
4042 		{"Copy address-filled pattern", test_copy_address_filled},
4043 		{"Copy checkerboard pattern", test_copy_checkerboard},
4044 		{"Copy reverse checkerboard pattern", test_copy_reverse_checkerboard},
4045 	};
4046 	UnitTests copy_edge_case_tests = {
4047 		{"Copy with NULL VM map", test_copy_null_map},
4048 		{"Copy zero size", test_copy_zero_size},
4049 		{"Copy invalid large size", test_copy_invalid_large_size},
4050 		{"Read wrapped around memory ranges", test_copy_wrapped_around_ranges},
4051 	};
4052 	UnitTests copy_inaccessible_tests = {
4053 		{"Copy source partially decallocated region", test_copy_source_partially_deallocated_region},
4054 		/* XXX */
4055 		{"Copy destination partially decallocated region", test_copy_dest_partially_deallocated_region},
4056 		{"Copy source partially read-protected region", test_copy_source_partially_unreadable_region},
4057 		/* XXX */
4058 		{"Copy destination partially write-protected region", test_copy_dest_partially_unwriteable_region},
4059 		{"Copy source on partially deallocated range", test_copy_source_on_partially_deallocated_range},
4060 		{"Copy destination on partially deallocated range", test_copy_dest_on_partially_deallocated_range},
4061 		{"Copy source on partially read-protected range", test_copy_source_on_partially_unreadable_range},
4062 		{"Copy destination on partially write-protected range", test_copy_dest_on_partially_unwritable_range},
4063 	};
4064 
4065 	UnitTests copy_shared_mode_tests = {
4066 		{"Copy using freshly allocated source", test_vmcopy_fresh_source},
4067 		{"Copy using shared source", test_vmcopy_shared_source},
4068 		{"Copy using a \'copied from\' source", test_vmcopy_copied_from_source},
4069 		{"Copy using a \'copied to\' source", test_vmcopy_copied_to_source},
4070 		{"Copy using a true shared source", test_vmcopy_trueshared_source},
4071 		{"Copy using a private aliased source", test_vmcopy_private_aliased_source},
4072 	};
4073 
4074 	/* All allocations done with mach_vm_allocate(). */
4075 	set_allocator(wrapper_mach_vm_allocate);
4076 
4077 	/* All the tests are done with page size regions. */
4078 	set_vm_size(vm_page_size);
4079 
4080 	/* Run the test suites with various shared modes for source */
4081 	for (vmcopy_action_idx = 0; vmcopy_action_idx < numofvmcopyactions; vmcopy_action_idx++) {
4082 		run_suite(set_up_copy_shared_mode_variables, copy_shared_mode_tests, do_nothing, "Copy shared mode tests, %s",
4083 		    vmcopy_actions[vmcopy_action_idx].description);
4084 	}
4085 
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
					for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
						/* An allocated address will be page-aligned. */
						if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_edge_case_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_copy() edge case tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						/* The buffer cannot be larger than the destination. */
						if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
							continue;
						}

						/* A zero-size buffer is always accessible. */
						if (buffer_sizes_idx != ZERO_BYTES) {
							run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_inaccessible_tests,
							    deallocate_vm_and_buffer,
							    "mach_vm_copy() inaccessibility tests, "
							    "%s%s address, %s size: 0x%jx (%ju), buffer "
							    "%s size: 0x%jx (%ju), buffer boundary "
							    "offset: %d",
							    address_flags[flags_idx].description,
							    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
							    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
							    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
							    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
							    buffer_offsets[offsets_idx].offset);
						}
						run_suite(set_up_source_and_dest_variables_allocate_copy, copy_main_tests, deallocate_vm_and_buffer,
						    "mach_vm_copy() main tests, %s%s address, %s "
						    "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), "
						    "destination boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						run_suite(set_up_source_and_dest_variables_allocate_copy, copy_pattern_tests, deallocate_vm_and_buffer,
						    "mach_vm_copy() pattern tests, %s%s address, %s "
						    "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), "
						    "destination boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
					}
				}
			}
		}
	}
}

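/*
 * Run the test suites selected by the option flags, then log the
 * aggregated results for the whole run.
 */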
void
perform_test_with_options(test_option_t options)
{
	process_options(options);

	/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
	 * error finding xnu major version number. */
	/* printf("xnu version is %s.\n\n", xnu_version_string()); */

	if (flag_run_allocate_test) {
		run_allocate_test_suites();
	}

	if (flag_run_deallocate_test) {
		run_deallocate_test_suites();
	}

	if (flag_run_read_test) {
		run_read_test_suites();
	}

	if (flag_run_write_test) {
		run_write_test_suites();
	}

	if (flag_run_protect_test) {
		run_protect_test_suites();
	}

	if (flag_run_copy_test) {
		run_copy_test_suites();
	}

	log_aggregated_results();
}

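/*
 * darwintest entry points. Each T_DECL below exercises one Mach VM
 * interface family by setting the corresponding to_flags value, using the
 * default VM size (to_vmsize == 0) and error-only logging.
 */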
T_DECL(vm_test_allocate, "Allocate VM unit test")
{
	test_options.to_flags = VM_TEST_ALLOCATE;
	test_options.to_vmsize = 0;
	test_options.to_quietness = ERROR_ONLY_QUIETNESS;

	perform_test_with_options(test_options);
}

T_DECL(vm_test_deallocate, "Deallocate VM unit test",
    T_META_IGNORECRASHES(".*vm_allocation.*"))
{
	test_options.to_flags = VM_TEST_DEALLOCATE;
	test_options.to_vmsize = 0;
	test_options.to_quietness = ERROR_ONLY_QUIETNESS;

	perform_test_with_options(test_options);
}

T_DECL(vm_test_read, "Read VM unit test")
{
	test_options.to_flags = VM_TEST_READ;
	test_options.to_vmsize = 0;
	test_options.to_quietness = ERROR_ONLY_QUIETNESS;

	perform_test_with_options(test_options);
}

T_DECL(vm_test_write, "Write VM unit test")
{
	test_options.to_flags = VM_TEST_WRITE;
	test_options.to_vmsize = 0;
	test_options.to_quietness = ERROR_ONLY_QUIETNESS;

	perform_test_with_options(test_options);
}

T_DECL(vm_test_protect, "Protect VM unit test",
    T_META_IGNORECRASHES(".*vm_allocation.*"))
{
	test_options.to_flags = VM_TEST_PROTECT;
	test_options.to_vmsize = 0;
	test_options.to_quietness = ERROR_ONLY_QUIETNESS;

	perform_test_with_options(test_options);
}

T_DECL(vm_test_copy, "Copy VM unit test")
{
	test_options.to_flags = VM_TEST_COPY;
	test_options.to_vmsize = 0;
	test_options.to_quietness = ERROR_ONLY_QUIETNESS;

	perform_test_with_options(test_options);
}