#include <darwintest.h>
#include <darwintest_utils.h>
#include <test_utils.h>

#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/memory_entry.h>
#include <mach/shared_region.h>
#include <mach/vm_reclaim.h>
#include <mach/vm_types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <TargetConditionals.h>
#include <mach-o/dyld.h>
#include <libgen.h>

#include <os/bsd.h> // For os_parse_boot_arg_int

// workarounds for buggy MIG declarations
// see tests/vm/vm_parameter_validation_replacement_*.defs
// and tests/Makefile for details
#include "vm_parameter_validation_replacement_mach_host.h"
#include "vm_parameter_validation_replacement_host_priv.h"

// code shared with kernel/kext tests
#include "../../osfmk/tests/vm_parameter_validation.h"

#define GOLDEN_FILES_VERSION "vm_parameter_validation_golden_images_46d15ea.tar.xz"
#define GOLDEN_FILES_ASSET_FILE_POINTER GOLDEN_FILES_VERSION

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"),
	T_META_S3_ASSET(GOLDEN_FILES_ASSET_FILE_POINTER),
	T_META_ASROOT(true),            /* required for vm_wire tests on macOS */
	T_META_RUN_CONCURRENTLY(false), /* vm_parameter_validation_kern uses kernel globals */
	T_META_ALL_VALID_ARCHS(true),
	XNU_T_META_REQUIRES_DEVELOPMENT_KERNEL
	);
/*
 * vm_parameter_validation.c
 * Tests parameter validation of the VM userspace API.
 *
 * The test compares return values against a 'golden' list: a text file,
 * generated in advance and compressed into per-platform .xz files.
 * When vm_parameter_validation runs, it calls assets/vm_parameter_validation/decompress.sh,
 * which detects the platform and decompresses the corresponding user and kern
 * golden files.
 *
 * Any return code mismatch is reported as a failure, printing the test name and iteration.
 * New tests not present in the 'golden' list still run, but they are also reported as failures.
 *
 * Two environment variable flags make development work easier and can
 * temporarily disable golden list testing.
 *
 * SKIP_TESTS
 * When running with SKIP_TESTS set, the test does not compare its results
 * against the golden files.
 *
 * DUMP_RESULTS
 * When running with DUMP_RESULTS set, the test prints all returned values
 * (as opposed to only the failing ones). To pretty-print this output, use the python script:
 * DUMP_RESULTS=1 vm_parameter_validation | tools/format_vm_parameter_validation.py
 */
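
/*
 * Illustrative sketch only (added for exposition, not part of the test
 * tables): one plausible way to consume the flags above, assuming they are
 * ordinary environment variables whose mere presence enables them, and that
 * getenv(3) is reachable through the headers included above. The helper
 * name env_flag_is_set() is hypothetical.
 */
static bool __unused
env_flag_is_set(const char *name)
{
	// Treat "set to anything, even the empty string" as enabled.
	return getenv(name) != NULL;
}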


/*
 * xnu/libsyscall/mach/mach_vm.c intercepts some VM calls from userspace,
 * sometimes doing something other than the expected MIG call.
 * This test generates its own MIG userspace call sites to call the kernel
 * entrypoints directly, bypassing libsyscall's interference.
 *
 * The custom MIG call sites are generated into:
 *     vm_parameter_validation_vm_map_user.c
 *     vm_parameter_validation_mach_vm_user.c
 */

#pragma clang diagnostic ignored "-Wdeclaration-after-statement"
#pragma clang diagnostic ignored "-Wmissing-prototypes"
#pragma clang diagnostic ignored "-Wpedantic"

/*
 * Our wire tests often try to wire the whole address space.
 * In that case the error code is determined by the first range of addresses
 * that cannot be wired.
 * In most cases that is a protection failure on a malloc guard page. But
 * sometimes, circumstances outside of our control change the address map of
 * our test process and add holes, which means we get a bad address error
 * instead, and the test fails because the return code doesn't match what's
 * recorded in the golden files.
 * To avoid this, we want to keep a guard page inside our data section.
 * Because that data section is one of the first things in our address space,
 * the behavior of wire is (more) predictable.
 */
static _Alignas(KB16) char guard_page[KB16];

static void
set_up_guard_page(void)
{
	/*
	 * Ensure that _Alignas worked as expected.
	 */
	assert(0 == (((mach_vm_address_t)guard_page) & PAGE_MASK));
	/*
	 * Remove all permissions on guard_page such that it is a guard page.
	 */
	assert(0 == mprotect(guard_page, sizeof(guard_page), 0));
}

// Return a file descriptor that tests can read and write.
// A single temporary file is shared among all tests.
static int
get_fd()
{
	static int fd = -1;
	if (fd > 0) {
		return fd;
	}

	char filename[] = "/tmp/vm_parameter_validation_XXXXXX";
	fd = mkstemp(filename);
	assert(fd > 2); // not stdin/stdout/stderr
	return fd;
}

static int rosetta_dyld_fd = -1;
// Return a file descriptor that Rosetta dyld will accept
static int
get_dyld_fd()
{
	if (rosetta_dyld_fd >= 0) {
		return rosetta_dyld_fd;
	}

	if (!isRosetta()) {
		rosetta_dyld_fd = 0;
		return rosetta_dyld_fd;
	}

	rosetta_dyld_fd = 0;
	return rosetta_dyld_fd;
}

// Close the Rosetta dyld fd (only one test calls this)
static void
close_dyld_fd()
{
	if (isRosetta()) {
		assert(rosetta_dyld_fd > 2);
		if (close(rosetta_dyld_fd) != 0) {
			assert(0);
		}
		rosetta_dyld_fd = -1;
	}
}

static int
munmap_helper(void *ptr, size_t size)
{
	mach_vm_address_t start, end;
	if (0 != size) { // munmap rejects size == 0 even though mmap accepts it
		/*
		 * munmap expects aligned inputs, even though mmap sometimes
		 * returns unaligned values
		 */
		start = ((mach_vm_address_t)ptr) & ~PAGE_MASK;
		end = (((mach_vm_address_t)ptr) + size + PAGE_MASK) & ~PAGE_MASK;
		return munmap((void*)start, end - start);
	}
	return 0;
}

// Some tests provoke EXC_GUARD exceptions.
// We disable EXC_GUARD if possible. If we can't, we disable those tests instead.
static bool EXC_GUARD_ENABLED = true;

static int
call_munlock(void *start, size_t size)
{
	int err = munlock(start, size);
	return err ? errno : 0;
}

static int
call_mlock(void *start, size_t size)
{
	int err = mlock(start, size);
	return err ? errno : 0;
}

extern int __munmap(void *, size_t);

static kern_return_t
call_munmap(MAP_T map __unused, mach_vm_address_t start, mach_vm_size_t size)
{
	int err = __munmap((void*)start, (size_t)size);
	return err ? errno : 0;
}

static int
call_mremap_encrypted(void *start, size_t size)
{
	int err = mremap_encrypted(start, size, CRYPTID_NO_ENCRYPTION, /*cputype=*/ 0, /*cpusubtype=*/ 0);
	return err ? errno : 0;
}

/////////////////////////////////////////////////////
// Mach tests

static mach_port_t
make_a_mem_object(mach_vm_size_t size)
{
	mach_port_t out_handle;
	kern_return_t kr = mach_memory_object_memory_entry_64(mach_host_self(), 1, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_handle);
	assert(kr == 0);
	return out_handle;
}

static mach_port_t
make_a_mem_entry(vm_size_t size)
{
	mach_port_t port;
	memory_object_size_t s = (memory_object_size_t)size;
	kern_return_t kr = mach_make_memory_entry_64(mach_host_self(), &s, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "allocate memory entry");
	return port;
}

static inline void
check_mach_memory_entry_outparam_changes(kern_return_t *kr, mach_port_t out_handle, mach_port_t saved_handle)
{
	if (*kr != KERN_SUCCESS) {
		if (out_handle != (mach_port_t) saved_handle) {
			*kr = OUT_PARAM_BAD;
		}
	}
}
// mach_make_memory_entry is really several functions wearing a trenchcoat.
// Run a separate test for each variation.

// mach_make_memory_entry also has a confusing number of entrypoints:
// U64: mach_make_memory_entry_64(64) (mach_make_memory_entry is the same MIG message)
// U32: mach_make_memory_entry(32), mach_make_memory_entry_64(64), _mach_make_memory_entry(64) (each is a unique MIG message)
#define IMPL(FN, T) \
	static kern_return_t \
	call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
		    VM_PROT_READ | MAP_MEM_ONLY, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
			/* MAP_MEM_ONLY doesn't use the size. It should not change it. */ \
			if (io_size != size) { \
				kr = OUT_PARAM_BAD; \
			} \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
		    VM_PROT_READ | MAP_MEM_NAMED_CREATE, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __start_size__copy(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
		    VM_PROT_READ | MAP_MEM_VM_COPY, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __start_size__share(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
		    VM_PROT_READ | MAP_MEM_VM_SHARE, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
		    VM_PROT_READ | MAP_MEM_NAMED_REUSE, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
		    prot, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	}

IMPL(mach_make_memory_entry_64, mach_vm_address_t)
#if TEST_OLD_STYLE_MACH
IMPL(mach_make_memory_entry, vm_address_t)
IMPL(_mach_make_memory_entry, mach_vm_address_t)
#endif
#undef IMPL

static inline void
check_mach_memory_object_memory_entry_outparam_changes(kern_return_t *kr, mach_port_t out_handle,
    mach_port_t saved_out_handle)
{
	if (*kr != KERN_SUCCESS) {
		if (out_handle != saved_out_handle) {
			*kr = OUT_PARAM_BAD;
		}
	}
}

#define IMPL(FN) \
	static kern_return_t \
	call_ ## FN ## __size(MAP_T map __unused, mach_vm_size_t size) \
	{ \
		kern_return_t kr; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_entry = invalid_value; \
		kr = FN(mach_host_self(), 1, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_entry); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_entry); \
		} \
		check_mach_memory_object_memory_entry_outparam_changes(&kr, out_entry, invalid_value); \
		return kr; \
	} \
	static kern_return_t \
	call_ ## FN ## __vm_prot(MAP_T map __unused, mach_vm_size_t size, vm_prot_t prot) \
	{ \
		kern_return_t kr; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_entry = invalid_value; \
		kr = FN(mach_host_self(), 1, size, prot, 0, &out_entry); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_entry); \
		} \
		check_mach_memory_object_memory_entry_outparam_changes(&kr, out_entry, invalid_value); \
		return kr; \
	}

// The declaration of mach_memory_object_memory_entry is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://117927965
IMPL(replacement_mach_memory_object_memory_entry)
IMPL(mach_memory_object_memory_entry_64)
#undef IMPL

static inline void
check_vm_read_outparam_changes(kern_return_t *kr, mach_vm_size_t size, mach_vm_size_t requested_size,
    mach_vm_address_t addr)
{
	if (*kr == KERN_SUCCESS) {
		if (size != requested_size) {
			*kr = OUT_PARAM_BAD;
		}
		if (size == 0) {
			if (addr != 0) {
				*kr = OUT_PARAM_BAD;
			}
		}
	}
}


static kern_return_t
call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	vm_offset_t out_addr = UNLIKELY_INITIAL_ADDRESS;
	mach_msg_type_number_t out_size = UNLIKELY_INITIAL_SIZE;
	kern_return_t kr = mach_vm_read(map, start, size, &out_addr, &out_size);
	if (kr == 0) {
		(void)mach_vm_deallocate(mach_task_self(), out_addr, out_size);
	}
	check_vm_read_outparam_changes(&kr, out_size, size, out_addr);
	return kr;
}
#if TEST_OLD_STYLE_MACH
static kern_return_t
call_vm_read(MAP_T map, vm_address_t start, vm_size_t size)
{
	vm_offset_t out_addr = UNLIKELY_INITIAL_ADDRESS;
	mach_msg_type_number_t out_size = UNLIKELY_INITIAL_SIZE;
	kern_return_t kr = vm_read(map, start, size, &out_addr, &out_size);
	if (kr == 0) {
		(void)mach_vm_deallocate(mach_task_self(), out_addr, out_size);
	}
	check_vm_read_outparam_changes(&kr, out_size, size, out_addr);
	return kr;
}
#endif

static kern_return_t
call_mach_vm_read_list(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_vm_read_entry_t re = {{.address = start, .size = size}};
	kern_return_t kr = mach_vm_read_list(map, re, 1);
	if (kr == 0) {
		(void)mach_vm_deallocate(mach_task_self(), re[0].address, re[0].size);
	}
	return kr;
}
#if TEST_OLD_STYLE_MACH
static kern_return_t
call_vm_read_list(MAP_T map, vm_address_t start, vm_size_t size)
{
	vm_read_entry_t re = {{.address = start, .size = size}};
	kern_return_t kr = vm_read_list(map, re, 1);
	if (kr == 0) {
		(void)mach_vm_deallocate(mach_task_self(), re[0].address, re[0].size);
	}
	return kr;
}
#endif

static inline void
check_vm_read_overwrite_outparam_changes(kern_return_t *kr, mach_vm_size_t size, mach_vm_size_t requested_size)
{
	if (*kr == KERN_SUCCESS) {
		if (size != requested_size) {
			*kr = OUT_PARAM_BAD;
		}
	}
}

static kern_return_t __unused
call_mach_vm_read_overwrite__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	mach_vm_size_t out_size;
	kern_return_t kr = mach_vm_read_overwrite(map, start, size, start_2, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

static kern_return_t
call_mach_vm_read_overwrite__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
{
	mach_vm_size_t out_size;
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_read_overwrite(map, src, size, dst.addr, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

static kern_return_t
call_mach_vm_read_overwrite__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
{
	mach_vm_size_t out_size;
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_read_overwrite(map, src.addr, size, dst, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

#if TEST_OLD_STYLE_MACH
static kern_return_t __unused
call_vm_read_overwrite__ssz(MAP_T map, vm_address_t start, vm_address_t start_2, vm_size_t size)
{
	vm_size_t out_size;
	kern_return_t kr = vm_read_overwrite(map, start, size, start_2, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

static kern_return_t
call_vm_read_overwrite__src(MAP_T map, vm_address_t src, vm_size_t size)
{
	vm_size_t out_size;
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_read_overwrite(map, src, size, (vm_address_t) dst.addr, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

static kern_return_t
call_vm_read_overwrite__dst(MAP_T map, vm_address_t dst, vm_size_t size)
{
	vm_size_t out_size;
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_read_overwrite(map, (vm_address_t) src.addr, size, dst, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}
#endif


static kern_return_t __unused
call_mach_vm_copy__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	kern_return_t kr = mach_vm_copy(map, start, size, start_2);
	return kr;
}

static kern_return_t
call_mach_vm_copy__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
{
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_copy(map, src, size, dst.addr);
	return kr;
}

static kern_return_t
call_mach_vm_copy__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
{
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_copy(map, src.addr, size, dst);
	return kr;
}

#if TEST_OLD_STYLE_MACH
static kern_return_t __unused
call_vm_copy__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	kern_return_t kr = vm_copy(map, (vm_address_t) start, (vm_size_t) size, (vm_address_t) start_2);
	return kr;
}

static kern_return_t
call_vm_copy__src(MAP_T map, vm_address_t src, vm_size_t size)
{
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_copy(map, src, size, (vm_address_t) dst.addr);
	return kr;
}

static kern_return_t
call_vm_copy__dst(MAP_T map, vm_address_t dst, vm_size_t size)
{
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_copy(map, (vm_address_t) src.addr, size, dst);
	return kr;
}
#endif

static kern_return_t __unused
call_mach_vm_write__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	kern_return_t kr = mach_vm_write(map, start, (vm_offset_t) start_2, (mach_msg_type_number_t) size);
	return kr;
}

static kern_return_t
call_mach_vm_write__src(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_write(map, dst.addr, (vm_offset_t) start, (mach_msg_type_number_t) size);
	return kr;
}

static kern_return_t
call_mach_vm_write__dst(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_write(map, start, (vm_offset_t) src.addr, (mach_msg_type_number_t) size);
	return kr;
}

#if TEST_OLD_STYLE_MACH
static kern_return_t __unused
call_vm_write__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	kern_return_t kr = vm_write(map, (vm_address_t) start, (vm_offset_t) start_2, (mach_msg_type_number_t) size);
	return kr;
}

static kern_return_t
call_vm_write__src(MAP_T map, vm_address_t start, vm_size_t size)
{
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_write(map, (vm_address_t) dst.addr, start, (mach_msg_type_number_t) size);
	return kr;
}

static kern_return_t
call_vm_write__dst(MAP_T map, vm_address_t start, vm_size_t size)
{
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_write(map, start, (vm_offset_t) src.addr, (mach_msg_type_number_t) size);
	return kr;
}
#endif
// mach_vm_wire, vm_wire (start/size)
// "wire" and "unwire" paths diverge internally; test both
#define IMPL(FN, T, FLAVOR, PROT) \
	static kern_return_t \
	call_ ## FN ## __ ## FLAVOR(MAP_T map, T start, T size) \
	{ \
		mach_port_t host_priv = HOST_PRIV_NULL; \
		kern_return_t kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
		assert(kr == 0); /* host priv port on macOS requires entitlements or root */ \
		kr = FN(host_priv, map, start, size, PROT); \
		return kr; \
	}
IMPL(mach_vm_wire, mach_vm_address_t, wire, VM_PROT_READ)
IMPL(mach_vm_wire, mach_vm_address_t, unwire, VM_PROT_NONE)
// The declaration of vm_wire is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://118258929
IMPL(replacement_vm_wire, mach_vm_address_t, wire, VM_PROT_READ)
IMPL(replacement_vm_wire, mach_vm_address_t, unwire, VM_PROT_NONE)
#undef IMPL

// mach_vm_wire, vm_wire (vm_prot_t)
#define IMPL(FN, T) \
	static kern_return_t \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
	{ \
		mach_port_t host_priv = HOST_PRIV_NULL; \
		kern_return_t kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
		assert(kr == 0); /* host priv port on macOS requires entitlements or root */ \
		kr = FN(host_priv, map, start, size, prot); \
		return kr; \
	}
IMPL(mach_vm_wire, mach_vm_address_t)
// The declaration of vm_wire is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://118258929
IMPL(replacement_vm_wire, mach_vm_address_t)
#undef IMPL


// mach_vm_map/vm32_map/vm32_map_64 infra

typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);

static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_vm_address_t out_addr = start;
	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    0, 0, 0, 0, 0, VM_INHERIT_NONE);
	// fixed-overwrite with pre-existing allocation, don't deallocate
	return kr;
}

static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_vm_address_t out_addr = start;
	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    0, 0, true, 0, 0, VM_INHERIT_NONE);
	// fixed-overwrite with pre-existing allocation, don't deallocate
	return kr;
}

static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
{
	mach_vm_address_t out_addr = start_hint;
	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
	if (kr == 0) {
		(void)mach_vm_deallocate(map, out_addr, size);
	}
	return kr;
}

static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t out_addr = start;
	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	(void)mach_port_deallocate(mach_task_self(), memobject);
	// fixed-overwrite with pre-existing allocation, don't deallocate
	return kr;
}

static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t out_addr = start;
	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	(void)mach_port_deallocate(mach_task_self(), memobject);
	// fixed-overwrite with pre-existing allocation, don't deallocate
	return kr;
}

static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
{
	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t out_addr = start_hint;
	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, memobject,
	    KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	if (kr == 0) {
		(void)mach_vm_deallocate(map, out_addr, size);
	}
	(void)mach_port_deallocate(mach_task_self(), memobject);
	return kr;
}

static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	mach_port_t memobject = make_a_mem_object(obj_size);
	mach_vm_address_t out_addr = start;
	kern_return_t kr = fn(map, &out_addr, size, 0, flags, memobject,
	    offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
	(void)mach_port_deallocate(mach_task_self(), memobject);
	return kr;
}

static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
}

static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
}

static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
}

static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	mach_vm_address_t out_addr = start;
	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
	    0, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
	return kr;
}

static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
}

static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
}

static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
}

static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t out_addr = start;
	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
	    memobject, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
	(void)mach_port_deallocate(mach_task_self(), memobject);
	return kr;
}

static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
}

static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
}

static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
}

static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t *start, mach_vm_size_t size, int flags)
{
	kern_return_t kr = fn(map, start, size, 0, flags,
	    0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	return kr;
}

static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t *start, mach_vm_size_t size, int flags)
{
	kern_return_t kr = fn(map, start, size, 0, flags,
	    0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	return kr;
}

static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t *start, mach_vm_size_t size, int flags)
{
	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = fn(map, start, size, 0, flags,
	    memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	(void)mach_port_deallocate(mach_task_self(), memobject);
	return kr;
}

static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t *start, mach_vm_size_t size, int flags)
{
	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = fn(map, start, size, 0, flags,
	    memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	(void)mach_port_deallocate(mach_task_self(), memobject);
	return kr;
}

static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
{
	mach_vm_address_t out_addr = 0;
	kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
	    0, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
	return kr;
}

static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
}

static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
}

static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
}

static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
{
	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t out_addr = 0;
	kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
	    memobject, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
	(void)mach_port_deallocate(mach_task_self(), memobject);
	return kr;
}

static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
}

static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
}

static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
}

// implementations

#define IMPL_MAP_FN_START_SIZE(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance(MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
	{ \
		return call_map_fn__ ## instance(map_fn, map, start, size); \
	}

#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance(MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
	{ \
		return call_map_fn__ ## instance(map_fn, map, start_hint, size); \
	}

#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
	{ \
		return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size); \
	}

#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
	{ \
		return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit); \
	}

#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t *start, mach_vm_size_t size, int flags) \
	{ \
		return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags); \
	}

#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
	{ \
		return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max); \
	}

#define IMPL(map_fn) \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed) \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere)

static kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	mach_vm_address_t addr = *address;
	kern_return_t kr = mach_vm_map(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, *address, flags, target_task);
	*address = addr;
	return kr;
}
IMPL(mach_vm_map_wrapped)

#if TEST_OLD_STYLE_MACH
static kern_return_t
vm_map_64_retyped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map_64(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, object, (vm_offset_t)offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
IMPL(vm_map_64_retyped)

static kern_return_t
vm_map_retyped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, object, (vm_offset_t)offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
IMPL(vm_map_retyped)
#endif

#undef IMPL_MAP_FN_START_SIZE
#undef IMPL_MAP_FN_HINT_SIZE
#undef IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT
#undef IMPL_MAP_FN_START_SIZE_INHERIT
#undef IMPL_MAP_FN_START_SIZE_FLAGS
#undef IMPL_MAP_FN_PROT_PAIRS
#undef IMPL


// mmap
// Calling this symbol directly hits the raw syscall instead of the libsyscall wrapper.
void *__mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off);

// We invert MAP_UNIX03 in the flags, because libsyscall intercepts calls to
// mmap and adds MAP_UNIX03 by default. That means MAP_UNIX03 should be the
// default for most of our tests, and we should only test without MAP_UNIX03
// when we explicitly want to.
void *
mmap_wrapper(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
{
	flags ^= MAP_UNIX03;
	return __mmap(addr, len, prot, flags, fildes, off);
}

// Rename the UNIX03 flag for the code below since we're inverting its meaning.
#define MAP_NOT_UNIX03 0x40000
static_assert(MAP_NOT_UNIX03 == MAP_UNIX03, "MAP_UNIX03 value changed");
#undef MAP_UNIX03
#define MAP_UNIX03 dont_use_MAP_UNIX03
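
/*
 * Illustrative sketch only (added for exposition, not part of the test
 * tables): how the inversion above plays out at call sites. A plain call
 * gets UNIX03 semantics because the XOR in mmap_wrapper() sets the
 * underlying flag bit; passing MAP_NOT_UNIX03 cancels the XOR and requests
 * the legacy non-UNIX03 path. The function name demo_unix03_inversion() is
 * hypothetical.
 */
static void __unused
demo_unix03_inversion(void)
{
	// UNIX03 semantics: mmap_wrapper() adds the flag bit before the syscall.
	void *p = mmap_wrapper(NULL, KB16, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p != MAP_FAILED) {
		(void)munmap(p, KB16);
	}
	// Legacy semantics: MAP_NOT_UNIX03 is XORed away before the syscall.
	p = mmap_wrapper(NULL, KB16, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE | MAP_NOT_UNIX03, -1, 0);
	if (p != MAP_FAILED) {
		(void)munmap(p, KB16);
	}
}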

// helpers

// Return true if security policy disallows unsigned code.
// Some test results are expected to change with this set.
static bool
unsigned_code_is_disallowed(void)
{
	if (isRosetta()) {
		return false;
	}

	int out_value = 0;
	size_t io_size = sizeof(out_value);
	if (0 == sysctlbyname("security.mac.amfi.unsigned_code_policy",
	    &out_value, &io_size, NULL, 0)) {
		return out_value;
	}

	// sysctl not present, assume unsigned code is okay
	return false;
}

static int
maybe_hide_mmap_failure(int ret, int prot, int fd)
{
	// Special case for mmap(PROT_EXEC, fd).
	// When SIP is enabled these get EPERM from mac_file_check_mmap().
	// The golden files record the SIP-disabled values.
	// This special case also allows the test to succeed when SIP
	// is enabled even though the return value isn't the golden one.
	if (ret == EPERM && fd != -1 && (prot & PROT_EXEC) &&
	    unsigned_code_is_disallowed()) {
		return ACCEPTABLE;
	}
	return ret;
}

static kern_return_t
help_call_mmap__vm_prot(MAP_T map __unused, int flags, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
{
	int fd = -1;
	if (!(flags & MAP_ANON)) {
		fd = get_fd();
	}
	void *rv = mmap_wrapper((void *)start, (size_t) size, prot, flags, fd, 0);
	if (rv == MAP_FAILED) {
		return maybe_hide_mmap_failure(errno, prot, fd);
	} else {
		assert(0 == munmap_helper(rv, size));
		return 0;
	}
}

static kern_return_t
help_call_mmap__kernel_flags(MAP_T map __unused, int mmap_flags, mach_vm_address_t start, mach_vm_size_t size, int kernel_flags)
{
	void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, mmap_flags, kernel_flags, 0);
	if (rv == MAP_FAILED) {
		return errno;
	} else {
		assert(0 == munmap_helper(rv, size));
		return 0;
	}
}

static kern_return_t
help_call_mmap__dst_size_fileoff(MAP_T map __unused, int flags, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff)
{
	int fd = -1;
	if (!(flags & MAP_ANON)) {
		fd = get_fd();
	}
	void *rv = mmap_wrapper((void *)dst, (size_t) size, VM_PROT_DEFAULT, flags, fd, (off_t)fileoff);
	if (rv == MAP_FAILED) {
		return errno;
	} else {
		assert(0 == munmap_helper(rv, size));
		return 0;
	}
}

static kern_return_t
help_call_mmap__start_size(MAP_T map __unused, int flags, mach_vm_address_t start, mach_vm_size_t size)
{
	int fd = -1;
	if (!(flags & MAP_ANON)) {
		fd = get_fd();
	}
	void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, flags, fd, 0);
	if (rv == MAP_FAILED) {
		return errno;
	} else {
		assert(0 == munmap_helper(rv, size));
		return 0;
	}
}

static kern_return_t
help_call_mmap__offset_size(MAP_T map __unused, int flags, mach_vm_address_t offset, mach_vm_size_t size)
{
	int fd = -1;
	if (!(flags & MAP_ANON)) {
		fd = get_fd();
	}
	void *rv = mmap_wrapper((void *)0, (size_t) size, VM_PROT_DEFAULT, flags, fd, (off_t)offset);
	if (rv == MAP_FAILED) {
		return errno;
	} else {
		assert(0 == munmap_helper(rv, size));
		return 0;
	}
}

#define IMPL_ONE_FROM_HELPER(type, variant, flags, ...) \
	static kern_return_t \
	__attribute__((used)) \
	call_mmap ## __ ## variant ## __ ## type(MAP_T map, mach_vm_address_t start, mach_vm_size_t size DROP_COMMAS(__VA_ARGS__)) { \
		return help_call_mmap__ ## type(map, flags, start, size DROP_TYPES(__VA_ARGS__)); \
	}
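
/*
 * For reference, one expansion of IMPL_ONE_FROM_HELPER above (illustrative,
 * assuming the DROP_COMMAS/DROP_TYPES helpers from the shared header behave
 * as their names suggest, turning "vm_prot_t, prot" into ", vm_prot_t prot"
 * and ", prot" respectively):
 *
 * IMPL_ONE_FROM_HELPER(vm_prot, file_private, MAP_FILE | MAP_PRIVATE, vm_prot_t, prot)
 * defines
 *     call_mmap__file_private__vm_prot(MAP_T map, mach_vm_address_t start,
 *         mach_vm_size_t size, vm_prot_t prot)
 * which forwards to
 *     help_call_mmap__vm_prot(map, MAP_FILE | MAP_PRIVATE, start, size, prot).
 */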

// call functions

#define IMPL_FROM_HELPER(type, ...) \
	IMPL_ONE_FROM_HELPER(type, file_private, MAP_FILE | MAP_PRIVATE, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, anon_private, MAP_ANON | MAP_PRIVATE, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, file_shared, MAP_FILE | MAP_SHARED, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, anon_shared, MAP_ANON | MAP_SHARED, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, file_private_codesign, MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_CODESIGN, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, file_private_media, MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_MEDIA, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, nounix03_private, MAP_FILE | MAP_PRIVATE | MAP_NOT_UNIX03, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, fixed_private, MAP_FILE | MAP_PRIVATE | MAP_FIXED, ##__VA_ARGS__)

IMPL_FROM_HELPER(vm_prot, vm_prot_t, prot)
IMPL_FROM_HELPER(dst_size_fileoff, mach_vm_address_t, fileoff)
IMPL_FROM_HELPER(start_size)
IMPL_FROM_HELPER(offset_size)

IMPL_ONE_FROM_HELPER(kernel_flags, anon_private, MAP_ANON | MAP_PRIVATE, int, kernel_flags)
IMPL_ONE_FROM_HELPER(kernel_flags, anon_shared, MAP_ANON | MAP_SHARED, int, kernel_flags)

static kern_return_t
call_mmap__mmap_flags(MAP_T map __unused, mach_vm_address_t start, mach_vm_size_t size, int mmap_flags)
{
	int fd = -1;
	if (!(mmap_flags & MAP_ANON)) {
		fd = get_fd();
	}
	void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, mmap_flags, fd, 0);
	if (rv == MAP_FAILED) {
		return errno;
	} else {
		assert(0 == munmap(rv, (size_t) size));
		return 0;
	}
}

// Mach memory entry ownership

static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
{
	mach_port_t mementry = make_a_mem_entry(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = mach_memory_entry_ownership(mementry, mach_task_self(), ledger_tag, 0);
	(void)mach_port_deallocate(mach_task_self(), mementry);
	return kr;
}

static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
{
	mach_port_t mementry = make_a_mem_entry(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = mach_memory_entry_ownership(mementry, mach_task_self(), VM_LEDGER_TAG_DEFAULT, ledger_flag);
	(void)mach_port_deallocate(mach_task_self(), mementry);
	return kr;
}
1292
1293
1294 // For deallocators like munmap and vm_deallocate.
1295 // Return a non-zero error code if we should avoid performing this trial.
1296 kern_return_t
short_circuit_deallocator(MAP_T map,start_size_trial_t trial)1297 short_circuit_deallocator(MAP_T map, start_size_trial_t trial)
1298 {
1299 // mach_vm_deallocate(size == 0) is safe
1300 if (trial.size == 0) {
1301 return 0;
1302 }
1303
1304 // Allow deallocation attempts based on a valid allocation
1305 // (assumes the test loop will slide this trial to a valid allocation)
1306 if (!trial.start_is_absolute && trial.size_is_absolute) {
1307 return 0;
1308 }
1309
1310 // Avoid overwriting random live memory.
1311 if (!vm_sanitize_range_overflows_strict_zero(trial.start, trial.size, VM_MAP_PAGE_MASK(map))) {
1312 return IGNORED;
1313 }
1314
1315 // Avoid EXC_GUARD if it is still enabled.
1316 mach_vm_address_t sum;
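// i.e. start + size doesn't overflow 64 bits and is nonzero, yet rounding it
// up to a page boundary wraps to 0 because the range ends inside the last page
// of the address space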
1317 if (!__builtin_add_overflow(trial.start, trial.size, &sum) &&
1318 trial.start + trial.size != 0 &&
1319 round_up_page(trial.start + trial.size, PAGE_SIZE) == 0) {
1320 // this case provokes EXC_GUARD
1321 if (EXC_GUARD_ENABLED) {
1322 return GUARD;
1323 }
1324 }
1325
1326 // Allow.
1327 return 0;
1328 }
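// Illustrative (hypothetical) caller, sketching how the shared test loops are
// expected to consult this helper before running a deallocate trial:
//
//	start_size_trial_t trial = trials->list[i];
//	kern_return_t ret = short_circuit_deallocator(map, trial);
//	if (ret == 0) {
//		ret = call_mach_vm_deallocate(map, trial.start, trial.size);
//	}
//	append_result(results, ret, trial.name);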
1329
1330 static kern_return_t
1331 call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1332 {
1333 kern_return_t kr = mach_vm_deallocate(map, start, size);
1334 return kr;
1335 }
1336
1337 #if TEST_OLD_STYLE_MACH
1338 static kern_return_t
1339 call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1340 {
1341 kern_return_t kr = vm_deallocate(map, (vm_address_t) start, (vm_size_t) size);
1342 return kr;
1343 }
1344 #endif
1345
1346 static kern_return_t
1347 call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1348 {
1349 mach_vm_address_t saved_start = *start;
1350 kern_return_t kr = mach_vm_allocate(map, start, size, flags);
1351 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
1352 return kr;
1353 }
1354
1355
1356 static kern_return_t
1357 call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
1358 {
1359 mach_vm_address_t saved_start = *start;
1360 kern_return_t kr = mach_vm_allocate(map, start, size, VM_FLAGS_FIXED);
1361 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
1362 return kr;
1363 }
1364
1365 static kern_return_t
1366 call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
1367 {
1368 mach_vm_address_t saved_start = *start;
1369 kern_return_t kr = mach_vm_allocate(map, start, size, VM_FLAGS_ANYWHERE);
1370 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
1371 return kr;
1372 }
1373
1374 static kern_return_t
1375 call_mach_vm_inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1376 {
1377 kern_return_t kr = mach_vm_inherit(map, start, size, VM_INHERIT_NONE);
1378 return kr;
1379 }
1380 #if TEST_OLD_STYLE_MACH
1381 static kern_return_t
1382 call_vm_inherit(MAP_T map, vm_address_t start, vm_size_t size)
1383 {
1384 kern_return_t kr = vm_inherit(map, start, size, VM_INHERIT_NONE);
1385 return kr;
1386 }
1387 #endif
1388
1389 static int
1390 call_minherit(void *start, size_t size)
1391 {
1392 int err = minherit(start, size, VM_INHERIT_SHARE);
1393 return err ? errno : 0;
1394 }
1395
1396 static kern_return_t
1397 call_mach_vm_inherit__inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t value)
1398 {
1399 kern_return_t kr = mach_vm_inherit(map, start, size, value);
1400 return kr;
1401 }
1402
1403 static int
1404 call_minherit__inherit(void * start, size_t size, int value)
1405 {
1406 int err = minherit(start, size, value);
1407 return err ? errno : 0;
1408 }
1409
1410 static kern_return_t
1411 call_mach_vm_protect__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1412 {
1413 kern_return_t kr = mach_vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
1414 return kr;
1415 }
1416 static kern_return_t
1417 call_mach_vm_protect__vm_prot(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
1418 {
1419 kern_return_t kr = mach_vm_protect(map, start, size, 0, prot);
1420 return kr;
1421 }
1422 #if TEST_OLD_STYLE_MACH
1423 static kern_return_t
1424 call_vm_protect__start_size(MAP_T map, vm_address_t start, vm_size_t size)
1425 {
1426 kern_return_t kr = vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
1427 return kr;
1428 }
1429 static kern_return_t
1430 call_vm_protect__vm_prot(MAP_T map, vm_address_t start, vm_size_t size, vm_prot_t prot)
1431 {
1432 kern_return_t kr = vm_protect(map, start, size, 0, prot);
1433 return kr;
1434 }
1435 #endif
1436
1437 extern int __mprotect(void *, size_t, int);
1438
1439 static int
1440 call_mprotect__start_size(void *start, size_t size)
1441 {
1442 int err = __mprotect(start, size, PROT_READ | PROT_WRITE);
1443 return err ? errno : 0;
1444 }
1445
1446 static int
1447 call_mprotect__vm_prot(void *start, size_t size, int prot)
1448 {
1449 int err = __mprotect(start, size, prot);
1450 return err ? errno : 0;
1451 }
1452
1453 #if TEST_OLD_STYLE_MACH
1454 static kern_return_t
1455 call_vm_behavior_set__start_size__default(MAP_T map, vm_address_t start, vm_size_t size)
1456 {
1457 kern_return_t kr = vm_behavior_set(map, start, size, VM_BEHAVIOR_DEFAULT);
1458 return kr;
1459 }
1460
1461 static kern_return_t
1462 call_vm_behavior_set__start_size__can_reuse(MAP_T map, vm_address_t start, vm_size_t size)
1463 {
1464 kern_return_t kr = vm_behavior_set(map, start, size, VM_BEHAVIOR_CAN_REUSE);
1465 return kr;
1466 }
1467
1468 static kern_return_t
1469 call_vm_behavior_set__vm_behavior(MAP_T map, vm_address_t start, vm_size_t size, vm_behavior_t behavior)
1470 {
1471 kern_return_t kr = vm_behavior_set(map, start, size, behavior);
1472 return kr;
1473 }
1474 #endif /* TEST_OLD_STYLE_MACH */
1475
1476 extern int __shared_region_map_and_slide_2_np(uint32_t files_count,
1477 const struct shared_file_np *files,
1478 uint32_t mappings_count,
1479 const struct shared_file_mapping_slide_np *mappings);
1480
1481 static int
1482 maybe_hide_shared_region_map_failure(int ret,
1483 uint32_t files_count, const struct shared_file_np *files,
1484 uint32_t mappings_count)
1485 {
1486 // Special case for __shared_region_map_and_slide_2_np().
1487 // When SIP is enabled this case gets EPERM instead of EINVAL due to
1488 // vm_shared_region_map_file returning KERN_PROTECTION_FAILURE instead of
1489 // KERN_INVALID_ARGUMENT.
1490 if (ret == EPERM && files_count == 1 && mappings_count == 1 &&
1491 files->sf_fd == get_fd() && files->sf_mappings_count == 1 &&
1492 unsigned_code_is_disallowed()) {
1493 return ACCEPTABLE;
1494 }
1495 return ret;
1496 }
1497
1498 static int
1499 call_shared_region_map_and_slide_2_np_child(uint32_t files_count, const struct shared_file_np *files,
1500 uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings)
1501 {
1502 int err = __shared_region_map_and_slide_2_np(files_count, files, mappings_count, mappings);
1503 return err ? maybe_hide_shared_region_map_failure(errno, files_count, files, mappings_count) : 0;
1504 }
1505
1506 typedef struct {
1507 uint32_t files_count;
1508 const struct shared_file_np *files;
1509 uint32_t mappings_count;
1510 const struct shared_file_mapping_slide_np *mappings;
1511 } map_n_slice_thread_args;
1512
1513 void*
1514 thread_func(void* args)
1515 {
1516 map_n_slice_thread_args *thread_args = (map_n_slice_thread_args *)args;
1517 uint32_t files_count = thread_args->files_count;
1518 const struct shared_file_np *files = thread_args->files;
1519 uint32_t mappings_count = thread_args->mappings_count;
1520 const struct shared_file_mapping_slide_np *mappings = thread_args->mappings;
1521
1522 int err = call_shared_region_map_and_slide_2_np_child(files_count, files, mappings_count, mappings);
1523
1524 int *result = malloc(sizeof(int));
1525 assert(result != NULL);
1526 *result = err;
1527 return result;
1528 }
1529
1530 static int
1531 call_shared_region_map_and_slide_2_np_in_thread(uint32_t files_count, const struct shared_file_np *files,
1532 uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings)
1533 {
1534 // From vm/vm_shared_region.c: After a chroot(), the calling process keeps using its original shared region [...]
1535 // But its children will use a different shared region [...]
1536 if (chroot(".") < 0) {
1537 return BUSTED;
1538 }
1539
1540 map_n_slice_thread_args args = {files_count, files, mappings_count, mappings};
1541 pthread_t thread;
1542 if (pthread_create(&thread, NULL, thread_func, (void *)&args) != 0) {
1543 return -91;
1544 }
1545
1546 int *err;
1547 if (pthread_join(thread, (void**)&err) != 0) {
1548 return BUSTED;
1549 }
1550
1551 if (chroot("/") < 0) {
1552 return BUSTED;
1553 }
1554
1555 return *err;
1556 }
1557
1558 static int
1559 call_madvise__start_size(void *start, size_t size)
1560 {
1561 int err = madvise(start, size, MADV_NORMAL);
1562 return err ? errno : 0;
1563 }
1564
1565 static int
1566 call_madvise__vm_advise(void *start, size_t size, int advise)
1567 {
1568 int err = madvise(start, size, advise);
1569 return err ? errno : 0;
1570 }
1571
1572 static int
1573 call_mach_vm_msync__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1574 {
1575 kern_return_t kr = mach_vm_msync(map, start, size, VM_SYNC_ASYNCHRONOUS);
1576 return kr;
1577 }
1578
1579 static int
1580 call_mach_vm_msync__vm_sync(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_sync_t sync)
1581 {
1582 kern_return_t kr = mach_vm_msync(map, start, size, sync);
1583 return kr;
1584 }
1585
1586 #if TEST_OLD_STYLE_MACH
1587 static int
1588 call_vm_msync__start_size(MAP_T map, vm_address_t start, vm_size_t size)
1589 {
1590 kern_return_t kr = vm_msync(map, start, size, VM_SYNC_ASYNCHRONOUS);
1591 return kr;
1592 }
1593
1594 static int
1595 call_vm_msync__vm_sync(MAP_T map, vm_address_t start, vm_size_t size, vm_sync_t sync)
1596 {
1597 kern_return_t kr = vm_msync(map, start, size, sync);
1598 return kr;
1599 }
1600 #endif /* TEST_OLD_STYLE_MACH */
1601
1602 // msync has a libsyscall wrapper that does alignment. We want the raw syscall.
1603 int __msync(void *, size_t, int);
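// For example, msync((char *)page + 1, len, MS_SYNC) would have its address
// aligned down to `page` by the wrapper before trapping, so only __msync can
// hand the kernel a deliberately unaligned start. (Illustrative; based on the
// wrapper behavior described above.)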
1604
1605 static int
1606 call_msync__start_size(void *start, size_t size)
1607 {
1608 int err = __msync(start, size, MS_SYNC);
1609 return err ? errno : 0;
1610 }
1611
1612 static int
1613 call_msync__vm_msync(void *start, size_t size, int msync_value)
1614 {
1615 int err = __msync(start, size, msync_value);
1616 return err ? errno : 0;
1617 }
1618
1619 // msync_nocancel isn't declared, but we want to hit the syscall directly
1620 int __msync_nocancel(void *, size_t, int);
1621
1622 static int
1623 call_msync_nocancel__start_size(void *start, size_t size)
1624 {
1625 int err = __msync_nocancel(start, size, MS_SYNC);
1626 return err ? errno : 0;
1627 }
1628
1629 static int
1630 call_msync_nocancel__vm_msync(void *start, size_t size, int msync_value)
1631 {
1632 int err = __msync_nocancel(start, size, msync_value);
1633 return err ? errno : 0;
1634 }
1635
1636 static void
1637 check_mach_vm_machine_attribute_outparam_changes(kern_return_t * kr, vm_machine_attribute_val_t value, vm_machine_attribute_val_t saved_value)
1638 {
1639 if (value != saved_value) {
1640 *kr = OUT_PARAM_BAD;
1641 }
1642 }
1643
1644 static int
1645 call_mach_vm_machine_attribute__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1646 {
1647 vm_machine_attribute_val_t value = MATTR_VAL_GET;
1648 vm_machine_attribute_val_t initial_value = value;
1649 kern_return_t kr = mach_vm_machine_attribute(map, start, size, MATTR_CACHE, &value);
1650 check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1651 return kr;
1652 }
1653
1654
1655 static int
1656 call_mach_vm_machine_attribute__machine_attribute(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_machine_attribute_t attr)
1657 {
1658 vm_machine_attribute_val_t value = MATTR_VAL_GET;
1659 vm_machine_attribute_val_t initial_value = value;
1660 kern_return_t kr = mach_vm_machine_attribute(map, start, size, attr, &value);
1661 check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1662 return kr;
1663 }
1664
1665 #if TEST_OLD_STYLE_MACH
1666 static int
1667 call_vm_machine_attribute__start_size(MAP_T map, vm_address_t start, vm_size_t size)
1668 {
1669 vm_machine_attribute_val_t value = MATTR_VAL_GET;
1670 vm_machine_attribute_val_t initial_value = value;
1671 kern_return_t kr = vm_machine_attribute(map, start, size, MATTR_CACHE, &value);
1672 check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1673 return kr;
1674 }
1675
1676 static int
1677 call_vm_machine_attribute__machine_attribute(MAP_T map, vm_address_t start, vm_size_t size, vm_machine_attribute_t attr)
1678 {
1679 vm_machine_attribute_val_t value = MATTR_VAL_GET;
1680 vm_machine_attribute_val_t initial_value = value;
1681 kern_return_t kr = vm_machine_attribute(map, start, size, attr, &value);
1682 check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1683 return kr;
1684 }
1685 #endif /* TEST_OLD_STYLE_MACH */
1686
1687 static int
1688 call_mach_vm_purgable_control__address__get(MAP_T map, mach_vm_address_t addr)
1689 {
1690 int state = INVALID_PURGABLE_STATE;
1691 int initial_state = state;
1692 kern_return_t kr = mach_vm_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
1693 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
1694 return kr;
1695 }
1696
1697
1698 static int
1699 call_mach_vm_purgable_control__address__purge_all(MAP_T map, mach_vm_address_t addr)
1700 {
1701 int state = INVALID_PURGABLE_STATE;
1702 int initial_state = state;
1703 kern_return_t kr = mach_vm_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
1704 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
1705 return kr;
1706 }
1707
1708 static int
1709 call_mach_vm_purgable_control__purgeable_state(MAP_T map, mach_vm_address_t addr, vm_purgable_t control, int state)
1710 {
1711 int initial_state = state;
1712 kern_return_t kr = mach_vm_purgable_control(map, addr, control, &state);
1713 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, control);
1714 return kr;
1715 }
1716
1717 #if TEST_OLD_STYLE_MACH
1718 static int
1719 call_vm_purgable_control__address__get(MAP_T map, vm_address_t addr)
1720 {
1721 int state = INVALID_PURGABLE_STATE;
1722 int initial_state = state;
1723 kern_return_t kr = vm_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
1724 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
1725 return kr;
1726 }
1727
1728 static int
1729 call_vm_purgable_control__address__purge_all(MAP_T map, vm_address_t addr)
1730 {
1731 int state = INVALID_PURGABLE_STATE;
1732 int initial_state = state;
1733 kern_return_t kr = vm_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
1734 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
1735 return kr;
1736 }
1737
1738 static int
1739 call_vm_purgable_control__purgeable_state(MAP_T map, vm_address_t addr, vm_purgable_t control, int state)
1740 {
1741 int initial_state = state;
1742 kern_return_t kr = vm_purgable_control(map, addr, control, &state);
1743 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, control);
1744 return kr;
1745 }
1746 #endif /* TEST_OLD_STYLE_MACH */
1747
1748 static void
1749 check_mach_vm_region_recurse_outparam_changes(kern_return_t * kr, void * info, void * saved_info, size_t info_size,
1750 natural_t depth, natural_t saved_depth, mach_vm_address_t addr, mach_vm_address_t saved_addr,
1751 mach_vm_size_t size, mach_vm_size_t saved_size)
1752 {
1753 if (*kr == KERN_SUCCESS) {
1754 if (depth == saved_depth) {
1755 *kr = OUT_PARAM_BAD;
1756 }
1757 if (size == saved_size) {
1758 *kr = OUT_PARAM_BAD;
1759 }
1760 if (memcmp(info, saved_info, info_size) == 0) {
1761 *kr = OUT_PARAM_BAD;
1762 }
1763 } else {
1764 if (depth != saved_depth || addr != saved_addr || size != saved_size || memcmp(info, saved_info, info_size) != 0) {
1765 *kr = OUT_PARAM_BAD;
1766 }
1767 }
1768 }
1769
1770 static kern_return_t
1771 call_mach_vm_region_recurse(MAP_T map, mach_vm_address_t addr)
1772 {
1773 vm_region_submap_info_data_64_t info;
1774 info.inheritance = INVALID_INHERIT;
1775 vm_region_submap_info_data_64_t saved_info = info;
1776 mach_vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
1777 mach_vm_size_t saved_size = size_out;
1778 natural_t depth = 10;
1779 natural_t saved_depth = depth;
1780 mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
1781 mach_vm_address_t addr_cpy = addr;
1782
1783 kern_return_t kr = mach_vm_region_recurse(map,
1784 &addr_cpy,
1785 &size_out,
1786 &depth,
1787 (vm_region_recurse_info_t)&info,
1788 &count);
1789 check_mach_vm_region_recurse_outparam_changes(&kr, &info, &saved_info, sizeof(info), depth, saved_depth,
1790 addr_cpy, addr, size_out, saved_size);
1791
1792 return kr;
1793 }
1794
1795 #if TEST_OLD_STYLE_MACH
1796 static kern_return_t
1797 call_vm_region_recurse(MAP_T map, vm_address_t addr)
1798 {
1799 vm_region_submap_info_data_t info;
1800 info.inheritance = INVALID_INHERIT;
1801 vm_region_submap_info_data_t saved_info = info;
1802
1803 vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
1804 vm_size_t saved_size = size_out;
1805
1806 natural_t depth = 10;
1807 natural_t saved_depth = depth;
1808
1809 mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT;
1810 vm_address_t addr_cpy = addr;
1811
1812 kern_return_t kr = vm_region_recurse(map,
1813 &addr_cpy,
1814 &size_out,
1815 &depth,
1816 (vm_region_recurse_info_t)&info,
1817 &count);
1818
1819 check_mach_vm_region_recurse_outparam_changes(&kr, &info, &saved_info, sizeof(info), depth, saved_depth,
1820 addr_cpy, addr, size_out, saved_size);
1821
1822 return kr;
1823 }
1824
1825 static kern_return_t
1826 call_vm_region_recurse_64(MAP_T map, vm_address_t addr)
1827 {
1828 vm_region_submap_info_data_64_t info;
1829 info.inheritance = INVALID_INHERIT;
1830 vm_region_submap_info_data_64_t saved_info = info;
1831
1832 vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
1833 vm_size_t saved_size = size_out;
1834
1835 natural_t depth = 10;
1836 natural_t saved_depth = depth;
1837
1838 mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
1839 vm_address_t addr_cpy = addr;
1840
1841 kern_return_t kr = vm_region_recurse_64(map,
1842 &addr_cpy,
1843 &size_out,
1844 &depth,
1845 (vm_region_recurse_info_t)&info,
1846 &count);
1847
1848 check_mach_vm_region_recurse_outparam_changes(&kr, &info, &saved_info, sizeof(info), depth, saved_depth,
1849 addr_cpy, addr, size_out, saved_size);
1850
1851 return kr;
1852 }
1853 #endif /* TEST_OLD_STYLE_MACH */
1854
1855 static kern_return_t
1856 call_mach_vm_page_info(MAP_T map, mach_vm_address_t addr)
1857 {
1858 vm_page_info_flavor_t flavor = VM_PAGE_INFO_BASIC;
1859 mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
1860 mach_msg_type_number_t saved_count = count;
1861 vm_page_info_basic_data_t info = {0};
1862 info.depth = -1;
1863 vm_page_info_basic_data_t saved_info = info;
1864
1865 kern_return_t kr = mach_vm_page_info(map, addr, flavor, (vm_page_info_t)&info, &count);
1866 check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);
1867 return kr;
1868 }
1869
1870 static void
1871 check_mach_vm_page_query_outparam_changes(kern_return_t * kr, int disposition, int saved_disposition, int ref_count)
1872 {
1873 if (*kr == KERN_SUCCESS) {
1874 /*
1875 * There should be no outside references to the memory created for this test
1876 */
1877 if (ref_count != 0) {
1878 *kr = OUT_PARAM_BAD;
1879 }
1880 if (disposition == saved_disposition) {
1881 *kr = OUT_PARAM_BAD;
1882 }
1883 }
1884 }
1885
1886 static kern_return_t
1887 call_mach_vm_page_query(MAP_T map, mach_vm_address_t addr)
1888 {
1889 int disp = INVALID_DISPOSITION_VALUE, ref = 0;
1890 int saved_disposition = disp;
1891 kern_return_t kr = mach_vm_page_query(map, addr, &disp, &ref);
1892 check_mach_vm_page_query_outparam_changes(&kr, disp, saved_disposition, ref);
1893 return kr;
1894 }
1895
1896 #if TEST_OLD_STYLE_MACH
1897 static kern_return_t
1898 call_vm_map_page_query(MAP_T map, vm_address_t addr)
1899 {
1900 int disp = INVALID_DISPOSITION_VALUE, ref = 0;
1901 int saved_disposition = disp;
1902 kern_return_t kr = vm_map_page_query(map, addr, &disp, &ref);
1903 check_mach_vm_page_query_outparam_changes(&kr, disp, saved_disposition, ref);
1904 return kr;
1905 }
1906 #endif /* TEST_OLD_STYLE_MACH */
1907
1908 static void
1909 check_mach_vm_page_range_query_outparam_changes(kern_return_t * kr, mach_vm_size_t out_count, mach_vm_size_t in_count)
1910 {
1911 if (out_count != in_count) {
1912 *kr = OUT_PARAM_BAD;
1913 }
1914 }
1915
1916 static kern_return_t
1917 call_mach_vm_page_range_query(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1918 {
1919 // mach_vm_page_range_query writes one int of output per page
1920 // and can accept any address range as input.
1921 // We can't provide that much storage for very large lengths.
1922 // Instead we provide a limited output buffer,
1923 // write-protect the page after it, and "succeed" if the kernel
1924 // fills the buffer and then returns EFAULT.
1925
1926 // enough space for MAX_PAGE_RANGE_QUERY with 4KB pages, twice
1927 mach_vm_size_t prq_buf_size = 2 * 262144 * sizeof(int);
1928 mach_vm_address_t prq_buf = 0;
1929 kern_return_t kr = mach_vm_allocate(map, &prq_buf,
1930 prq_buf_size + KB16, VM_FLAGS_ANYWHERE);
1931 assert(kr == 0);
1932
1933 // protect the guard page
1934 mach_vm_address_t prq_guard = prq_buf + prq_buf_size;
1935 kr = mach_vm_protect(map, prq_guard, KB16, 0, VM_PROT_NONE);
1936 assert(kr == 0);
1937
1938 // pre-fill the output buffer with an invalid value
1939 memset((char *)prq_buf, 0xff, prq_buf_size);
1940
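// number of 16KB pages covered by the requested range, rounded up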
1941 mach_vm_size_t in_count = size / KB16 + (size % KB16 ? 1 : 0);
1942 mach_vm_size_t out_count = in_count;
1943 kr = mach_vm_page_range_query(map, start, size, prq_buf, &out_count);
1944
1945 // yes, EFAULT as a kern_return_t because mach_vm_page_range_query returns copyio's error
1946 if (kr == EFAULT) {
1947 bool bad = false;
1948 for (unsigned i = 0; i < prq_buf_size / sizeof(uint32_t); i++) {
1949 if (((uint32_t *)prq_buf)[i] == 0xffffffff) {
1950 // kernel didn't fill the entire writeable buffer, that's bad
1951 bad = true;
1952 break;
1953 }
1954 }
1955 if (!bad) {
1956 // kernel filled our buffer and then hit our guard page
1957 // we'll allow it
1958 kr = 0;
1959 }
1960 }
1961
1962 check_mach_vm_page_range_query_outparam_changes(&kr, out_count, in_count);
1963 (void)mach_vm_deallocate(map, prq_buf, prq_buf_size + KB16);
1964
1965 return kr;
1966 }
1967
1968 static int
1969 call_mincore(void *start, size_t size)
1970 {
1971 // mincore writes one byte of output per page
1972 // and can accept any address range as input.
1973 // We can't provide that much storage for very large lengths.
1974 // Instead we provide a limited output buffer,
1975 // write-protect the page after it, and "succeed" if the kernel
1976 // fills the buffer and then returns EFAULT.
1977
1978 // enough space for MAX_PAGE_RANGE_QUERY with 4KB pages, twice
1979 size_t mincore_buf_size = 2 * 262144;
1980 char *mincore_buf = 0;
1981 mincore_buf = mmap(NULL, mincore_buf_size + KB16, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
1982 assert(mincore_buf != MAP_FAILED);
1983
1984 // protect the guard page
1985 char *mincore_guard = mincore_buf + mincore_buf_size;
1986 int err = mprotect(mincore_guard, KB16, PROT_NONE);
1987 assert(err == 0);
1988
1989 // pre-fill the output buffer with an invalid value
1990 memset(mincore_buf, 0xff, mincore_buf_size);
1991
1992 int ret;
1993 err = mincore(start, size, mincore_buf);
1994 if (err == 0) {
1995 ret = 0;
1996 } else if (errno != EFAULT) {
1997 ret = errno;
1998 } else {
1999 // EFAULT - check if kernel hit our guard page
2000 bool bad = false;
2001 for (unsigned i = 0; i < mincore_buf_size; i++) {
2002 if (mincore_buf[i] == (char)0xff) {
2003 // kernel didn't fill the entire writeable buffer, that's bad
2004 bad = true;
2005 break;
2006 }
2007 }
2008 if (!bad) {
2009 // kernel filled our buffer and then hit our guard page
2010 // we'll allow it
2011 ret = 0;
2012 } else {
2013 ret = errno;
2014 }
2015 }
2016
2017 (void)munmap(mincore_buf, mincore_buf_size + KB16); // unmap the buffer plus the 16KB guard page mapped above
2018
2019 return ret;
2020 }
2021
2022
2023 typedef kern_return_t (*fn_mach_vm_deferred_reclamation_buffer_init)(task_t task, mach_vm_address_t address, mach_vm_size_t size);
2024
2025 static results_t *
2026 test_mach_vm_deferred_reclamation_buffer_init(fn_mach_vm_deferred_reclamation_buffer_init func,
2027 const char * testname)
2028 {
2029 int ret = 0;
2030 // Set vm.reclaim_max_threshold to non-zero
2031 int orig_reclaim_max_threshold = 0;
2032 int new_reclaim_max_threshold = 1;
2033 size_t size = sizeof(orig_reclaim_max_threshold);
2034 int sysctl_res = sysctlbyname("vm.reclaim_max_threshold", &orig_reclaim_max_threshold, &size, NULL, 0);
2035 assert(sysctl_res == 0);
2036 sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &new_reclaim_max_threshold, size);
2037 assert(sysctl_res == 0);
2038
2039 reclamation_buffer_init_trials_t *trials SMART_RECLAMATION_BUFFER_INIT_TRIALS();
2040 results_t *results = alloc_results(testname, eSMART_RECLAMATION_BUFFER_INIT_TRIALS, trials->count);
2041
2042 // reserve last trial to run without modified sysctl
2043 for (unsigned i = 0; i < trials->count - 1; i++) {
2044 reclamation_buffer_init_trial_t trial = trials->list[i];
2045 ret = func(trial.task, trial.address, trial.size);
2046 append_result(results, ret, trial.name);
2047 }
2048
2049 // run with vm.reclaim_max_threshold = 0 and exercise KERN_NOT_SUPPORTED path
2050 new_reclaim_max_threshold = 0;
2051 reclamation_buffer_init_trial_t last_trial = trials->list[trials->count - 1];
2052
2053 sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &new_reclaim_max_threshold, size);
2054 assert(sysctl_res == 0);
2055
2056 ret = func(last_trial.task, last_trial.address, last_trial.size);
2057 if (__improbable(ret == KERN_INVALID_ARGUMENT)) {
2058 // Unlikely case where the args are rejected before the sysctl check.
2059 // When this happens during a test run, return ACCEPTABLE; if it happens
2060 // during golden file generation, record the expected value.
2061 ret = generate_golden ? KERN_NOT_SUPPORTED : ACCEPTABLE;
2062 }
2063 append_result(results, ret, last_trial.name);
2064
2065 // Revert vm.reclaim_max_threshold to how we found it
2066 sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &orig_reclaim_max_threshold, size);
2067 assert(sysctl_res == 0);
2068
2069 return results;
2070 }
2071
2072
2073 static vm_map_kernel_flags_trials_t *
2074 generate_mmap_kernel_flags_trials()
2075 {
2076 // mmap rejects both ANYWHERE and FIXED | OVERWRITE
2077 // so don't set any prefix flags.
2078 return generate_prefixed_vm_map_kernel_flags_trials(0, "");
2079 }
2080
2081
2082 #define SMART_MMAP_KERNEL_FLAGS_TRIALS() \
2083 __attribute__((cleanup(cleanup_vm_map_kernel_flags_trials))) \
2084 = generate_mmap_kernel_flags_trials()
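// Usage sketch: `vm_map_kernel_flags_trials_t *trials SMART_MMAP_KERNEL_FLAGS_TRIALS();`
// The macro supplies both the cleanup attribute and the initializer, so the
// trial list is deallocated automatically when it goes out of scope (see the
// test function below).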
2085
2086 static results_t *
2087 test_mmap_with_allocated_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, int flags), const char * testname)
2088 {
2089 MAP_T map SMART_MAP;
2090
2091 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2092 vm_map_kernel_flags_trials_t * trials SMART_MMAP_KERNEL_FLAGS_TRIALS();
2093 results_t *results = alloc_results(testname, eSMART_MMAP_KERNEL_FLAGS_TRIALS, trials->count);
2094
2095 for (unsigned i = 0; i < trials->count; i++) {
2096 kern_return_t ret = func(map, base.addr, base.size, trials->list[i].flags);
2097 append_result(results, ret, trials->list[i].name);
2098 }
2099 return results;
2100 }
2101
2102 // Test a Unix function.
2103 // Run each trial with an allocated vm region and a vm_inherit_t
2104 typedef int (*unix_with_inherit_fn)(void *start, size_t size, int inherit);
2105
2106 static results_t *
2107 test_unix_with_allocated_vm_inherit_t(unix_with_inherit_fn fn, const char * testname)
2108 {
2109 MAP_T map SMART_MAP;
2110 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2111 vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
2112 results_t *results = alloc_results(testname, eSMART_VM_INHERIT_TRIALS, trials->count);
2113
2114 for (unsigned i = 0; i < trials->count; i++) {
2115 vm_inherit_trial_t trial = trials->list[i];
2116 int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2117 append_result(results, ret, trial.name);
2118 }
2119 return results;
2120 }
2121
2122 // Test a Unix function.
2123 // Run each trial with an allocated vm region and a vm_msync_t
2124 typedef int (*unix_with_msync_fn)(void *start, size_t size, int msync_value);
2125
2126 static results_t *
2127 test_unix_with_allocated_vm_msync_t(unix_with_msync_fn fn, const char * testname)
2128 {
2129 MAP_T map SMART_MAP;
2130 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2131 vm_msync_trials_t *trials SMART_VM_MSYNC_TRIALS();
2132 results_t *results = alloc_results(testname, eSMART_VM_MSYNC_TRIALS, trials->count);
2133
2134 for (unsigned i = 0; i < trials->count; i++) {
2135 vm_msync_trial_t trial = trials->list[i];
2136 int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2137 append_result(results, ret, trial.name);
2138 }
2139 return results;
2140 }
2141
2142 // Test a Unix function.
2143 // Run each trial with an allocated vm region and an advise
2144 typedef int (*unix_with_advise_fn)(void *start, size_t size, int advise);
2145
2146 static results_t *
2147 test_unix_with_allocated_aligned_vm_advise_t(unix_with_advise_fn fn, mach_vm_size_t align_mask, const char * testname)
2148 {
2149 MAP_T map SMART_MAP;
2150 allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT);
2151 vm_advise_trials_t *trials SMART_VM_ADVISE_TRIALS();
2152 results_t *results = alloc_results(testname, eSMART_VM_ADVISE_TRIALS, trials->count);
2153
2154 for (unsigned i = 0; i < trials->count; i++) {
2155 vm_advise_trial_t trial = trials->list[i];
2156 int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2157 append_result(results, ret, trial.name);
2158 }
2159 return results;
2160 }
2161
2162 // Rosetta userspace intercepts shared_region_map_and_slide_2_np calls, and the Rosetta wrapper
2163 // function doesn't have the necessary checks to handle invalid input arguments. Skip these trials
2164 // instead of crashing the test.
2165 static bool
2166 shared_region_map_and_slide_would_crash(shared_region_map_and_slide_2_trial_t *trial)
2167 {
2168 uint32_t files_count = trial->files_count;
2169 struct shared_file_np *files = trial->files;
2170 uint32_t mappings_count = trial->mappings_count;
2171 struct shared_file_mapping_slide_np *mappings = trial->mappings;
2172
2173 if (files_count == 0 || files_count == 1 || files_count > _SR_FILE_MAPPINGS_MAX_FILES) {
2174 return true;
2175 }
2176 if (mappings_count == 0 || mappings_count > SFM_MAX) {
2177 return true;
2178 }
2179 if (!files) {
2180 return true;
2181 }
2182 if (!mappings) {
2183 return true;
2184 }
2185 if (mappings_count != (((files_count - 1) * kNumSharedCacheMappings) + 1) &&
2186 mappings_count != (files_count * kNumSharedCacheMappings)) {
2187 return true;
2188 }
2189 if (files_count >= kMaxSubcaches) {
2190 return true;
2191 }
2192 return false;
2193 }
2194
2195 typedef int (*unix_shared_region_map_and_slide_2_np)(uint32_t files_count, const struct shared_file_np *files, uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings);
2196
2197 static results_t *
2198 test_unix_shared_region_map_and_slide_2_np(unix_shared_region_map_and_slide_2_np func, const char *testname)
2199 {
2200 uint64_t dyld_fp = (uint64_t)get_dyld_fd();
2201 shared_region_map_and_slide_2_trials_t *trials SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(dyld_fp);
2202 results_t *results = alloc_results(testname, eSMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS, dyld_fp, trials->count);
2203
2204 for (unsigned i = 0; i < trials->count; i++) {
2205 int ret;
2206 shared_region_map_and_slide_2_trial_t trial = trials->list[i];
2207 if (isRosetta() && shared_region_map_and_slide_would_crash(&trial)) {
2208 ret = IGNORED;
2209 } else {
2210 ret = func(trial.files_count, trial.files, trial.mappings_count, trial.mappings);
2211 }
2212 append_result(results, ret, trial.name);
2213 }
2214
2215 close_dyld_fd();
2216 return results;
2217 }
2218
2219 static results_t *
2220 test_dst_size_fileoff(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff), const char * testname)
2221 {
2222 MAP_T map SMART_MAP;
2223 src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
2224 results_t *results = alloc_results(testname, eSMART_FILEOFF_DST_SIZE_TRIALS, trials->count);
2225
2226 for (unsigned i = 0; i < trials->count; i++) {
2227 src_dst_size_trial_t trial = trials->list[i];
2228 unallocation_t dst_base SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
2229 // src a.k.a. mmap fileoff doesn't slide
2230 trial = slide_trial_dst(trial, dst_base.addr);
2231 int ret = func(map, trial.dst, trial.size, trial.src);
2232 append_result(results, ret, trial.name);
2233 }
2234 return results;
2235 }
2236
2237 // Try to allocate a destination for mmap(MAP_FIXED) to overwrite.
2238 // On exit:
2239 // *out_dst *out_size are the allocation, or 0
2240 // *out_panic is true if the trial should stop and record PANIC
2241 // (because the trial specifies an absolute address that is already occupied)
2242 // *out_slide is true if the trial should slide by *out_dst
2243 static __attribute__((overloadable)) void
2244 allocate_for_mmap_fixed(MAP_T map, mach_vm_address_t trial_dst, mach_vm_size_t trial_size, bool trial_dst_is_absolute, bool trial_size_is_absolute, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
2245 {
2246 *out_panic = false;
2247 *out_slide = false;
2248
2249 if (trial_dst_is_absolute && trial_size_is_absolute) {
2250 // known dst addr, known size
2251 *out_dst = trial_dst;
2252 *out_size = trial_size;
2253 kern_return_t kr = mach_vm_allocate(map, out_dst, *out_size, VM_FLAGS_FIXED);
2254 if (kr == KERN_NO_SPACE) {
2255 // this space is in use, we can't allow mmap to try to overwrite it
2256 *out_panic = true;
2257 *out_dst = 0;
2258 *out_size = 0;
2259 } else if (kr != 0) {
2260 // some other error, assume mmap will also fail
2261 *out_dst = 0;
2262 *out_size = 0;
2263 }
2264 // no slide, trial and allocation are already at the same place
2265 *out_slide = false;
2266 } else {
2267 // other cases either fit in a small allocation or fail
2268 *out_dst = 0;
2269 *out_size = TEST_ALLOC_SIZE;
2270 kern_return_t kr = mach_vm_allocate(map, out_dst, *out_size, VM_FLAGS_ANYWHERE);
2271 if (kr != 0) {
2272 // allocation error, assume mmap will also fail
2273 *out_dst = 0;
2274 *out_size = 0;
2275 }
2276 *out_slide = true;
2277 }
2278 }
2279
2280 static __attribute__((overloadable)) void
2281 allocate_for_mmap_fixed(MAP_T map, start_size_trial_t trial, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
2282 {
2283 allocate_for_mmap_fixed(map, trial.start, trial.size, trial.start_is_absolute, trial.size_is_absolute,
2284 out_dst, out_size, out_panic, out_slide);
2285 }
2286 static __attribute__((overloadable)) void
2287 allocate_for_mmap_fixed(MAP_T map, src_dst_size_trial_t trial, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
2288 {
2289 allocate_for_mmap_fixed(map, trial.dst, trial.size, trial.dst_is_absolute, !trial.size_is_dst_relative,
2290 out_dst, out_size, out_panic, out_slide);
2291 }
2292
2293 // Like test_dst_size_fileoff, but specialized for mmap(MAP_FIXED).
2294 // mmap(MAP_FIXED) is destructive, forcibly unmapping anything
2295 // already at that address.
2296 // We must ensure that each trial is either obviously invalid and caught
2297 // by the sanitizers, or is valid and overwrites an allocation we control.
2298 static results_t *
2299 test_fixed_dst_size_fileoff(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff), const char * testname)
2300 {
2301 MAP_T map SMART_MAP;
2302 src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
2303 results_t *results = alloc_results(testname, eSMART_FILEOFF_DST_SIZE_TRIALS, trials->count);
2304 for (unsigned i = 0; i < trials->count; i++) {
2305 src_dst_size_trial_t trial = trials->list[i];
2306 // Try to create an allocation for mmap to overwrite.
2307 mach_vm_address_t dst_alloc;
2308 mach_vm_size_t dst_size;
2309 bool should_panic;
2310 bool should_slide_trial;
2311 allocate_for_mmap_fixed(map, trial, &dst_alloc, &dst_size, &should_panic, &should_slide_trial);
2312 if (should_panic) {
2313 append_result(results, PANIC, trial.name);
2314 continue;
2315 }
2316 if (should_slide_trial) {
2317 // src a.k.a. mmap fileoff doesn't slide
2318 trial = slide_trial_dst(trial, dst_alloc);
2319 }
2320
2321 kern_return_t ret = func(map, trial.dst, trial.size, trial.src);
2322
2323 if (dst_alloc != 0) {
2324 (void)mach_vm_deallocate(map, dst_alloc, dst_size);
2325 }
2326 append_result(results, ret, trial.name);
2327 }
2328 return results;
2329 }
2330
2331 // Like test_mach_with_allocated_start_size, but specialized for mmap(MAP_FIXED).
2332 // See test_fixed_dst_size_fileoff for more.
2333 static results_t *
2334 test_fixed_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size), const char *testname)
2335 {
2336 MAP_T map SMART_MAP;
2337 start_size_trials_t *trials SMART_START_SIZE_TRIALS(0); // no base addr
2338 results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, 0, trials->count);
2339 for (unsigned i = 0; i < trials->count; i++) {
2340 start_size_trial_t trial = trials->list[i];
2341 // Try to create an allocation for mmap to overwrite.
2342 mach_vm_address_t dst_alloc;
2343 mach_vm_size_t dst_size;
2344 bool should_panic;
2345 bool should_slide_trial;
2346 allocate_for_mmap_fixed(map, trial, &dst_alloc, &dst_size, &should_panic, &should_slide_trial);
2347 if (should_panic) {
2348 append_result(results, PANIC, trial.name);
2349 continue;
2350 }
2351 if (should_slide_trial) {
2352 trial = slide_trial(trial, dst_alloc);
2353 }
2354
2355 kern_return_t ret = func(map, trial.start, trial.size);
2356
2357 if (dst_alloc != 0) {
2358 (void)mach_vm_deallocate(map, dst_alloc, dst_size);
2359 }
2360 append_result(results, ret, trial.name);
2361 }
2362 return results;
2363 }
2364
2365 static results_t *
2366 test_allocated_src_allocated_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst), const char * testname)
2367 {
2368 MAP_T map SMART_MAP;
2369 allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2370 allocation_t dst_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2371 src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
2372 results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
2373
2374 for (unsigned i = 0; i < trials->count; i++) {
2375 src_dst_size_trial_t trial = trials->list[i];
2376 trial = slide_trial_src(trial, src_base.addr);
2377 trial = slide_trial_dst(trial, dst_base.addr);
2378 int ret = func(map, trial.src, trial.size, trial.dst);
2379 // func should be fixed-overwrite, nothing new to deallocate
2380 append_result(results, ret, trial.name);
2381 }
2382 return results;
2383 }
2384
2385 static task_exc_guard_behavior_t saved_exc_guard_behavior;
2386
2387 static void
2388 disable_exc_guard()
2389 {
2390 T_SETUPBEGIN;
2391
2392 // Disable EXC_GUARD for the duration of the test.
2393 // We restore it at the end.
2394 kern_return_t kr = task_get_exc_guard_behavior(mach_task_self(), &saved_exc_guard_behavior);
2395 assert(kr == 0);
2396
2397 kr = task_set_exc_guard_behavior(mach_task_self(), TASK_EXC_GUARD_NONE);
2398 if (kr) {
2399 T_LOG("warning, couldn't disable EXC_GUARD; some tests are disabled");
2400 EXC_GUARD_ENABLED = true;
2401 } else {
2402 EXC_GUARD_ENABLED = false;
2403 }
2404
2405 T_SETUPEND;
2406 }
2407
2408 static void
2409 restore_exc_guard()
2410 {
2411 // restore process's EXC_GUARD handling
2412 (void)task_set_exc_guard_behavior(mach_task_self(), saved_exc_guard_behavior);
2413 }
2414
2415 static int
2416 set_disable_vm_sanitize_telemetry_via_sysctl(uint32_t val)
2417 {
2418 int ret = sysctlbyname("debug.disable_vm_sanitize_telemetry", NULL, NULL, &val, sizeof(uint32_t));
2419 if (ret != 0) {
2420 printf("sysctl failed with errno %d.\n", errno);
2421 }
2422 return ret;
2423 }
2424
2425 static int
2426 disable_vm_sanitize_telemetry(void)
2427 {
2428 return set_disable_vm_sanitize_telemetry_via_sysctl(1);
2429 }
2430
2431 static int
2432 reenable_vm_sanitize_telemetry(void)
2433 {
2434 return set_disable_vm_sanitize_telemetry_via_sysctl(0);
2435 }
2436
2437 #define MAX_LINE_LENGTH 100
2438 #define MAX_NUM_TESTS 350
2439 #define TMP_DIR "/tmp/"
2440 #define ASSETS_DIR "../assets/vm_parameter_validation/"
2441 #define DECOMPRESS ASSETS_DIR "decompress.sh"
2442 #define GOLDEN_FILE TMP_DIR "user_golden_image.log"
2443
2444 #define KERN_GOLDEN_FILE TMP_DIR "kern_golden_image.log"
2445
2446 results_t *golden_list[MAX_NUM_TESTS];
2447 results_t *kern_list[MAX_NUM_TESTS];
2448
2449 #define FILL_TRIALS_NAMES_AND_CONTINUE(results, trials, t_count) { \
2450 for (unsigned i = 0; i < t_count; i++) { \
2451 /* trials names are free'd in dealloc_results() */ \
2452 (results)->list[i].name = kstrdup((trials)->list[i].name); \
2453 } \
2454 }
2455
2456 #define FILL_TRIALS_NAMES(results, trials) { \
2457 unsigned t_count = ((trials)->count < (results)->count) ? (trials)->count : (results)->count; \
2458 if ((trials)->count != (results)->count) { \
2459 T_LOG("%s:%d Trials count mismatch, expected %u, golden file %u\n", \
2460 __func__, __LINE__, (trials)->count, (results)->count); \
2461 }\
2462 FILL_TRIALS_NAMES_AND_CONTINUE((results), (trials), (t_count)) \
2463 break; \
2464 }
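// NOTE: FILL_TRIALS_NAMES embeds a trailing `break;`, so it is only usable as
// the final statement of a case in fill_golden_trials() below. Cases that need
// to append extra trial names first use FILL_TRIALS_NAMES_AND_CONTINUE, which
// omits the break.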
2465
2466 static void
2467 fill_golden_trials(uint64_t trialsargs[static TRIALSARGUMENTS_SIZE],
2468 results_t *results)
2469 {
2470 trialsformula_t formula = results->trialsformula;
2471 uint64_t trialsargs0 = trialsargs[0];
2472 uint64_t trialsargs1 = trialsargs[1];
2473 switch (formula) {
2474 case eUNKNOWN_TRIALS:
2475 // Leave them empty
2476 T_FAIL("Golden file with unknown trials, testname: %s\n", results->testname);
2477 break;
2478 case eSMART_VM_MAP_KERNEL_FLAGS_TRIALS: {
2479 vm_map_kernel_flags_trials_t * trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
2480 FILL_TRIALS_NAMES(results, trials);
2481 }
2482 case eSMART_VM_INHERIT_TRIALS: {
2483 vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
2484 FILL_TRIALS_NAMES(results, trials);
2485 }
2486 case eSMART_MMAP_KERNEL_FLAGS_TRIALS: {
2487 vm_map_kernel_flags_trials_t * trials SMART_MMAP_KERNEL_FLAGS_TRIALS();
2488 FILL_TRIALS_NAMES(results, trials);
2489 }
2490 case eSMART_MMAP_FLAGS_TRIALS: {
2491 mmap_flags_trials_t *trials SMART_MMAP_FLAGS_TRIALS();
2492 FILL_TRIALS_NAMES(results, trials);
2493 }
2494 case eSMART_GENERIC_FLAG_TRIALS: {
2495 generic_flag_trials_t *trials SMART_GENERIC_FLAG_TRIALS();
2496 FILL_TRIALS_NAMES(results, trials);
2497 }
2498 case eSMART_VM_TAG_TRIALS: {
2499 // special case, trials (vm_tag_trials_values) depend on data only available on KERNEL
2500 vm_tag_trials_t *trials SMART_VM_TAG_TRIALS();
2501 FILL_TRIALS_NAMES(results, trials);
2502 }
2503 case eSMART_VM_PROT_TRIALS: {
2504 vm_prot_trials_t *trials SMART_VM_PROT_TRIALS();
2505 FILL_TRIALS_NAMES(results, trials);
2506 }
2507 case eSMART_VM_PROT_PAIR_TRIALS: {
2508 vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS();
2509 FILL_TRIALS_NAMES(results, trials);
2510 }
2511 case eSMART_LEDGER_TAG_TRIALS: {
2512 ledger_tag_trials_t *trials SMART_LEDGER_TAG_TRIALS();
2513 FILL_TRIALS_NAMES(results, trials);
2514 }
2515 case eSMART_LEDGER_FLAG_TRIALS: {
2516 ledger_flag_trials_t *trials SMART_LEDGER_FLAG_TRIALS();
2517 FILL_TRIALS_NAMES(results, trials);
2518 }
2519 case eSMART_ADDR_TRIALS: {
2520 addr_trials_t *trials SMART_ADDR_TRIALS(trialsargs0);
2521 if (trialsargs1) {
2522 // Special case with an additional trial such that obj_size + addr == 0
2523 FILL_TRIALS_NAMES_AND_CONTINUE(results, trials, trials->count);
2524 assert(trials->count + 1 == results->count);
2525 char *trial_desc;
2526 kasprintf(&trial_desc, "addr: -0x%llx", trialsargs1);
2527 results->list[results->count - 1].name = kstrdup(trial_desc);
2528 kfree_str(trial_desc);
2529 break;
2530 } else {
2531 FILL_TRIALS_NAMES(results, trials);
2532 }
2533 }
2534 case eSMART_SIZE_TRIALS: {
2535 size_trials_t *trials SMART_SIZE_TRIALS();
2536 FILL_TRIALS_NAMES(results, trials);
2537 }
2538 case eSMART_START_SIZE_TRIALS: {
2539 // NB: base.addr is not constant between runs but doesn't affect trial name
2540 start_size_trials_t *trials SMART_START_SIZE_TRIALS(trialsargs0);
2541 FILL_TRIALS_NAMES(results, trials);
2542 }
2543 case eSMART_START_SIZE_OFFSET_OBJECT_TRIALS: {
2544 start_size_offset_object_trials_t *trials SMART_START_SIZE_OFFSET_OBJECT_TRIALS();
2545 FILL_TRIALS_NAMES(results, trials);
2546 }
2547 case eSMART_START_SIZE_OFFSET_TRIALS: {
2548 start_size_offset_trials_t *trials SMART_START_SIZE_OFFSET_TRIALS();
2549 FILL_TRIALS_NAMES(results, trials);
2550 }
2551 case eSMART_SIZE_SIZE_TRIALS: {
2552 T_FAIL("SIZE_SIZE_TRIALS not used\n");
2553 break;
2554 }
2555 case eSMART_SRC_DST_SIZE_TRIALS: {
2556 src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
2557 FILL_TRIALS_NAMES(results, trials);
2558 }
2559 case eSMART_FILEOFF_DST_SIZE_TRIALS: {
2560 src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
2561 FILL_TRIALS_NAMES(results, trials);
2562 }
2563 case eSMART_VM_BEHAVIOR_TRIALS: {
2564 vm_behavior_trials_t *trials SMART_VM_BEHAVIOR_TRIALS();
2565 FILL_TRIALS_NAMES(results, trials);
2566 }
2567 case eSMART_VM_ADVISE_TRIALS: {
2568 vm_advise_trials_t *trials SMART_VM_ADVISE_TRIALS();
2569 FILL_TRIALS_NAMES(results, trials);
2570 }
2571 case eSMART_VM_SYNC_TRIALS: {
2572 vm_sync_trials_t *trials SMART_VM_SYNC_TRIALS();
2573 FILL_TRIALS_NAMES(results, trials);
2574 }
2575 case eSMART_VM_MSYNC_TRIALS: {
2576 vm_msync_trials_t *trials SMART_VM_MSYNC_TRIALS();
2577 FILL_TRIALS_NAMES(results, trials);
2578 }
2579 case eSMART_VM_MACHINE_ATTRIBUTE_TRIALS: {
2580 vm_machine_attribute_trials_t *trials SMART_VM_MACHINE_ATTRIBUTE_TRIALS();
2581 FILL_TRIALS_NAMES(results, trials);
2582 }
2583 case eSMART_VM_PURGEABLE_AND_STATE_TRIALS: {
2584 vm_purgeable_and_state_trials_t *trials SMART_VM_PURGEABLE_AND_STATE_TRIALS();
2585 FILL_TRIALS_NAMES(results, trials);
2586 }
2587 case eSMART_START_SIZE_START_SIZE_TRIALS: {
2588 start_size_start_size_trials_t *trials SMART_START_SIZE_START_SIZE_TRIALS();
2589 FILL_TRIALS_NAMES(results, trials);
2590 }
2591 case eSMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS: {
2592 shared_region_map_and_slide_2_trials_t *trials SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(trialsargs0);
2593 FILL_TRIALS_NAMES(results, trials);
2594 }
2595 case eSMART_RECLAMATION_BUFFER_INIT_TRIALS: {
2596 reclamation_buffer_init_trials_t *trials SMART_RECLAMATION_BUFFER_INIT_TRIALS();
2597 FILL_TRIALS_NAMES(results, trials);
2598 }
2599 default:
2600 T_FAIL("New formula %u, args %llu %llu, update fill_golden_trials, testname: %s\n",
2601 formula, trialsargs[0], trialsargs[1], results->testname);
2602 }
2603 }
2604
2605 // Number of test trials with ret == OUT_PARAM_BAD
2606 int out_param_bad_count = 0;
2607
2608 static results_t *
2609 test_name_to_golden_results(const char* testname)
2610 {
2611 results_t *golden_results = NULL;
2612 results_t *golden_results_found = NULL;
2613
2614 for (uint32_t x = 0; x < num_tests; x++) {
2615 golden_results = golden_list[x];
2616 if (strncmp(golden_results->testname, testname, strlen(testname)) == 0) {
2617 golden_results->tested_count += 1;
2618 golden_results_found = golden_results;
2619 break;
2620 }
2621 }
2622
2623 return golden_results_found;
2624 }
2625
2626 static void
2627 dump_results_list(results_t *res_list[], uint32_t res_num_tests)
2628 {
2629 for (uint32_t x = 0; x < res_num_tests; x++) {
2630 results_t *results = res_list[x];
2631 testprintf("\t[%u] %s (%u)\n", x, results->testname, results->count);
2632 }
2633 }
2634
2635 static void
2636 dump_golden_list()
2637 {
2638 testprintf("======\n");
2639 testprintf("golden_list %p, num_tests %u\n", golden_list, num_tests);
2640 dump_results_list(golden_list, num_tests);
2641 testprintf("======\n");
2642 }
2643
2644 static void
2645 dump_kernel_results_list()
2646 {
2647 testprintf("======\n");
2648 testprintf("kernel_results_list %p, num_tests %u\n", kern_list, num_kern_tests);
2649 dump_results_list(kern_list, num_kern_tests);
2650 testprintf("======\n");
2651 }
2652
2653 // Read results written by dump_golden_results().
2654 static int
2655 populate_golden_results(const char *filename)
2656 {
2657 FILE *file;
2658 char line[MAX_LINE_LENGTH];
2659 char trial_formula[20];
2660 results_t *results = NULL;
2661 trialsformula_t formula = eUNKNOWN_TRIALS;
2662 uint64_t trial_args[TRIALSARGUMENTS_SIZE] = {0, 0};
2663 uint32_t num_results = 0;
2664 uint32_t result_number = 0;
2665 int result_ret = 0;
2666 char *test_name = NULL;
2667 char *sub_line = NULL;
2668 char *s_num_results = NULL;
2669 bool in_test = FALSE;
2670 out_param_bad_count = 0;
2671 kern_trialname_generation = strnstr(filename, "kern_golden_image", strlen(filename)) != NULL;
2672
2673 // cd to the directory containing this executable
2674 // Test files are located relative to there.
2675 uint32_t exesize = 0;
2676 _NSGetExecutablePath(NULL, &exesize);
2677 char *exe = malloc(exesize);
2678 assert(exe != NULL);
2679 _NSGetExecutablePath(exe, &exesize);
2680 char *dir = dirname(exe);
2681 chdir(dir);
2682 free(exe);
2683
2684 file = fopen(filename, "r");
2685 if (file == NULL) {
2686 T_FAIL("Could not open file %s\n", filename);
2687 return 1;
2688 }
2689
2690 // Read file line by line
2691 while (fgets(line, MAX_LINE_LENGTH, file) != NULL) {
2692 // Check if the line starts with "TESTNAME" or "RESULT COUNT"
2693 if (strncmp(line, TESTNAME_DELIMITER, strlen(TESTNAME_DELIMITER)) == 0) {
2694 // remove the newline char
2695 line[strcspn(line, "\n")] = 0;
2696 sub_line = line + strlen(TESTNAME_DELIMITER);
2697 test_name = strdup(sub_line);
2698 formula = eUNKNOWN_TRIALS;
2699 trial_args[0] = TRIALSARGUMENTS_NONE;
2700 trial_args[1] = TRIALSARGUMENTS_NONE;
2701 // T_LOG("TESTNAME %u : %s", num_tests, test_name);
2702 in_test = TRUE;
2703 } else if (in_test && strncmp(line, TRIALSFORMULA_DELIMITER, strlen(TRIALSFORMULA_DELIMITER)) == 0) {
2704 sscanf(line, "%*s %s %*s %llu,%llu,%llu", trial_formula, &trial_args[0], &trial_args[1], &trial_page_size);
2705 formula = trialsformula_from_string(trial_formula);
2706 } else if (in_test && strncmp(line, RESULTCOUNT_DELIMITER, strlen(RESULTCOUNT_DELIMITER)) == 0) {
2707 assert(num_tests < MAX_NUM_TESTS);
2708 s_num_results = line + strlen(RESULTCOUNT_DELIMITER);
2709 num_results = (uint32_t)strtoul(s_num_results, NULL, 10);
2710 results = alloc_results(test_name, formula, trial_args, TRIALSARGUMENTS_SIZE, num_results);
2711 assert(results);
2712 results->count = num_results;
2713 fill_golden_trials(trial_args, results);
2714 golden_list[num_tests++] = results;
2715 // T_LOG("num_tests %u, testname %s, count: %u", num_tests, results->testname, results->count);
2716 } else if (in_test && strncmp(line, TESTRESULT_DELIMITER, strlen(TESTRESULT_DELIMITER)) == 0) {
2717 sscanf(line, "%d: %d", &result_number, &result_ret);
2718 assert(result_number < num_results);
2719 // T_LOG("\tresult #%u: %d\n", result_number, result_ret);
2720 results->list[result_number].ret = result_ret;
2721 if (result_ret == OUT_PARAM_BAD) {
2722 out_param_bad_count += 1;
2723 T_FAIL("Out parameter violation in test %s - %s\n", results->testname, results->list[result_number].name);
2724 }
2725 } else {
2726 // T_LOG("Unknown line: %s\n", line);
2727 in_test = FALSE;
2728 }
2729 }
2730
2731 fclose(file);
2732
2733 if (!out_param_bad_count) {
2734 dump_golden_list();
2735 }
2736 kern_trialname_generation = FALSE;
2737
2738 return out_param_bad_count;
2739 }
2740
2741 static void
clean_golden_results()2742 clean_golden_results()
2743 {
2744 for (uint32_t x = 0; x < num_tests; ++x) {
2745 if (golden_list[x]->tested_count == 0) {
2746 T_LOG("WARN: Test %s found in golden file but no test with that name was run\n",
2747 golden_list[x]->testname);
2748 }
2749 if (golden_list[x]->tested_count > 1) {
2750 T_LOG("WARN: Test %s found in golden file with %d runs\n",
2751 golden_list[x]->testname, golden_list[x]->tested_count);
2752 }
2753 dealloc_results(golden_list[x]);
2754 golden_list[x] = NULL;
2755 }
2756 }
2757
2758 static void
clean_kernel_results()2759 clean_kernel_results()
2760 {
2761 for (uint32_t x = 0; x < num_kern_tests; ++x) {
2762 dealloc_results(kern_list[x]);
2763 kern_list[x] = NULL;
2764 }
2765 }
2766
2767 // buffer to output userspace golden file results (using same size as the kern buffer)
2768 static const int64_t GOLDEN_OUTPUT_BUFFER_SIZE = SYSCTL_OUTPUT_BUFFER_SIZE;
2769 static char* GOLDEN_OUTPUT_START;
2770 static char* GOLDEN_OUTPUT_BUF;
2771 static char* GOLDEN_OUTPUT_END;
2772
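// printf-style append into the in-memory golden output buffer.
// The buffer is allocated lazily on first use and has a fixed size;
// overflow trips the assertions below instead of silently truncating.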
void
goldenprintf(const char *format, ...)
{
	if (!GOLDEN_OUTPUT_START) {
		GOLDEN_OUTPUT_START = calloc(GOLDEN_OUTPUT_BUFFER_SIZE, 1);
		GOLDEN_OUTPUT_BUF = GOLDEN_OUTPUT_START;
		GOLDEN_OUTPUT_END = GOLDEN_OUTPUT_BUF + GOLDEN_OUTPUT_BUFFER_SIZE;
	}

	int printed;
	ssize_t s_buffer_size = GOLDEN_OUTPUT_END - GOLDEN_OUTPUT_BUF;
	assert(s_buffer_size > 0 && s_buffer_size <= GOLDEN_OUTPUT_BUFFER_SIZE);
	size_t buffer_size = (size_t)s_buffer_size;
	va_list args;
	va_start(args, format);
	printed = vsnprintf(GOLDEN_OUTPUT_BUF, buffer_size, format, args);
	va_end(args);
	assert(printed >= 0);
	assert((unsigned)printed < buffer_size - 1);
	assert(GOLDEN_OUTPUT_BUF + printed + 1 < GOLDEN_OUTPUT_END);
	GOLDEN_OUTPUT_BUF += printed;
}

// Verbose output in dump_results, controlled by DUMP_RESULTS env.
bool dump = FALSE;
// Output to create a golden test result, controlled by GENERATE_GOLDEN_IMAGE.
bool generate_golden = FALSE;
// Read an existing golden file and print its contents in verbose format (like dump_results). Controlled by DUMP_GOLDEN_IMAGE.
bool dump_golden = FALSE;
// Run tests as tests (i.e. emit T_{PASS/FAIL}); enabled unless golden image generation is requested.
bool should_test_results = TRUE;
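// Typical invocations (a sketch, using the environment variables above;
// the output filename is illustrative):
//   GENERATE_GOLDEN_IMAGE=1 vm_parameter_validation > golden_image.txt
//   DUMP_GOLDEN_IMAGE=1 vm_parameter_validation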

T_DECL(vm_parameter_validation_user,
    "parameter validation for userspace calls",
    T_META_SPAWN_TOOL(DECOMPRESS),
    T_META_SPAWN_TOOL_ARG("user"),
    T_META_SPAWN_TOOL_ARG(TMP_DIR),
    T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_VERSION)
    )
{
	if (disable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Could not disable VM API telemetry. Bailing out early.");
		return;
	}

	read_env();

	T_LOG("dump %d, golden %d, dump_golden %d, test %d\n", dump, generate_golden, dump_golden, should_test_results);

	if (generate_golden && unsigned_code_is_disallowed()) {
		// Some test results change when SIP is enabled.
		// Golden files must record the SIP-disabled values.
		T_FAIL("Can't generate golden files with SIP enabled. Disable SIP and try again.\n");
		return;
	}

	if ((dump_golden || should_test_results) && populate_golden_results(GOLDEN_FILE)) {
		// bail out early, problem loading golden test results
		T_FAIL("Could not load golden file '%s'\n", GOLDEN_FILE);
		return;
	}

	set_up_guard_page();

	disable_exc_guard();

	if (dump_golden) {
		// just print the parsed golden file
		for (uint32_t x = 0; x < num_tests; ++x) {
			__dump_results(golden_list[x]);
		}
		goto out;
	}

	/*
	 * -- memory entry functions --
	 * The memory entry test functions use macros to generate each flavor of memory entry function.
	 * This is partially because there are several entrypoints
	 * (mach_make_memory_entry/mach_make_memory_entry_64/_mach_make_memory_entry)
	 * and partially because many flavors of each function are called (copy/memonly/share/...).
	 */
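	// The generated call sites below follow the naming scheme
	// call_<entrypoint>__<trial type>__<flavor>; each RUN_* line pairs one
	// call site with the human-readable name recorded in the golden file.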

	// Mach start/size with both old-style and new-style types
	// (co-located so old and new can be compared more easily)
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#define RUN_OLD64(fn, name) RUN_NEW(fn, name)
#else
#define RUN_OLD(fn, name) do {} while (0)
#define RUN_OLD64(fn, name) do {} while (0)
#endif
	// mach_make_memory_entry has up to three entry points on U32, unlike other functions that have two
	RUN_NEW(call_mach_make_memory_entry_64__start_size__copy, "mach_make_memory_entry_64 (copy)");
	RUN_OLD(call_mach_make_memory_entry__start_size__copy, "mach_make_memory_entry (copy)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__copy, "_mach_make_memory_entry (copy)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__memonly, "mach_make_memory_entry_64 (mem_only)");
	RUN_OLD(call_mach_make_memory_entry__start_size__memonly, "mach_make_memory_entry (mem_only)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__memonly, "_mach_make_memory_entry (mem_only)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__namedcreate, "mach_make_memory_entry_64 (named_create)");
	RUN_OLD(call_mach_make_memory_entry__start_size__namedcreate, "mach_make_memory_entry (named_create)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__namedcreate, "_mach_make_memory_entry (named_create)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__share, "mach_make_memory_entry_64 (share)");
	RUN_OLD(call_mach_make_memory_entry__start_size__share, "mach_make_memory_entry (share)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__share, "_mach_make_memory_entry (share)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__namedreuse, "mach_make_memory_entry_64 (named_reuse)");
	RUN_OLD(call_mach_make_memory_entry__start_size__namedreuse, "mach_make_memory_entry (named_reuse)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__namedreuse, "_mach_make_memory_entry (named_reuse)");
#undef RUN_NEW
#undef RUN_OLD
#undef RUN_OLD64

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_size(fn, name " (size)")))
	RUN(call_mach_memory_object_memory_entry_64__size, "mach_memory_object_memory_entry_64");
	RUN(call_replacement_mach_memory_object_memory_entry__size, "mach_memory_object_memory_entry");
#undef RUN

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#define RUN_OLD64(fn, name) RUN_NEW(fn, name)

	RUN_NEW(call_mach_make_memory_entry_64__vm_prot, "mach_make_memory_entry_64");
#if TEST_OLD_STYLE_MACH
	RUN_OLD(call_mach_make_memory_entry__vm_prot, "mach_make_memory_entry");
	RUN_OLD64(call__mach_make_memory_entry__vm_prot, "_mach_make_memory_entry");
#endif

#undef RUN_NEW
#undef RUN_OLD
#undef RUN_OLD64

#define RUN(fn, name) dealloc_results(process_results(test_mach_vm_prot(fn, name " (vm_prot_t)")))
	RUN(call_mach_memory_object_memory_entry_64__vm_prot, "mach_memory_object_memory_entry_64");
	RUN(call_replacement_mach_memory_object_memory_entry__vm_prot, "mach_memory_object_memory_entry");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
	RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
	RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
#undef RUN

	/*
	 * -- allocate/deallocate functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
	RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
	RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
	RUN(call_mach_vm_allocate__flags, "mach_vm_allocate");
#undef RUN

	dealloc_results(process_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
#if TEST_OLD_STYLE_MACH
	dealloc_results(process_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));
#endif

#define RUN(fn, name) dealloc_results(process_results(test_deallocator(fn, name " (start/size)")))
	RUN(call_munmap, "munmap");
#undef RUN

	/*
	 * -- map/unmap functions --
	 * The map/unmap functions use multiple layers of macros.
	 * The macros are used both to generate the functions (see IMPL_ONE_FROM_HELPER)
	 * and to call them all. This avoids a large amount of code duplication,
	 * as the map/remap functions have many different parameter combinations we want to test.
	 */
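	// Each RUN_ALL below fans a single entrypoint out across every tested
	// parameter combination: 25 RUN_* trials per map function and, further
	// down, 16 per remap function.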

	// map tests

#define RUN_START_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
#define RUN_HINT_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
#define RUN_PROT_PAIR(fn, name) dealloc_results(process_results(test_mach_vm_prot_pair(fn, name " (prot_pairs)")))
#define RUN_INHERIT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
#define RUN_FLAGS(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
#define RUN_SSOO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset_object(fn, name " (start/size/offset/object)")))

#define RUN_ALL(fn, name) \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)"); \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)"); \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)"); \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)"); \
	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)"); \
	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)"); \
	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)"); \
	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)"); \
	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)"); \
	RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)"); \
	RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)"); \
	RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)"); \

	RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
#if TEST_OLD_STYLE_MACH
	RUN_ALL(vm_map_64_retyped, vm_map_64);
	RUN_ALL(vm_map_retyped, vm_map);
#endif

#undef RUN_ALL
#undef RUN_START_SIZE
#undef RUN_HINT_SIZE
#undef RUN_PROT_PAIR
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SSOO

	// remap tests

#define FN_NAME(fn, variant, type) call_ ## fn ## __ ## variant ## __ ## type
#define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(process_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
#define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
#define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
#define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
#define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
#define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
#define RUN_SRC_DST_SIZE(fn, dst, variant, type_name, name) RUN_HELPER(test_allocated_src_##dst##_dst_size, fn, variant, src_dst_size, type_name, name)

#define RUN_ALL(fn, realigned, name) \
	RUN_SRC_SIZE(fn, copy, realigned "src/size", name); \
	RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name); \
	RUN_DST_SIZE(fn, fixed, "realigned dst/size", name); \
	RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name); \
	RUN_DST_SIZE(fn, anywhere, "hint/size", name); \
	RUN_INHERIT(fn, fixed, name); \
	RUN_INHERIT(fn, fixed_copy, name); \
	RUN_INHERIT(fn, anywhere, name); \
	RUN_FLAGS(fn, nocopy, name); \
	RUN_FLAGS(fn, copy, name); \
	RUN_PROT_PAIRS(fn, fixed, name); \
	RUN_PROT_PAIRS(fn, fixed_copy, name); \
	RUN_PROT_PAIRS(fn, anywhere, name); \
	RUN_SRC_DST_SIZE(fn, allocated, fixed, "src/dst/size", name); \
	RUN_SRC_DST_SIZE(fn, allocated, fixed_copy, "src/dst/size", name); \
	RUN_SRC_DST_SIZE(fn, unallocated, anywhere, "src/dst/size", name); \

	RUN_ALL(mach_vm_remap_user, "realigned ", mach_vm_remap);
	RUN_ALL(mach_vm_remap_new_user, , mach_vm_remap_new);

#if TEST_OLD_STYLE_MACH
	RUN_ALL(vm_remap_retyped, "realigned ", vm_remap);
#endif

#undef RUN_ALL
#undef RUN_HELPER
#undef RUN_SRC_SIZE
#undef RUN_DST_SIZE
#undef RUN_PROT_PAIRS
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SRC_DST_SIZE

	// mmap tests

#define RUN(fn, name) dealloc_results(process_results(test_mmap_with_allocated_vm_map_kernel_flags_t(fn, name " (kernel flags)")))
	RUN(call_mmap__anon_private__kernel_flags, "mmap (anon private)");
	RUN(call_mmap__anon_shared__kernel_flags, "mmap (anon shared)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_mmap_flags(fn, name " (mmap flags)")))
	RUN(call_mmap__mmap_flags, "mmap");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
	RUN(call_mmap__file_private__start_size, "mmap (file private)");
	RUN(call_mmap__anon_private__start_size, "mmap (anon private)");
	RUN(call_mmap__file_shared__start_size, "mmap (file shared)");
	RUN(call_mmap__anon_shared__start_size, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__start_size, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__start_size, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__start_size, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_fixed_dst_size(fn, name " (dst/size)")))
	RUN(call_mmap__fixed_private__start_size, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (offset/size)")))
	RUN(call_mmap__file_private__offset_size, "mmap (file private)");
	RUN(call_mmap__anon_private__offset_size, "mmap (anon private)");
	RUN(call_mmap__file_shared__offset_size, "mmap (file shared)");
	RUN(call_mmap__anon_shared__offset_size, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__offset_size, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__offset_size, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__offset_size, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_dst_size_fileoff(fn, name " (hint/size/fileoff)")))
	RUN(call_mmap__file_private__dst_size_fileoff, "mmap (file private)");
	RUN(call_mmap__anon_private__dst_size_fileoff, "mmap (anon private)");
	RUN(call_mmap__file_shared__dst_size_fileoff, "mmap (file shared)");
	RUN(call_mmap__anon_shared__dst_size_fileoff, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__dst_size_fileoff, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__dst_size_fileoff, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__dst_size_fileoff, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_fixed_dst_size_fileoff(fn, name " (dst/size/fileoff)")))
	RUN(call_mmap__fixed_private__dst_size_fileoff, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mmap__file_private__vm_prot, "mmap (file private)");
	RUN(call_mmap__anon_private__vm_prot, "mmap (anon private)");
	RUN(call_mmap__file_shared__vm_prot, "mmap (file shared)");
	RUN(call_mmap__anon_shared__vm_prot, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__vm_prot, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__vm_prot, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__vm_prot, "mmap (no unix03)");
	RUN(call_mmap__fixed_private__vm_prot, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mremap_encrypted, "mremap_encrypted");
#undef RUN

	/*
	 * -- wire/unwire functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mlock, "mlock");
	RUN(call_munlock, "munlock");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mach_vm_wire__wire, "mach_vm_wire (wire)");
	RUN(call_replacement_vm_wire__wire, "vm_wire (wire)");
	RUN(call_mach_vm_wire__unwire, "mach_vm_wire (unwire)");
	RUN(call_replacement_vm_wire__unwire, "vm_wire (unwire)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mach_vm_wire__vm_prot, "mach_vm_wire");
	RUN(call_replacement_vm_wire__vm_prot, "vm_wire");
#undef RUN

	/*
	 * -- copyin/copyout functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_read, "mach_vm_read");
	RUN_OLD(call_vm_read, "vm_read");
	RUN_NEW(call_mach_vm_read_list, "mach_vm_read_list");
	RUN_OLD(call_vm_read_list, "vm_read_list");

	RUN_NEW(call_mach_vm_read_overwrite__src, "mach_vm_read_overwrite (src)");
	RUN_NEW(call_mach_vm_read_overwrite__dst, "mach_vm_read_overwrite (dst)");
	RUN_OLD(call_vm_read_overwrite__src, "vm_read_overwrite (src)");
	RUN_OLD(call_vm_read_overwrite__dst, "vm_read_overwrite (dst)");

	RUN_NEW(call_mach_vm_write__src, "mach_vm_write (src)");
	RUN_NEW(call_mach_vm_write__dst, "mach_vm_write (dst)");
	RUN_OLD(call_vm_write__src, "vm_write (src)");
	RUN_OLD(call_vm_write__dst, "vm_write (dst)");

	RUN_NEW(call_mach_vm_copy__src, "mach_vm_copy (src)");
	RUN_NEW(call_mach_vm_copy__dst, "mach_vm_copy (dst)");
	RUN_OLD(call_vm_copy__src, "vm_copy (src)");
	RUN_OLD(call_vm_copy__dst, "vm_copy (dst)");
#undef RUN_NEW
#undef RUN_OLD

	/*
	 * -- inherit functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_inherit, "mach_vm_inherit");
	RUN_OLD(call_vm_inherit, "vm_inherit");
#undef RUN_OLD
#undef RUN_NEW

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_minherit, "minherit");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
	RUN(call_mach_vm_inherit__inherit, "mach_vm_inherit");
#undef RUN
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
	RUN(call_minherit__inherit, "minherit");
#undef RUN

	/*
	 * -- protection functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_protect__start_size, "mach_vm_protect");
	RUN_OLD(call_vm_protect__start_size, "vm_protect");
#undef RUN_NEW
#undef RUN_OLD
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_protect__vm_prot, "mach_vm_protect");
	RUN_OLD(call_vm_protect__vm_prot, "vm_protect");
#undef RUN_NEW
#undef RUN_OLD
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mprotect__start_size, "mprotect");
#undef RUN
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mprotect__vm_prot, "mprotect");
#undef RUN

	/*
	 * -- madvise/behavior functions --
	 */

	unsigned alignment_for_can_reuse;
	if (isRosetta()) {
		/*
		 * VM_BEHAVIOR_CAN_REUSE and MADV_CAN_REUSE get different errors
		 * on Rosetta when the allocation happens to be 4K vs 16K aligned.
		 * Force 16K alignment for consistent results.
		 */
		alignment_for_can_reuse = KB16 - 1;
	} else {
		/* Use default alignment everywhere else. */
		alignment_for_can_reuse = 0;
	}
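	// (alignment_for_can_reuse appears to act as an alignment mask for the
	// *_aligned_* trial helpers below: KB16 - 1 requests 16K-aligned
	// allocations, while 0 keeps the allocator's default alignment.)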

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_behavior_set__start_size__default, "mach_vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
	RUN_OLD(call_vm_behavior_set__start_size__default, "vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
#undef RUN_NEW
#undef RUN_OLD

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_aligned_start_size(fn, alignment_for_can_reuse, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_aligned_start_size(fn, alignment_for_can_reuse, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_behavior_set__start_size__can_reuse, "mach_vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
	RUN_OLD(call_vm_behavior_set__start_size__can_reuse, "vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
#undef RUN_NEW
#undef RUN_OLD

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_aligned_vm_behavior_t(fn, alignment_for_can_reuse, name " (vm_behavior_t)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_aligned_vm_behavior_t(fn, alignment_for_can_reuse, name " (vm_behavior_t)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_behavior_set__vm_behavior, "mach_vm_behavior_set");
	RUN_OLD(call_vm_behavior_set__vm_behavior, "vm_behavior_set");
#undef RUN_NEW
#undef RUN_OLD

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_madvise__start_size, "madvise");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_aligned_vm_advise_t(fn, alignment_for_can_reuse, name " (vm_advise_t)")))
	RUN(call_madvise__vm_advise, "madvise");
#undef RUN

	/*
	 * -- msync functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_msync__start_size, "mach_vm_msync");
	RUN_OLD(call_vm_msync__start_size, "vm_msync");
#undef RUN_NEW
#undef RUN_OLD
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_sync_t(fn, name " (vm_sync_t)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_sync_t(fn, name " (vm_sync_t)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_msync__vm_sync, "mach_vm_msync");
	RUN_OLD(call_vm_msync__vm_sync, "vm_msync");
#undef RUN_NEW
#undef RUN_OLD
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_msync__start_size, "msync");
	RUN(call_msync_nocancel__start_size, "msync_nocancel");
#undef RUN
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_msync_t(fn, name " (msync flags)")))
	RUN(call_msync__vm_msync, "msync");
	RUN(call_msync_nocancel__vm_msync, "msync_nocancel");
#undef RUN

	/*
	 * -- machine attribute functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_machine_attribute__start_size, "mach_vm_machine_attribute");
	RUN_OLD(call_vm_machine_attribute__start_size, "vm_machine_attribute");
#undef RUN_NEW
#undef RUN_OLD
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_machine_attribute_t(fn, name " (machine_attribute_t)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_machine_attribute_t(fn, name " (machine_attribute_t)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_machine_attribute__machine_attribute, "mach_vm_machine_attribute");
	RUN_OLD(call_vm_machine_attribute__machine_attribute, "vm_machine_attribute");
#undef RUN_NEW
#undef RUN_OLD

	/*
	 * -- purgability/purgeability functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_addr(fn, name " (addr)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_purgeable_addr(fn, name " (addr)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_purgable_control__address__get, "mach_vm_purgable_control (get)");
	RUN_OLD(call_vm_purgable_control__address__get, "vm_purgable_control (get)");

	RUN_NEW(call_mach_vm_purgable_control__address__purge_all, "mach_vm_purgable_control (purge all)");
	RUN_OLD(call_vm_purgable_control__address__purge_all, "vm_purgable_control (purge all)");
#undef RUN_NEW
#undef RUN_OLD
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_purgable_control__purgeable_state, "mach_vm_purgable_control");
	RUN_OLD(call_vm_purgable_control__purgeable_state, "vm_purgable_control");
#undef RUN_NEW
#undef RUN_OLD

	/*
	 * -- region info functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_addr(fn, name " (addr)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_region, "mach_vm_region");
	RUN_OLD(call_vm_region, "vm_region");
	RUN_NEW(call_mach_vm_region_recurse, "mach_vm_region_recurse");
	RUN_OLD(call_vm_region_recurse, "vm_region_recurse");
	RUN_OLD(call_vm_region_recurse_64, "vm_region_recurse_64");
#undef RUN_NEW
#undef RUN_OLD

	/*
	 * -- page info functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_addr(fn, name " (addr)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_page_info, "mach_vm_page_info");
	RUN_NEW(call_mach_vm_page_query, "mach_vm_page_query");
	RUN_OLD(call_vm_map_page_query, "vm_map_page_query");
#undef RUN_NEW
#undef RUN_OLD

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN_NEW(call_mach_vm_page_range_query, "mach_vm_page_range_query");
#undef RUN_NEW

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mincore, "mincore");
#undef RUN

	/*
	 * -- miscellaneous functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_unix_shared_region_map_and_slide_2_np(fn, name " (files/mappings)")))
	RUN(call_shared_region_map_and_slide_2_np_child, "shared_region_map_and_slide_2_np");
	RUN(call_shared_region_map_and_slide_2_np_in_thread, "different thread shared_region_map_and_slide_2_np");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_vm_deferred_reclamation_buffer_init(fn, name)))
	RUN(call_mach_vm_deferred_reclamation_buffer_init, "mach_vm_deferred_reclamation_buffer_init");
#undef RUN

out:
	restore_exc_guard();

	if (generate_golden) {
		if (!out_param_bad_count || (dump && !should_test_results)) {
			// Only print after verifying there are no OUT_PARAM_BAD results,
			// or when the user explicitly set DUMP_RESULTS=1 GENERATE_GOLDEN_IMAGE=1
			printf("%s", GOLDEN_OUTPUT_START);
		}
	}
	free(GOLDEN_OUTPUT_START);

	if (dump_golden || should_test_results) {
		clean_golden_results();
	}

	if (reenable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Failed to reenable VM API telemetry.");
		return;
	}

	T_PASS("vm parameter validation userspace");
}


/////////////////////////////////////////////////////
// Kernel test invocation.
// The actual test code is in:
// osfmk/tests/vm_parameter_validation_kern.c

#define KERN_RESULT_DELIMITER "\n"

#ifndef STRINGIFY
#define __STR(x) #x
#define STRINGIFY(x) __STR(x)
#endif
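
// STRINGIFY(OUT_PARAM_BAD) expands the return code macro and turns it into a
// string, so the raw kernel output buffer can be scanned for out-parameter
// violations before any parsing happens.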

// Verify the golden list being generated doesn't contain OUT_PARAM_BAD
static int
out_bad_param_in_kern_golden_results(char *kern_buffer)
{
	const char *out_param_bad_str = STRINGIFY(OUT_PARAM_BAD);
	char *out_param_bad_match = strstr(kern_buffer, out_param_bad_str);
	if (out_param_bad_match) {
		T_FAIL("Out parameter violation return code (%s) found in results, aborting.\n", out_param_bad_str);
		return 1;
	}
	return 0;
}


// Read results written by __dump_results()
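// A rough sketch of the kernel output layout, reconstructed from the parser
// below (one newline-delimited record per line):
//   TESTNAME <test name>
//   RESULT COUNT <n>
//   TESTCONFIG <config string>
//   <result lines, parsed as "<result delimiter><return code>, <trial name>">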
static int
populate_kernel_results(char *kern_buffer)
{
	char *line = NULL;
	char *sub_line = NULL;
	char *test_name = NULL;
	char *result_name = NULL;
	char *token = NULL;
	char *s_num_kern_results = NULL;
	results_t *kern_results = NULL;
	uint32_t num_kern_results = 0;
	uint32_t result_number = 0;
	int result_ret = 0;
	bool in_test = FALSE;

	line = strtok(kern_buffer, KERN_RESULT_DELIMITER);
	while (line != NULL) {
		if (strncmp(line, TESTNAME_DELIMITER, strlen(TESTNAME_DELIMITER)) == 0) {
			sub_line = line + strlen(TESTNAME_DELIMITER);
			test_name = strdup(sub_line);
			result_number = 0;
			in_test = TRUE;
		} else if (in_test && strncmp(line, RESULTCOUNT_DELIMITER, strlen(RESULTCOUNT_DELIMITER)) == 0) {
			s_num_kern_results = line + strlen(RESULTCOUNT_DELIMITER);
			num_kern_results = (uint32_t)strtoul(s_num_kern_results, NULL, 10);
			kern_results = alloc_results(test_name, eUNKNOWN_TRIALS, num_kern_results);
			kern_results->count = num_kern_results;
			kern_list[num_kern_tests++] = kern_results;
		} else if (in_test && strncmp(line, TESTCONFIG_DELIMITER, strlen(TESTCONFIG_DELIMITER)) == 0) {
			sub_line = line + strlen(TESTCONFIG_DELIMITER);
			kern_results->testconfig = strdup(sub_line);
		} else if (in_test && strstr(line, KERN_TESTRESULT_DELIMITER)) {
			// should have found TESTCONFIG already
			assert(kern_results->testconfig != NULL);
			sscanf(line, KERN_TESTRESULT_DELIMITER "%d", &result_ret);
			// get the result name (comes after the first ,)
			token = strchr(line, ',');
			if (token) {
				token = token + 2; // skip the , and the extra space
				result_name = strdup(token);
				if (result_number >= num_kern_results) {
					T_LOG("\tKERN Invalid output in test %s, seeing more results (%u) than expected (%u), ignoring trial RESULT %d, %s\n",
					    test_name, result_number, num_kern_results, result_ret, result_name);
					free(result_name);
				} else {
					kern_results->list[result_number++] = (result_t){.ret = result_ret, .name = result_name};
				}
			}
		} else {
			// T_LOG("Unknown kernel result line: %s\n", line);
			// in_test = FALSE;
		}

		line = strtok(NULL, KERN_RESULT_DELIMITER);
	}

	dump_kernel_results_list();

	return 0;
}

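// Invoke a debug.test.* sysctl by name, passing 'value' down and returning
// the kernel's 64-bit result.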
static int64_t
run_sysctl_test(const char *t, int64_t value)
{
	char name[1024];
	int64_t result = 0;
	size_t s = sizeof(value);
	int rc;

	snprintf(name, sizeof(name), "debug.test.%s", t);
	rc = sysctlbyname(name, &result, &s, &value, s);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rc, "sysctlbyname(%s)", t);
	return result;
}

T_DECL(vm_parameter_validation_kern,
    "parameter validation for kext/xnu calls",
    T_META_SPAWN_TOOL(DECOMPRESS),
    T_META_SPAWN_TOOL_ARG("kern"),
    T_META_SPAWN_TOOL_ARG(TMP_DIR),
    T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_VERSION)
    )
{
	if (disable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Could not disable VM API telemetry. Bailing out early.");
		return;
	}

	read_env();

	T_LOG("dump %d, golden %d, dump_golden %d, test %d\n", dump, generate_golden, dump_golden, should_test_results);

	disable_exc_guard();

	if (dump_golden) {
		if (populate_golden_results(KERN_GOLDEN_FILE)) {
			// couldn't load golden test results
			T_FAIL("Could not load golden file '%s'\n", KERN_GOLDEN_FILE);
			goto out;
		}

		// just print the parsed golden file
		for (uint32_t x = 0; x < num_tests; ++x) {
			__dump_results(golden_list[x]);
		}
		clean_golden_results();
		goto out;
	}

	T_LOG("Running kernel tests\n");

	// We allocate a large buffer. The kernel-side code writes output to it.
	// Then we print that output. This is faster than making the kernel-side
	// code print directly to the serial console, which takes many minutes
	// to transfer our test output at 14.4 KB/s.
	// We align this buffer to KB16 to allow the lower bits to be used for a fd.
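	// The buffer address and the fd travel to the kernel in a single int64
	// sysctl argument: (int64_t)output + fd, with fd (plus, when generating
	// golden files, the KB16 >> 1 flag bit) packed into the low bits that
	// the 16K alignment leaves as zero.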
	void *output;
	int alloc_failed = posix_memalign(&output, KB16, SYSCTL_OUTPUT_BUFFER_SIZE);
	assert(alloc_failed == 0);

	memset(output, 0, SYSCTL_OUTPUT_BUFFER_SIZE);

	int fd = get_fd();
	assert((fd & ((int)KB16 - 1)) == fd);
	if (generate_golden) {
		// pass the flag on the msb of the fd
		assert((fd & ((int)(KB16 >> 1) - 1)) == fd);
		fd |= KB16 >> 1;
	}
	int64_t result = run_sysctl_test("vm_parameter_validation_kern", (int64_t)output + fd);

	T_QUIET; T_EXPECT_EQ(1ull, result, "vm_parameter_validation_kern");

	if (generate_golden) {
		if (!out_bad_param_in_kern_golden_results(output) || (dump && !should_test_results)) {
			// Only print after verifying there are no OUT_PARAM_BAD results,
			// or when the user explicitly set DUMP_RESULTS=1 GENERATE_GOLDEN_IMAGE=1
			printf("%s", output);
		}
		free(output);
		output = NULL;
	} else {
		// recreate a results_t to compare against the golden file results
		if (populate_kernel_results(output)) {
			T_FAIL("Error while parsing results\n");
		}
		free(output);
		output = NULL;

		if (should_test_results && populate_golden_results(KERN_GOLDEN_FILE)) {
			// couldn't load golden test results
			T_FAIL("Could not load golden file '%s'\n", KERN_GOLDEN_FILE);
			clean_kernel_results();
			goto out;
		}

		// compare results against the values from the golden list
		for (uint32_t x = 0; x < num_kern_tests; ++x) {
			process_results(kern_list[x]);
			dealloc_results(kern_list[x]);
			kern_list[x] = NULL;
		}
		clean_golden_results();
	}

out:
	restore_exc_guard();

	if (reenable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Failed to reenable VM API telemetry.");
		return;
	}

	T_PASS("vm parameter validation kern");
}