#ifndef VM_PARAMETER_VALIDATION_H
#define VM_PARAMETER_VALIDATION_H


/*
 * Common Naming Conventions:
 * call_* functions are harnesses used to call a single function under test.
 * They take all arguments needed to call the function and avoid calling functions with PANICing values.
 * test_* functions are used to call the call_* functions. They iterate through possibilities of interesting parameters
 * and provide those as arguments to the call_* functions.
 *
 * test_* functions are named in the following way:
 * Arguments under test are put at the end of the name, e.g. test_mach_vm_prot tests a vm_prot_t.
 * test_mach_... functions test a function whose first argument is a MAP_T.
 * test_unix_... functions test a unix-y function. This means it doesn't take a MAP_T.
 * In kernel context, this means it operates on current_map() instead of an arbitrary vm_map_t.
 * test_..._with_allocated_... means an allocation has already been created, and some parameters referring to that allocation are passed in.
 *
 * Common Abbreviations:
 * ssz: Start + Start + Size
 * ssoo: Start + Size + Offset + Object
 * sso: Start + Size + Offset
 */
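/*
 * A hypothetical pair following these conventions (sketch only; the real
 * harnesses take more care and live in the test implementation files;
 * prot_would_panic() is an invented placeholder for a PANIC-value check):
 *
 *   static int
 *   call_mach_vm_protect(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
 *   {
 *           if (prot_would_panic(prot)) return PANIC;   // skip PANICing values
 *           return mach_vm_protect(map, start, size, false, prot);
 *   }
 *
 *   test_mach_vm_prot would then iterate interesting vm_prot_t values and
 *   record one result per call_mach_vm_protect() trial.
 */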

#include <sys/mman.h>
#if KERNEL

#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/vm_reclaim.h>
#include <mach/mach_types.h>
#include <mach/mach_host.h>
#include <mach/memory_object.h>
#include <mach/memory_entry.h>
#include <mach/mach_vm_server.h>

#include <device/device_port.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_kern_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_memtag.h>
#include <vm/vm_memory_entry.h>
#include <vm/vm_memory_entry_xnu.h>
#include <vm/vm_object_internal.h>
#include <vm/vm_iokit.h>
#include <kern/ledger.h>
extern ledger_template_t        task_ledger_template;

#define FLAGS_AND_TAG(f, t) ({                             \
	vm_map_kernel_flags_t vmk_flags;                   \
	vm_map_kernel_flags_set_vmflags(&vmk_flags, f, t); \
	vmk_flags;                                         \
})
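// Hypothetical usage sketch: combine classic VM flags with an allocation tag,
// e.g. FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK) yields a
// vm_map_kernel_flags_t value suitable for the vmk_flags-taking interfaces.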

#else  // KERNEL

#include <TargetConditionals.h>

#endif // KERNEL


// ignore some warnings inside this file
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeclaration-after-statement"
#pragma clang diagnostic ignored "-Wincompatible-function-pointer-types"
#pragma clang diagnostic ignored "-Wmissing-prototypes"
#pragma clang diagnostic ignored "-Wpedantic"
#pragma clang diagnostic ignored "-Wgcc-compat"

/*
 * Invalid values for various types. These are used by the out-parameter tests.
 * UNLIKELY_* means the value is not 100% guaranteed to be invalid for that type;
 * it is just a very unlikely value for it. Tests should not compare against
 * UNLIKELY_ values unless there is an explicit reason the value cannot occur.
 *
 * INVALID_* means the value is 100% guaranteed to be invalid. It can be relied on in comparisons.
 */

#define UNLIKELY_INITIAL_ADDRESS 0xabababab
/*
 * It's important for us to never have a test with a size like
 * UNLIKELY_INITIAL_SIZE, and for this to stay non-page-aligned.
 * See the comment in call_mach_memory_entry_map_size__start_size for more info.
 */
#define UNLIKELY_INITIAL_SIZE 0xabababab
#define UNLIKELY_INITIAL_PPNUM 0xabababab
#define UNLIKELY_INITIAL_MACH_PORT ((mach_port_t) 0xbabababa)
#define UNLIKELY_INITIAL_VID 0xbabababa
// This cannot possibly be a valid vnode pointer: (void *)-1 is never a valid pointer.
#define INVALID_VNODE_PTR ((void *) -1)
// This cannot possibly be a valid vm_map_copy_t: (void *)-1 is never a valid pointer.
#define INVALID_VM_MAP_COPY ((vm_map_copy_t) (void *) -1)
// This cannot be a purgable state (see vm_purgable.h); it is far above the last valid state.
#define INVALID_PURGABLE_STATE 0xababab
static_assert(INVALID_PURGABLE_STATE > VM_PURGABLE_STATE_MAX, "This test requires a purgable state above the max");
// Disposition values are generated by OR-ing VM_PAGE_QUERY_ values together.
// This cannot be a valid one: it is above the largest possible OR-ed combination.
#define INVALID_DISPOSITION_VALUE 0xffffff0
#define INVALID_INHERIT 0xbaba
static_assert(INVALID_INHERIT > VM_INHERIT_LAST_VALID, "This test requires an inheritance above the max");

#define INVALID_INITIAL_VID 0xbabababa
// output buffer size for kext/xnu sysctl tests
// note: 1 GB is too big for watchOS
static const int64_t SYSCTL_OUTPUT_BUFFER_SIZE = 512 * 1024 * 1024;  // 512 MB

// caller name (kernel/kext/userspace), used to label the output
#if KERNEL
#       define CALLER_NAME "kernel"
#else
#       define CALLER_NAME "userspace"
#endif

// os name, used to label the output
#if KERNEL
#       if XNU_TARGET_OS_OSX
#               define OS_NAME "macos"
#       elif XNU_TARGET_OS_IOS
#               define OS_NAME "ios"
#       elif XNU_TARGET_OS_TV
#               define OS_NAME "tvos"
#       elif XNU_TARGET_OS_WATCH
#               define OS_NAME "watchos"
#       elif XNU_TARGET_OS_BRIDGE
#               define OS_NAME "bridgeos"
#       else
#               define OS_NAME "unknown-os"
#       endif
#else
#       if TARGET_OS_OSX
#               define OS_NAME "macos"
#       elif TARGET_OS_MACCATALYST
#               define OS_NAME "catalyst"
#       elif TARGET_OS_IOS
#               define OS_NAME "ios"
#       elif TARGET_OS_TV
#               define OS_NAME "tvos"
#       elif TARGET_OS_WATCH
#               define OS_NAME "watchos"
#       elif TARGET_OS_BRIDGE
#               define OS_NAME "bridgeos"
#       else
#               define OS_NAME "unknown-os"
#       endif
#endif

// architecture name, used to label the output
#if KERNEL
#       if __i386__
#               define ARCH_NAME "i386"
#       elif __x86_64__
#               define ARCH_NAME "x86_64"
#       elif __arm64__ && __LP64__
#               define ARCH_NAME "arm64"
#       elif __arm64__ && !__LP64__
#               define ARCH_NAME "arm64_32"
#       elif __arm__
#               define ARCH_NAME "arm"
#       else
#               define ARCH_NAME "unknown-arch"
#       endif
#else
#       if TARGET_CPU_X86
#               define ARCH_NAME "i386"
#       elif TARGET_CPU_X86_64
#               define ARCH_NAME "x86_64"
#       elif TARGET_CPU_ARM64 && __LP64__
#               define ARCH_NAME "arm64"
#       elif TARGET_CPU_ARM64 && !__LP64__
#               define ARCH_NAME "arm64_32"
#       elif TARGET_CPU_ARM
#               define ARCH_NAME "arm"
#       else
#               define ARCH_NAME "unknown-arch"
#       endif
#endif

#if KERNEL
#       define MAP_T vm_map_t
#else
#       define MAP_T mach_port_t
#endif

// Mach has new-style functions with 64-bit address and size
// and old-style functions with pointer-size address and size.
// On U64 platforms both names send the same MIG message
// and run the same kernel code so we need not test both.
// On U32 platforms they are different inside the kernel.
// fixme for kext/kernel, verify that vm32 entrypoints are not used and not exported
#if KERNEL || __LP64__
#       define TEST_OLD_STYLE_MACH 0
#else
#       define TEST_OLD_STYLE_MACH 1
#endif

// always 64-bit: addr_t, mach_vm_address/size_t, memory_object_size/offset_t
// always 32-bit: mach_msg_type_number_t, natural_t
// pointer-size:  void*, vm_address_t, vm_size_t
typedef uint64_t addr_t;

// We often use 4KB or 16KB instead of PAGE_SIZE
// (for example using 16KB instead of PAGE_SIZE to avoid Rosetta complications)
#define KB4 ((addr_t)4*1024)
#define KB16 ((addr_t)16*1024)

// Allocation size commonly used in tests.
// This size is big enough that our trials of small
// address offsets and sizes will still fit inside it.
#define TEST_ALLOC_SIZE (4 * KB16)

// Magic return codes used for in-band signalling.
// These must avoid kern_return_t and errno values.
#define BUSTED        -99  // trial is broken
#define IGNORED       -98  // trial not performed for acceptable reasons
#define ZEROSIZE      -97  // trial succeeded because size==0 (FAKE tests only)
#define PANIC         -96  // trial not performed because it would provoke a panic
#define GUARD         -95  // trial not performed because it would provoke EXC_GUARD
#define ACCEPTABLE    -94  // trial should be considered successful no matter what the golden result is
#define OUT_PARAM_BAD -93  // trial has incorrect setting of out parameter values

static inline bool
is_fake_error(int err)
{
	return err == BUSTED || err == IGNORED || err == ZEROSIZE ||
	       err == PANIC || err == GUARD || err == OUT_PARAM_BAD;
}
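
// Sketch of how a harness might use these sentinels (hypothetical;
// size_would_panic() and call_fn() are invented placeholders):
//
//   if (size_would_panic(size)) {
//           return PANIC;            // don't run trials that would panic
//   }
//   int ret = call_fn(map, start, size);
//   assert(!is_fake_error(ret));     // real results must not collide with sentinels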

// Return the count of a (non-decayed!) array.
#define countof(array) (sizeof(array) / sizeof((array)[0]))

#if !KERNEL
static inline uint64_t
VM_MAP_PAGE_SIZE(MAP_T map __unused)
{
	// fixme wrong for out-of-process maps
	// on platforms that support processes with two different page sizes
	return PAGE_SIZE;
}

static inline uint64_t
VM_MAP_PAGE_MASK(MAP_T map __unused)
{
	// fixme wrong for out-of-process maps
	// on platforms that support processes with two different page sizes
	return PAGE_MASK;
}
#endif


#define IMPL(T)                                                         \
	/* Round up to the given page mask. */                          \
	__attribute__((overloadable, used))                             \
	static inline T                                                 \
	vm_sanitize_map_round_page_mask(T addr, uint64_t pagemask) {    \
	        return (addr + (T)pagemask) & ~((T)pagemask);           \
	}                                                               \
                                                                        \
	/* Round up to the given page size. */                          \
	__attribute__((overloadable, used))                             \
	static inline T                                                 \
	round_up_page(T addr, uint64_t pagesize) {                      \
	        return vm_sanitize_map_round_page_mask(addr, pagesize - 1); \
	}                                                               \
                                                                        \
	/* Round up to the given map's page size. */                    \
	__attribute__((overloadable, used))                             \
	static inline T                                                 \
	round_up_map(MAP_T map, T addr) {                               \
	        return vm_sanitize_map_round_page_mask(addr, VM_MAP_PAGE_MASK(map)); \
	}                                                               \
                                                                        \
	/* Truncate to the given page mask. */                          \
	__attribute__((overloadable, used))                             \
	static inline T                                                 \
	vm_sanitize_map_trunc_page_mask(T addr, uint64_t pagemask)      \
	{                                                               \
	        return addr & ~((T)pagemask);                           \
	}                                                               \
                                                                        \
	/* Truncate to the given page size. */                          \
	__attribute__((overloadable, used))                             \
	static inline T                                                 \
	trunc_down_page(T addr, uint64_t pagesize)                      \
	{                                                               \
	        return vm_sanitize_map_trunc_page_mask(addr, pagesize - 1); \
	}                                                               \
                                                                        \
	/* Truncate to the given map's page size. */                    \
	__attribute__((overloadable, used))                             \
	static inline T                                                 \
	trunc_down_map(MAP_T map, T addr)                               \
	{                                                               \
	        return vm_sanitize_map_trunc_page_mask(addr, VM_MAP_PAGE_MASK(map)); \
	}                                                               \
                                                                        \
	__attribute__((overloadable, unavailable("use round_up_page instead"))) \
	extern T                                                        \
	round_up(T addr, uint64_t pagesize);                            \
	__attribute__((overloadable, unavailable("use trunc_down_page instead"))) \
	extern T                                                        \
	trunc_down(T addr, uint64_t pagesize);

IMPL(uint64_t)
IMPL(uint32_t)
#undef IMPL
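
// For example, with a 16KB page (pagemask 0x3fff):
//   round_up_page((uint64_t)0x1234, KB16)   == 0x4000
//   trunc_down_page((uint64_t)0x5678, KB16) == 0x4000
// and round_up_page((uint64_t)0, KB16) == 0 (already aligned).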


// duplicate the logic of VM's vm_map_range_overflows()
// false == good start+size combo, true == bad combo
#define IMPL(T)                                                         \
	__attribute__((overloadable, used))                             \
	static bool                                                     \
	vm_sanitize_range_overflows_allow_zero(T start, T size, T pgmask) \
	{                                                               \
	        if (size == 0) {                                        \
	                return false;                                   \
	        }                                                       \
                                                                        \
	        T sum;                                                  \
	        if (__builtin_add_overflow(start, size, &sum)) {        \
	                return true;                                    \
	        }                                                       \
                                                                        \
	        T aligned_start = vm_sanitize_map_trunc_page_mask(start, pgmask); \
	        T aligned_end = vm_sanitize_map_round_page_mask(start + size, pgmask); \
	        if (aligned_end <= aligned_start) {                     \
	                return true;                                    \
	        }                                                       \
                                                                        \
	        return false;                                           \
	}                                                               \
                                                                        \
	/* like vm_sanitize_range_overflows_allow_zero(), but without the */ \
	/* unconditional approval of size==0 */                         \
	__attribute__((overloadable, used))                             \
	static bool                                                     \
	vm_sanitize_range_overflows_strict_zero(T start, T size, T pgmask) \
	{                                                               \
	        T sum;                                                  \
	        if (__builtin_add_overflow(start, size, &sum)) {        \
	                return true;                                    \
	        }                                                       \
                                                                        \
	        T aligned_start = vm_sanitize_map_trunc_page_mask(start, pgmask); \
	        T aligned_end = vm_sanitize_map_round_page_mask(start + size, pgmask); \
	        if (aligned_end <= aligned_start) {                     \
	                return true;                                    \
	        }                                                       \
                                                                        \
	        return false;                                           \
	}

IMPL(uint64_t)
IMPL(uint32_t)
#undef IMPL
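
// For example, with pgmask 0x3fff:
//   vm_sanitize_range_overflows_allow_zero((uint64_t)0x4000, 0, 0x3fff)  == false
//       (size==0 is always approved)
//   vm_sanitize_range_overflows_strict_zero((uint64_t)0x4000, 0, 0x3fff) == true
//       (the range rounds to an empty [0x4000, 0x4000) span)
// Both reject start+size combinations that wrap, e.g. (UINT64_MAX, 2).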


// return true if the process is running under Rosetta translation
// https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment#Determine-Whether-Your-App-Is-Running-as-a-Translated-Binary
static bool
isRosetta()
{
#if KERNEL
	return false;
#else
	int out_value = 0;
	size_t io_size = sizeof(out_value);
	if (sysctlbyname("sysctl.proc_translated", &out_value, &io_size, NULL, 0) == 0) {
		assert(io_size >= sizeof(out_value));
		return out_value;
	}
	return false;
#endif
}

// Needed to distinguish Rosetta kernel runs from trial-name generation for kernel golden files.
#if KERNEL
#define kern_trialname_generation FALSE
#else
static bool kern_trialname_generation = FALSE;
#endif
static addr_t trial_page_size = 0;

static inline addr_t
adjust_page_size()
{
	addr_t test_page_size = PAGE_SIZE;
#if !KERNEL && __x86_64__
	// Handle kernel page size variation while recreating trial names for golden files in userspace.
	if (kern_trialname_generation && isRosetta()) {
		test_page_size = trial_page_size;
	}
#endif //  !KERNEL && __x86_64__
	return test_page_size;
}

#if KERNEL
// Knobs controlled from userspace (and passed in MSB of the file_descriptor)
extern bool kernel_generate_golden;
#else
// Knobs controlled by environment variables
extern bool dump;
extern bool generate_golden;
extern bool dump_golden;
extern int out_param_bad_count;
extern bool should_test_results;
static void
read_env()
{
	dump = (getenv("DUMP_RESULTS") != NULL);
	dump_golden = (getenv("DUMP_GOLDEN_IMAGE") != NULL);
	// Shouldn't do both
	generate_golden = (getenv("GENERATE_GOLDEN_IMAGE") != NULL) && !dump_golden;
	// Only test when no other golden image flag is set
	should_test_results = (getenv("SKIP_TESTS") == NULL) && !dump_golden && !generate_golden;
}
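
// Example invocations (binary name is illustrative):
//   DUMP_RESULTS=1 ./vm_parameter_validation          # print every trial result
//   GENERATE_GOLDEN_IMAGE=1 ./vm_parameter_validation # emit a new golden image
// DUMP_GOLDEN_IMAGE takes precedence over GENERATE_GOLDEN_IMAGE, and any
// golden-image flag (or SKIP_TESTS) disables result checking.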
#endif


/////////////////////////////////////////////////////
// String functions that work in both kernel and userspace.

// Test output function.
// This prints either to stdout (userspace tests) or to a userspace buffer (kernel sysctl tests).
// Golden test generation in userspace also writes to a buffer (GOLDEN_OUTPUT_BUF).
#if KERNEL
extern void testprintf(const char *, ...) __printflike(1, 2);
#define goldenprintf testprintf
#else
#define testprintf printf
extern void goldenprintf(const char *, ...) __printflike(1, 2);
#endif

// kstrdup() is like strdup() but in the kernel it uses kalloc_data()
static inline char *
kstrdup(const char *str)
{
#if KERNEL
	size_t size = strlen(str) + 1;
	char *copy = kalloc_data(size, Z_WAITOK | Z_ZERO);
	memcpy(copy, str, size);
	return copy;
#else
	return strdup(str);
#endif
}

// kfree_str() is like free() but in the kernel it uses kfree_data_addr()
static inline void
kfree_str(char *str)
{
#if KERNEL
	kfree_data_addr(str);
#else
	free(str);
#endif
}

// kasprintf() is like asprintf() but in the kernel it uses kalloc_data()

#if !KERNEL
#       define kasprintf asprintf
#else
extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
static inline int
kasprintf(char ** __restrict out_str, const char * __restrict format, ...) __printflike(2, 3)
{
	va_list args1, args2;

	// compute length
	char c;
	va_start(args1, format);
	va_copy(args2, args1);
	int len1 = vsnprintf(&c, sizeof(c), format, args1);
	va_end(args1);
	if (len1 < 0) {
		*out_str = NULL;
		return len1;
	}

	// allocate and print
	char *str = kalloc_data(len1 + 1, Z_NOFAIL);
	int len2 = vsnprintf(str, len1 + 1, format, args2);
	va_end(args2);
	if (len2 < 0) {
		kfree_data_addr(str);
		*out_str = NULL;
		return len2;  // report the failure, not the earlier length
	}
	assert(len1 == len2);

	*out_str = str;
	return len1;
}
// KERNEL
#endif


/////////////////////////////////////////////////////
// Record trials and return values from tested functions (BSD int or Mach kern_return_t)

// Maintain the list of known trials "smart" generator functions (trial formulae), as
// these are included in the golden result list (keeping the enum forces people to
// keep the list up-to-date when adding new functions).
#define TRIALSFORMULA_ENUM(VARIANT) \
	VARIANT(eUNKNOWN_TRIALS) \
	VARIANT(eSMART_VM_MAP_KERNEL_FLAGS_TRIALS) \
	VARIANT(eSMART_VM_INHERIT_TRIALS) \
	VARIANT(eSMART_MMAP_KERNEL_FLAGS_TRIALS) \
	VARIANT(eSMART_MMAP_FLAGS_TRIALS) \
	VARIANT(eSMART_GENERIC_FLAG_TRIALS) \
	VARIANT(eSMART_VM_TAG_TRIALS) \
	VARIANT(eSMART_VM_PROT_TRIALS) \
	VARIANT(eSMART_VM_PROT_PAIR_TRIALS) \
	VARIANT(eSMART_LEDGER_TAG_TRIALS) \
	VARIANT(eSMART_LEDGER_FLAG_TRIALS) \
	VARIANT(eSMART_ADDR_TRIALS) \
	VARIANT(eSMART_SIZE_TRIALS) \
	VARIANT(eSMART_START_SIZE_TRIALS) \
	VARIANT(eSMART_START_SIZE_OFFSET_OBJECT_TRIALS) \
	VARIANT(eSMART_START_SIZE_OFFSET_TRIALS) \
	VARIANT(eSMART_SIZE_SIZE_TRIALS) \
	VARIANT(eSMART_SRC_DST_SIZE_TRIALS) \
	VARIANT(eSMART_FILEOFF_DST_SIZE_TRIALS) \
	VARIANT(eSMART_VM_BEHAVIOR_TRIALS) \
	VARIANT(eSMART_VM_ADVISE_TRIALS) \
	VARIANT(eSMART_VM_SYNC_TRIALS) \
	VARIANT(eSMART_VM_MSYNC_TRIALS) \
	VARIANT(eSMART_VM_MACHINE_ATTRIBUTE_TRIALS) \
	VARIANT(eSMART_VM_PURGEABLE_AND_STATE_TRIALS) \
	VARIANT(eSMART_START_SIZE_START_SIZE_TRIALS) \
	VARIANT(eSMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS) \
	VARIANT(eSMART_RECLAMATION_BUFFER_INIT_TRIALS)

#define TRIALSFORMULA_ENUM_VARIANT(NAME) NAME,
typedef enum {
	TRIALSFORMULA_ENUM(TRIALSFORMULA_ENUM_VARIANT)
} trialsformula_t;
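
// The X-macro above expands, via TRIALSFORMULA_ENUM_VARIANT, to:
//   typedef enum {
//           eUNKNOWN_TRIALS,
//           eSMART_VM_MAP_KERNEL_FLAGS_TRIALS,
//           ...
//   } trialsformula_t;
// trialsformula_name() and trialsformula_from_string() below reuse the same
// list to map between enum values and their spellings.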

#define TRIALSARGUMENTS_NONE 0
#define TRIALSARGUMENTS_SIZE 2

// formula enum id to string
#define TRIALSFORMULA_ENUM_STRING(NAME) case NAME: return #NAME;
const char *
trialsformula_name(trialsformula_t formula)
{
	switch (formula) {
		TRIALSFORMULA_ENUM(TRIALSFORMULA_ENUM_STRING)
	default:
		testprintf("Unknown formula_t %d\n", formula);
		assert(false);
	}
}

#define TRIALSFORMULA_ENUM_FROM_STRING(NAME)    \
	if (strncmp(string, #NAME, strlen(#NAME)) == 0) return NAME;

// formula name to enum id
trialsformula_t
trialsformula_from_string(const char *string)
{
	TRIALSFORMULA_ENUM(TRIALSFORMULA_ENUM_FROM_STRING)
	// else
	testprintf("Unknown formula %s\n", string);
	assert(false);
}

// ret: return value of this trial
// name: name of this trial, including the input values passed in
typedef struct {
	int ret;
	char *name;
} result_t;

typedef struct {
	const char *testname;
	char *testconfig;
	trialsformula_t trialsformula;
	uint64_t trialsargs[TRIALSARGUMENTS_SIZE];
	unsigned capacity;
	unsigned count;
	unsigned tested_count;
	result_t list[];
} results_t;

extern results_t *golden_list[];
extern results_t *kern_list[];
static uint32_t num_tests = 0; // num of tests in golden list
static uint32_t num_kern_tests = 0; // num of tests in kernel results list

static __attribute__((overloadable))
results_t *
alloc_results(const char *testname, char *testconfig,
    trialsformula_t trialsformula, uint64_t trialsargs[static TRIALSARGUMENTS_SIZE],
    unsigned capacity)
{
	results_t *results;
#if KERNEL
	results = kalloc_type(results_t, result_t, capacity, Z_WAITOK | Z_ZERO);
#else
	results = calloc(sizeof(results_t) + capacity * sizeof(result_t), 1);
#endif
	assert(results != NULL);
	results->testname = testname;
	results->testconfig = testconfig;
	results->trialsformula = trialsformula;
	for (unsigned i = 0; i < TRIALSARGUMENTS_SIZE; i++) {
		results->trialsargs[i] = trialsargs[i];
	}
	results->capacity = capacity;
	results->count = 0;
	results->tested_count = 0;
	return results;
}

static char *
alloc_default_testconfig(void)
{
	char *result;
	kasprintf(&result, "%s %s %s%s",
	    OS_NAME, ARCH_NAME,
	    kern_trialname_generation ? "kernel" : CALLER_NAME,
	    !kern_trialname_generation && isRosetta() ? " rosetta" : "");
	return result;
}

static __attribute__((overloadable))
results_t *
alloc_results(const char *testname,
    trialsformula_t trialsformula, uint64_t *trialsargs, size_t trialsargs_count,
    unsigned capacity)
{
	assert(trialsargs_count == TRIALSARGUMENTS_SIZE);
	return alloc_results(testname, alloc_default_testconfig(), trialsformula, trialsargs, capacity);
}

static __attribute__((overloadable))
results_t *
alloc_results(const char *testname, trialsformula_t trialsformula, uint64_t trialsarg0, unsigned capacity)
{
	uint64_t trialsargs[TRIALSARGUMENTS_SIZE] = {trialsarg0, TRIALSARGUMENTS_NONE};
	return alloc_results(testname, trialsformula, trialsargs, TRIALSARGUMENTS_SIZE, capacity);
}

static __attribute__((overloadable))
results_t *
alloc_results(const char *testname, trialsformula_t trialsformula, unsigned capacity)
{
	uint64_t trialsargs[TRIALSARGUMENTS_SIZE] = {TRIALSARGUMENTS_NONE, TRIALSARGUMENTS_NONE};
	return alloc_results(testname, trialsformula, trialsargs, TRIALSARGUMENTS_SIZE, capacity);
}

static void __unused
dealloc_results(results_t *results)
{
	for (unsigned int i = 0; i < results->count; i++) {
		kfree_str(results->list[i].name);
	}
	kfree_str(results->testconfig);
#if KERNEL
	kfree_type(results_t, result_t, results->capacity, results);
#else
	free(results);
#endif
}

static void __attribute__((overloadable, unused))
append_result(results_t *results, int ret, const char *name)
{
	// halt if the results list is already full
	// fixme reallocate instead if we can't always choose the size in advance
	assert(results->count < results->capacity);

	// name may be freed before we make use of it
	char * name_cpy = kstrdup(name);
	assert(name_cpy);
	results->list[results->count++] =
	    (result_t){.ret = ret, .name = name_cpy};
}
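
// Typical lifecycle (sketch; run_one_trial() is an invented placeholder and
// the capacity/names are illustrative):
//
//   results_t *results = alloc_results("vm_inherit test",
//       eSMART_VM_INHERIT_TRIALS, trials->count);
//   for (unsigned i = 0; i < trials->count; i++) {
//           append_result(results, run_one_trial(&trials->list[i]),
//               trials->list[i].name);
//   }
//   process_results(results);   // dump, golden-generate, or compare (below)
//   dealloc_results(results);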


#define TESTNAME_DELIMITER        "TESTNAME "
#define RESULTCOUNT_DELIMITER     "RESULT COUNT "
#define TESTRESULT_DELIMITER      " "
#define TESTCONFIG_DELIMITER      "  TESTCONFIG "
#define TRIALSFORMULA_DELIMITER   "TRIALSFORMULA "
#define TRIALSARGUMENTS_DELIMITER "TRIALSARGUMENTS"
#define KERN_TESTRESULT_DELIMITER "  RESULT "

// print results, unformatted
// This output is read by populate_kernel_results()
// and by tools/format_vm_parameter_validation.py
static results_t *
__dump_results(results_t *results)
{
	testprintf(TESTNAME_DELIMITER "%s\n", results->testname);
	testprintf(RESULTCOUNT_DELIMITER "%d\n", results->count);
	testprintf(TESTCONFIG_DELIMITER "%s\n", results->testconfig);

	for (unsigned i = 0; i < results->count; i++) {
		testprintf(KERN_TESTRESULT_DELIMITER "%d, %s\n", results->list[i].ret, results->list[i].name);
	}

	results->tested_count += 1;
	return results;
}
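
// Example of the emitted format (values are illustrative):
//   TESTNAME some_test
//   RESULT COUNT 2
//     TESTCONFIG macos arm64 kernel
//     RESULT 0, vm_inherit VM_INHERIT_SHARE
//     RESULT -96, vm_inherit 0xffffffff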

// This output is read by populate_golden_results()
static results_t *
dump_golden_results(results_t *results)
{
	trial_page_size = PAGE_SIZE;
	goldenprintf(TESTNAME_DELIMITER "%s\n", results->testname);
	goldenprintf(TRIALSFORMULA_DELIMITER "%s %s %llu,%llu,%llu\n",
	    trialsformula_name(results->trialsformula), TRIALSARGUMENTS_DELIMITER,
	    results->trialsargs[0], results->trialsargs[1], trial_page_size);
	goldenprintf(RESULTCOUNT_DELIMITER "%d\n", results->count);

	for (unsigned i = 0; i < results->count; i++) {
		goldenprintf(TESTRESULT_DELIMITER "%d: %d\n", i, results->list[i].ret);
#if !KERNEL
		if (results->list[i].ret == OUT_PARAM_BAD) {
			out_param_bad_count += 1;
			T_FAIL("Out parameter violation in test %s - %s\n", results->testname, results->list[i].name);
		}
#endif
	}

	return results;
}

#if !KERNEL
// Comparator function for sorting result_t list by name
static int
compare_names(const void *a, const void *b)
{
	assert(((const result_t *)a)->name);
	assert(((const result_t *)b)->name);
	return strcmp(((const result_t *)a)->name, ((const result_t *)b)->name);
}

static unsigned
binary_search(result_t *list, unsigned count, const result_t *trial)
{
	assert(count > 0);
	const char *name = trial->name;
	// half-open interval [left, right) avoids unsigned wraparound when
	// the search falls off the low end of the list
	unsigned left = 0, right = count;
	while (left < right) {
		unsigned mid = left + (right - left) / 2;
		int cmp = strcmp(list[mid].name, name);
		if (cmp == 0) {
			return mid;
		} else if (cmp < 0) {
			left = mid + 1;
		} else {
			right = mid;
		}
	}
	return UINT_MAX; // Not found
}

static inline bool
trial_name_equals(const result_t *a, const result_t *b)
{
	// NB: the strlen check is needed so a shorter 'bname' cannot match a longer 'aname'.
	if (strlen(a->name) == strlen(b->name) && compare_names(a, b) == 0) {
		return true;
	}
	return false;
}

static const result_t *
get_golden_result(results_t *golden_results, const result_t *trial, unsigned trial_idx)
{
	if (golden_results->trialsformula == eUNKNOWN_TRIALS) {
		// golden results don't contain trial names
		T_LOG("%s: update test's alloc_results to have a valid trialsformula_t\n", golden_results->testname);
		return NULL;
	}

	if (trial_idx < golden_results->count &&
	    golden_results->list[trial_idx].name &&
	    trial_name_equals(&golden_results->list[trial_idx], trial)) {
		// "fast search" path taken when the golden file is in sync with the test.
		return &golden_results->list[trial_idx];
	}

	// "slow search" path taken when test indices are not aligned. Sort the array
	// by name and do a binary search.
	qsort(golden_results->list, golden_results->count, sizeof(result_t), compare_names);
	unsigned g_idx = binary_search(golden_results->list, golden_results->count, trial);
	if (g_idx < golden_results->count) {
		return &golden_results->list[g_idx];
	}

	return NULL;
}

static void
test_results(results_t *golden_results, results_t *results)
{
	bool passed = TRUE;
	unsigned result_count = results->count;
	unsigned acceptable_count = 0;
	const unsigned acceptable_max = 16;  // log up to this many ACCEPTABLE results
	const result_t *golden_result = NULL;
	if (golden_results->count != results->count) {
		T_LOG("%s: number of iterations mismatch (%u vs %u)",
		    results->testname, golden_results->count, results->count);
	}
	for (unsigned i = 0; i < result_count; i++) {
		golden_result = get_golden_result(golden_results, &results->list[i], i);
		if (golden_result) {
			if (results->list[i].ret == ACCEPTABLE) {
				// trial has declared itself to be correct
				// no matter what the golden result is
				acceptable_count++;
				if (acceptable_count <= acceptable_max) {
					T_LOG("%s RESULT ACCEPTABLE (expected %d), %s\n",
					    results->testname,
					    golden_result->ret, results->list[i].name);
				}
			} else if (results->list[i].ret != golden_result->ret) {
				T_FAIL("%s RESULT %d (expected %d), %s\n",
				    results->testname, results->list[i].ret,
				    golden_result->ret, results->list[i].name);
				passed = FALSE;
			}
		} else {
			// new trial not present in golden results
			T_FAIL("%s NEW RESULT %d, %s - (regenerate golden files to fix this)\n",
			    results->testname, results->list[i].ret, results->list[i].name);
			passed = FALSE;
		}
	}

	if (acceptable_count > acceptable_max) {
		T_LOG("%s %u more RESULT ACCEPTABLE trials not logged\n",
		    results->testname, acceptable_count - acceptable_max);
	}
	if (passed) {
		T_PASS("%s passed\n", results->testname);
	}
}
#endif

#if !KERNEL
static results_t *
test_name_to_golden_results(const char* testname);
#endif

static results_t *
process_results(results_t *results)
{
#if KERNEL
	if (kernel_generate_golden) {
		return dump_golden_results(results);
	} else {
		return __dump_results(results);
	}
#else
	results_t *golden_results = NULL;

	if (dump && !generate_golden) {
		__dump_results(results);
	}

	if (generate_golden) {
		dump_golden_results(results);
	}

	if (should_test_results) {
		golden_results = test_name_to_golden_results(results->testname);

		if (golden_results) {
			test_results(golden_results, results);
		} else {
			T_FAIL("New test %s found, update golden list to allow return code testing", results->testname);
			// Dump results if not done previously
			if (!dump) {
				__dump_results(results);
			}
		}
	}

	return results;
#endif
}

static inline mach_vm_address_t
truncate_vm_map_addr_with_flags(MAP_T map, mach_vm_address_t addr, int flags)
{
	mach_vm_address_t truncated_addr = addr;
	if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) {
		// VM_FLAGS_RETURN_4K_DATA_ADDR means return a 4k aligned address rather than the
		// base of the page. Truncate to 4k.
		truncated_addr = trunc_down_page(addr, KB4);
	} else if (flags & VM_FLAGS_RETURN_DATA_ADDR) {
		// On VM_FLAGS_RETURN_DATA_ADDR, we expect to get back the unaligned address.
		// Don't truncate.
	} else {
		// Otherwise we truncate to the map page size
		truncated_addr = trunc_down_map(map, addr);
	}
	return truncated_addr;
}


static inline mach_vm_address_t
get_expected_remap_misalignment(MAP_T map, mach_vm_address_t addr, int flags)
{
	mach_vm_address_t misalignment;
	if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) {
		// VM_FLAGS_RETURN_4K_DATA_ADDR means return a 4k aligned address rather than the
		// base of the page. The misalignment is relative to the first 4k page
		misalignment = addr - trunc_down_page(addr, KB4);
	} else if (flags & VM_FLAGS_RETURN_DATA_ADDR) {
		// On VM_FLAGS_RETURN_DATA_ADDR, we expect to get back the unaligned address.
		// The misalignment is therefore the low bits
		misalignment = addr - trunc_down_map(map, addr);
	} else {
		// Otherwise we expect it to be aligned
		misalignment = 0;
	}
	return misalignment;
}
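
// For example, with a 16KB map page size and addr = 0x12345:
//   VM_FLAGS_RETURN_4K_DATA_ADDR -> misalignment 0x345  (addr - 4K truncation 0x12000)
//   VM_FLAGS_RETURN_DATA_ADDR    -> misalignment 0x2345 (addr - 16K truncation 0x10000)
//   neither flag                 -> misalignment 0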

// absolute and relative offsets are used to specify a trial's values

typedef struct {
	bool is_absolute;
	addr_t offset;
} absolute_or_relative_offset_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	absolute_or_relative_offset_t list[];
} offset_list_t;

static offset_list_t *
allocate_offsets(unsigned capacity)
{
	offset_list_t *offsets;
#if KERNEL
	offsets = kalloc_type(offset_list_t, absolute_or_relative_offset_t, capacity, Z_WAITOK | Z_ZERO);
#else
	offsets = calloc(sizeof(offset_list_t) + capacity * sizeof(absolute_or_relative_offset_t), 1);
#endif
	offsets->count = 0;
	offsets->capacity = capacity;
	return offsets;
}

static void
append_offset(offset_list_t *offsets, bool is_absolute, addr_t offset)
{
	assert(offsets->count < offsets->capacity);
	offsets->list[offsets->count].is_absolute = is_absolute;
	offsets->list[offsets->count].offset = offset;
	offsets->count++;
}
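
// Illustrative use (assuming an absolute offset names an address directly and
// a relative offset is applied to a trial's base value):
//   append_offset(offsets, true, 0);       // try the value 0 itself
//   append_offset(offsets, false, KB16);   // try base + 16KB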


/////////////////////////////////////////////////////
// Generation of trials and their parameter values
// A "trial" is a single execution of a function to be tested

#if KERNEL
#define ALLOC_TRIALS(NAME, new_capacity)                                \
	(NAME ## _trials_t *)kalloc_type(NAME ## _trials_t, NAME ## _trial_t, \
	                                 new_capacity, Z_WAITOK | Z_ZERO)
#define FREE_TRIALS(NAME, trials)                                       \
	kfree_type(NAME ## _trials_t, NAME ## _trial_t, trials->capacity, trials)
#else
#define ALLOC_TRIALS(NAME, new_capacity)                                \
	(NAME ## _trials_t *)calloc(sizeof(NAME ## _trials_t) + (new_capacity) * sizeof(NAME ## _trial_t), 1)
#define FREE_TRIALS(NAME, trials)               \
	free(trials)
#endif

#define TRIALS_IMPL(NAME)                                               \
	static NAME ## _trials_t *                                      \
	__attribute__((used))                                           \
	allocate_ ## NAME ## _trials(unsigned capacity)                 \
	{                                                               \
	        NAME ## _trials_t *trials = ALLOC_TRIALS(NAME, capacity); \
	        assert(trials);                                         \
	        trials->count = 0;                                      \
	        trials->capacity = capacity;                            \
	        return trials;                                          \
	}                                                               \
                                                                        \
	static void __attribute__((overloadable, used))                 \
	free_trials(NAME ## _trials_t *trials)                          \
	{                                                               \
	        FREE_TRIALS(NAME, trials);                              \
	}                                                               \
                                                                        \
	static void __attribute__((overloadable, used))                 \
	append_trial(NAME ## _trials_t *trials, NAME ## _trial_t new_trial) \
	{                                                               \
	        assert(trials->count < trials->capacity);               \
	        trials->list[trials->count++] = new_trial;              \
	}                                                               \
                                                                        \
	static void __attribute__((overloadable, used))                 \
	append_trials(NAME ## _trials_t *trials, NAME ## _trial_t *new_trials, unsigned new_count) \
	{                                                               \
	        for (unsigned i = 0; i < new_count; i++) {              \
	                append_trial(trials, new_trials[i]);            \
	        }                                                       \
	}
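
// TRIALS_IMPL(NAME) expects NAME ## _trial_t and NAME ## _trials_t to be
// defined, and generates four functions. For example, TRIALS_IMPL(vm_inherit)
// (below) defines:
//   allocate_vm_inherit_trials(unsigned capacity)
//   free_trials(vm_inherit_trials_t *)                       // overloadable
//   append_trial(vm_inherit_trials_t *, vm_inherit_trial_t)  // overloadable
//   append_trials(vm_inherit_trials_t *, vm_inherit_trial_t *, unsigned)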

// allocate vm_inherit_t trials, and deallocate it at end of scope
#define SMART_VM_INHERIT_TRIALS()                                               \
	__attribute__((cleanup(cleanup_vm_inherit_trials)))             \
	= allocate_vm_inherit_trials(countof(vm_inherit_trials_values));        \
	append_trials(trials, vm_inherit_trials_values, countof(vm_inherit_trials_values))
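
// Usage sketch: the macro is spliced onto a declaration that must be named
// 'trials' (the expansion references that name), e.g.:
//   vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
// which allocates the list, fills it, and auto-frees it at end of scope.
// The other SMART_*_TRIALS macros below follow the same pattern.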

// generate vm_inherit_t trials

typedef struct {
	vm_inherit_t value;
	const char * name;
} vm_inherit_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_inherit_trial_t list[];
} vm_inherit_trials_t;


#define VM_INHERIT_TRIAL(new_value) \
	(vm_inherit_trial_t) {.value = (vm_inherit_t)(new_value), .name = "vm_inherit " #new_value}

static_assert(VM_INHERIT_LAST_VALID == VM_INHERIT_NONE,
    "Update this test with new vm_inherit_t values");
static vm_inherit_trial_t vm_inherit_trials_values[] = {
	VM_INHERIT_TRIAL(VM_INHERIT_SHARE),
	VM_INHERIT_TRIAL(VM_INHERIT_COPY),
	VM_INHERIT_TRIAL(VM_INHERIT_NONE),
	// end valid ones
	// note: VM_INHERIT_DONATE_COPY is invalid and unimplemented
	// VM_INHERIT_LAST_VALID correctly excludes VM_INHERIT_DONATE_COPY
	VM_INHERIT_TRIAL(VM_INHERIT_LAST_VALID + 1),
	VM_INHERIT_TRIAL(VM_INHERIT_LAST_VALID + 2),
	VM_INHERIT_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_inherit)

static void
cleanup_vm_inherit_trials(vm_inherit_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_behavior_t trials, and deallocate it at end of scope
#define SMART_VM_BEHAVIOR_TRIALS()                                               \
	__attribute__((cleanup(cleanup_vm_behavior_trials)))             \
	= allocate_vm_behavior_trials(countof(vm_behavior_trials_values));        \
	append_trials(trials, vm_behavior_trials_values, countof(vm_behavior_trials_values))

// generate vm_behavior_t trials

typedef struct {
	vm_behavior_t value;
	const char * name;
} vm_behavior_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_behavior_trial_t list[];
} vm_behavior_trials_t;


#define VM_BEHAVIOR_TRIAL(new_value) \
	(vm_behavior_trial_t) {.value = (vm_behavior_t)(new_value), .name = "vm_behavior " #new_value}

static vm_behavior_trial_t vm_behavior_trials_values[] = {
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_DEFAULT),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_RANDOM),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_SEQUENTIAL),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_RSEQNTL),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_WILLNEED),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_DONTNEED),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_FREE),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_ZERO_WIRED_PAGES),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_REUSABLE),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_REUSE),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_CAN_REUSE),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_PAGEOUT),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_ZERO),
	// end valid ones
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_LAST_VALID + 1),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_LAST_VALID + 2),
	VM_BEHAVIOR_TRIAL(0x12345),
	VM_BEHAVIOR_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_behavior)

static void
cleanup_vm_behavior_trials(vm_behavior_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_sync_t trials, and deallocate it at end of scope
#define SMART_VM_SYNC_TRIALS()                                               \
	__attribute__((cleanup(cleanup_vm_sync_trials)))             \
	= allocate_vm_sync_trials(countof(vm_sync_trials_values));        \
	append_trials(trials, vm_sync_trials_values, countof(vm_sync_trials_values))

// generate vm_sync_t trials

typedef struct {
	vm_sync_t value;
	const char * name;
} vm_sync_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_sync_trial_t list[];
} vm_sync_trials_t;


#define VM_SYNC_TRIAL(new_value) \
	(vm_sync_trial_t) {.value = (vm_sync_t)(new_value), .name = "vm_sync_t " #new_value}

static vm_sync_trial_t vm_sync_trials_values[] = {
	VM_SYNC_TRIAL(0),
	// start valid values
	VM_SYNC_TRIAL(VM_SYNC_ASYNCHRONOUS),
	VM_SYNC_TRIAL(VM_SYNC_SYNCHRONOUS),
	VM_SYNC_TRIAL(VM_SYNC_INVALIDATE),
	VM_SYNC_TRIAL(VM_SYNC_KILLPAGES),
	VM_SYNC_TRIAL(VM_SYNC_DEACTIVATE),
	VM_SYNC_TRIAL(VM_SYNC_CONTIGUOUS),
	VM_SYNC_TRIAL(VM_SYNC_REUSABLEPAGES),
	// end valid values
	VM_SYNC_TRIAL(1u << 7),
	VM_SYNC_TRIAL(1u << 8),
	VM_SYNC_TRIAL(1u << 9),
	VM_SYNC_TRIAL(1u << 10),
	VM_SYNC_TRIAL(1u << 11),
	VM_SYNC_TRIAL(1u << 12),
	VM_SYNC_TRIAL(1u << 13),
	VM_SYNC_TRIAL(1u << 14),
	VM_SYNC_TRIAL(1u << 15),
	VM_SYNC_TRIAL(1u << 16),
	VM_SYNC_TRIAL(1u << 17),
	VM_SYNC_TRIAL(1u << 18),
	VM_SYNC_TRIAL(1u << 19),
	VM_SYNC_TRIAL(1u << 20),
	VM_SYNC_TRIAL(1u << 21),
	VM_SYNC_TRIAL(1u << 22),
	VM_SYNC_TRIAL(1u << 23),
	VM_SYNC_TRIAL(1u << 24),
	VM_SYNC_TRIAL(1u << 25),
	VM_SYNC_TRIAL(1u << 26),
	VM_SYNC_TRIAL(1u << 27),
	VM_SYNC_TRIAL(1u << 28),
	VM_SYNC_TRIAL(1u << 29),
	VM_SYNC_TRIAL(1u << 30),
	VM_SYNC_TRIAL(1u << 31),
	VM_SYNC_TRIAL(VM_SYNC_ASYNCHRONOUS | VM_SYNC_SYNCHRONOUS),
	VM_SYNC_TRIAL(VM_SYNC_ASYNCHRONOUS | (1u << 7)),
	VM_SYNC_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_sync)

static void
cleanup_vm_sync_trials(vm_sync_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_msync_t trials, and deallocate it at end of scope
#define SMART_VM_MSYNC_TRIALS()                                               \
	__attribute__((cleanup(cleanup_vm_msync_trials)))             \
	= allocate_vm_msync_trials(countof(vm_msync_trials_values));        \
	append_trials(trials, vm_msync_trials_values, countof(vm_msync_trials_values))

// generate vm_msync_t trials

typedef struct {
	int value;
	const char * name;
} vm_msync_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_msync_trial_t list[];
} vm_msync_trials_t;


#define VM_MSYNC_TRIAL(new_value) \
	(vm_msync_trial_t) {.value = (int)(new_value), .name = "vm_msync_t " #new_value}

static vm_msync_trial_t vm_msync_trials_values[] = {
	VM_MSYNC_TRIAL(0),
	// start valid values
	VM_MSYNC_TRIAL(MS_ASYNC),
	VM_MSYNC_TRIAL(MS_INVALIDATE),
	VM_MSYNC_TRIAL(MS_KILLPAGES),
	VM_MSYNC_TRIAL(MS_DEACTIVATE),
	VM_MSYNC_TRIAL(MS_SYNC),
	VM_MSYNC_TRIAL(MS_ASYNC | MS_INVALIDATE),
	// end valid values
	VM_MSYNC_TRIAL(1u << 5),
	VM_MSYNC_TRIAL(1u << 6),
	VM_MSYNC_TRIAL(1u << 7),
	VM_MSYNC_TRIAL(1u << 8),
	VM_MSYNC_TRIAL(1u << 9),
	VM_MSYNC_TRIAL(1u << 10),
	VM_MSYNC_TRIAL(1u << 11),
	VM_MSYNC_TRIAL(1u << 12),
	VM_MSYNC_TRIAL(1u << 13),
	VM_MSYNC_TRIAL(1u << 14),
	VM_MSYNC_TRIAL(1u << 15),
	VM_MSYNC_TRIAL(1u << 16),
	VM_MSYNC_TRIAL(1u << 17),
	VM_MSYNC_TRIAL(1u << 18),
	VM_MSYNC_TRIAL(1u << 19),
	VM_MSYNC_TRIAL(1u << 20),
	VM_MSYNC_TRIAL(1u << 21),
	VM_MSYNC_TRIAL(1u << 22),
	VM_MSYNC_TRIAL(1u << 23),
	VM_MSYNC_TRIAL(1u << 24),
	VM_MSYNC_TRIAL(1u << 25),
	VM_MSYNC_TRIAL(1u << 26),
	VM_MSYNC_TRIAL(1u << 27),
	VM_MSYNC_TRIAL(1u << 28),
	VM_MSYNC_TRIAL(1u << 29),
	VM_MSYNC_TRIAL(1u << 30),
	VM_MSYNC_TRIAL(1u << 31),
	VM_MSYNC_TRIAL(MS_ASYNC | MS_SYNC),
	VM_MSYNC_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_msync)

static void __attribute__((used))
cleanup_vm_msync_trials(vm_msync_trials_t **trials)
{
	free_trials(*trials);
}


// allocate advise_t trials, and deallocate it at end of scope
#define SMART_VM_ADVISE_TRIALS()                                           \
	__attribute__((cleanup(cleanup_advise_trials)))                 \
	= allocate_vm_advise_trials(countof(vm_advise_trials_values));        \
	append_trials(trials, vm_advise_trials_values, countof(vm_advise_trials_values))

// generate advise_t trials

typedef struct {
	int value;
	const char * name;
} vm_advise_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_advise_trial_t list[];
} vm_advise_trials_t;


#define ADVISE_TRIAL(new_value) \
	(vm_advise_trial_t) {.value = (int)(new_value), .name = "advise " #new_value}

static vm_advise_trial_t vm_advise_trials_values[] = {
	ADVISE_TRIAL(MADV_NORMAL),
	ADVISE_TRIAL(MADV_RANDOM),
	ADVISE_TRIAL(MADV_SEQUENTIAL),
	ADVISE_TRIAL(MADV_WILLNEED),
	ADVISE_TRIAL(MADV_DONTNEED),
	ADVISE_TRIAL(MADV_FREE),
	ADVISE_TRIAL(MADV_ZERO_WIRED_PAGES),
	ADVISE_TRIAL(MADV_FREE_REUSABLE),
	ADVISE_TRIAL(MADV_FREE_REUSE),
	ADVISE_TRIAL(MADV_CAN_REUSE),
	ADVISE_TRIAL(MADV_PAGEOUT),
	ADVISE_TRIAL(MADV_ZERO),
	// end valid ones
	ADVISE_TRIAL(MADV_ZERO + 1),
	ADVISE_TRIAL(MADV_ZERO + 2),
	ADVISE_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_advise)

static void __attribute__((used))
cleanup_advise_trials(vm_advise_trials_t **trials)
{
	free_trials(*trials);
}

// allocate machine_attribute_t trials, and deallocate it at end of scope
#define SMART_VM_MACHINE_ATTRIBUTE_TRIALS()                                           \
	__attribute__((cleanup(cleanup_vm_machine_attribute_trials)))                 \
	= allocate_vm_machine_attribute_trials(countof(vm_machine_attribute_trials_values));        \
	append_trials(trials, vm_machine_attribute_trials_values, countof(vm_machine_attribute_trials_values))

// generate vm_machine_attribute_t trials

typedef struct {
	vm_machine_attribute_t value;
	const char * name;
} vm_machine_attribute_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_machine_attribute_trial_t list[];
} vm_machine_attribute_trials_t;


#define VM_MACHINE_ATTRIBUTE_TRIAL(new_value) \
	(vm_machine_attribute_trial_t) {.value = (vm_machine_attribute_t)(new_value), .name = "vm_machine_attribute_t " #new_value}

static vm_machine_attribute_trial_t vm_machine_attribute_trials_values[] = {
	VM_MACHINE_ATTRIBUTE_TRIAL(0),
	// start valid ones
	VM_MACHINE_ATTRIBUTE_TRIAL(MATTR_CACHE),
	VM_MACHINE_ATTRIBUTE_TRIAL(MATTR_MIGRATE),
	VM_MACHINE_ATTRIBUTE_TRIAL(MATTR_REPLICATE),
	// end valid ones
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 3),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 4),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 5),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 6),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 7),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 8),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 9),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 10),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 11),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 12),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 13),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 14),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 15),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 16),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 17),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 18),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 19),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 20),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 21),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 22),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 23),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 24),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 25),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 26),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 27),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 28),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 29),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 30),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 31),
};

TRIALS_IMPL(vm_machine_attribute)

static void
cleanup_vm_machine_attribute_trials(vm_machine_attribute_trials_t **trials)
{
	free_trials(*trials);
}
1381 
1382 // allocate vm_map_kernel_flags trials, and deallocate it at end of scope
1383 #define SMART_VM_MAP_KERNEL_FLAGS_TRIALS()                              \
1384 	__attribute__((cleanup(cleanup_vm_map_kernel_flags_trials)))    \
1385 	= generate_vm_map_kernel_flags_trials()
1386 
1387 
1388 // generate vm_map_kernel_flags_t trials
1389 
1390 typedef struct {
1391 	int flags;
1392 	char * name;
1393 } vm_map_kernel_flags_trial_t;
1394 
1395 typedef struct {
1396 	unsigned count;
1397 	unsigned capacity;
1398 	vm_map_kernel_flags_trial_t list[];
1399 } vm_map_kernel_flags_trials_t;
1400 
1401 #define VM_MAP_KERNEL_FLAGS_TRIAL(new_flags) \
1402 	(vm_map_kernel_flags_trial_t) {.flags = (int)(new_flags), .name ="vm_map_kernel_flags " #new_flags}
1403 
TRIALS_IMPL(vm_map_kernel_flags)1404 TRIALS_IMPL(vm_map_kernel_flags)
1405 
1406 static vm_map_kernel_flags_trials_t *
1407 generate_prefixed_vm_map_kernel_flags_trials(int prefix_flags, const char *prefix_name)
1408 {
1409 	vm_map_kernel_flags_trials_t *trials;
1410 	trials = allocate_vm_map_kernel_flags_trials(32);
1411 
1412 	char *str;
1413 #define APPEND(flag)                                                    \
1414 	({                                                              \
1415 	        kasprintf(&str, "vm_map_kernel_flags %s%s%s", \
1416 	            prefix_name, prefix_flags == 0 ? "" : " | ", #flag); \
1417 	        append_trial(trials, (vm_map_kernel_flags_trial_t){ prefix_flags | (int)flag, str }); \
1418 	})
1419 
1420 	// First trial is just the prefix flags set, if any.
1421 	// (either ANYWHERE or FIXED | OVERWRITE)
1422 	if (prefix_flags != 0) {
1423 		kasprintf(&str, "vm_map_kernel_flags %s", prefix_name);
1424 		append_trial(trials, (vm_map_kernel_flags_trial_t){ prefix_flags, str });
1425 	}
1426 
1427 	// Try each other flag with the prefix flags.
1428 	// Skip FIXED and ANYWHERE and OVERWRITE because they cause
1429 	// memory management changes that the caller may not be prepared for.
1430 	// skip 0x00000000 VM_FLAGS_FIXED
1431 	// skip 0x00000001 VM_FLAGS_ANYWHERE
1432 	APPEND(VM_FLAGS_PURGABLE);
1433 	APPEND(VM_FLAGS_4GB_CHUNK);
1434 	APPEND(VM_FLAGS_RANDOM_ADDR);
1435 	APPEND(VM_FLAGS_NO_CACHE);
1436 	APPEND(VM_FLAGS_RESILIENT_CODESIGN);
1437 	APPEND(VM_FLAGS_RESILIENT_MEDIA);
1438 	APPEND(VM_FLAGS_PERMANENT);
1439 	// skip 0x00001000 VM_FLAGS_TPRO; it only works on some hardware.
1440 	APPEND(0x00002000);
1441 	// skip 0x00004000 VM_FLAGS_OVERWRITE
1442 	APPEND(0x00008000);
1443 	APPEND(VM_FLAGS_SUPERPAGE_MASK); // 0x10000, 0x20000, 0x40000
1444 	APPEND(0x00080000);
1445 	APPEND(VM_FLAGS_RETURN_DATA_ADDR);
1446 	APPEND(VM_FLAGS_RETURN_4K_DATA_ADDR);
1447 	APPEND(VM_FLAGS_ALIAS_MASK);
1448 
1449 	return trials;
1450 }
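/*
 * For example, with prefix_name "VM_FLAGS_ANYWHERE", APPEND(VM_FLAGS_PURGABLE)
 * above yields a trial named "vm_map_kernel_flags VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE".
 */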
1451 
1452 static vm_map_kernel_flags_trials_t *
1453 generate_vm_map_kernel_flags_trials()
1454 {
1455 	vm_map_kernel_flags_trials_t *fixed =
1456 	    generate_prefixed_vm_map_kernel_flags_trials(
1457 		VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, "VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE");
1458 	vm_map_kernel_flags_trials_t *anywhere =
1459 	    generate_prefixed_vm_map_kernel_flags_trials(
1460 		VM_FLAGS_ANYWHERE, "VM_FLAGS_ANYWHERE");
1461 	vm_map_kernel_flags_trials_t *trials =
1462 	    allocate_vm_map_kernel_flags_trials(fixed->count + anywhere->count);
1463 	append_trials(trials, fixed->list, fixed->count);
1464 	append_trials(trials, anywhere->list, anywhere->count);
1465 
1466 	// plain free, not cleanup: trials has stolen their strings
1467 	free_trials(fixed);
1468 	free_trials(anywhere);
1469 
1470 	return trials;
1471 }
1472 
1473 static void
1474 cleanup_vm_map_kernel_flags_trials(vm_map_kernel_flags_trials_t **trials)
1475 {
1476 	for (size_t i = 0; i < (*trials)->count; i++) {
1477 		kfree_str((*trials)->list[i].name);
1478 	}
1479 	free_trials(*trials);
1480 }
1481 
1482 
1483 // generate mmap flags trials
1484 
1485 typedef struct {
1486 	int flags;
1487 	const char *name;
1488 } mmap_flags_trial_t;
1489 
1490 typedef struct {
1491 	unsigned count;
1492 	unsigned capacity;
1493 	mmap_flags_trial_t list[];
1494 } mmap_flags_trials_t;
1495 
1496 #define MMAP_FLAGS_TRIAL(new_flags)                                             \
1497 	(mmap_flags_trial_t){ .flags = (int)(new_flags), .name = "mmap flags "#new_flags }
1498 
1499 static mmap_flags_trial_t mmap_flags_trials_values[] = {
1500 	MMAP_FLAGS_TRIAL(MAP_FILE),
1501 	MMAP_FLAGS_TRIAL(MAP_ANON),
1502 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_SHARED),
1503 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE),
1504 	MMAP_FLAGS_TRIAL(MAP_ANON | MAP_SHARED),
1505 	MMAP_FLAGS_TRIAL(MAP_ANON | MAP_PRIVATE),
1506 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_SHARED | MAP_PRIVATE),
1507 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_FIXED),
1508 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RENAME),
1509 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NORESERVE),
1510 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESERVED0080),
1511 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NOEXTEND),
1512 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_HASSEMAPHORE),
1513 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NOCACHE),
1514 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_JIT),
1515 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_CODESIGN),
1516 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_MEDIA),
1517 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_TRANSLATED_ALLOW_EXECUTE),
1518 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_UNIX03),
1519 	// skip MAP_TPRO; it only works on some hardware
1520 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 3),
1521 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 4),
1522 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 5),
1523 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 6),
1524 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 7),
1525 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 8),
1526 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 9),
1527 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 10),
1528 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 11),
1529 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 12),
1530 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 13),
1531 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 14),
1532 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 15),
1533 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 16),
1534 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 17),
1535 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 18),
1536 	// skip MAP_TPRO (1<<19); it only works on some hardware
1537 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 20),
1538 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 21),
1539 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 22),
1540 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 23),
1541 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 24),
1542 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 25),
1543 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 26),
1544 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 27),
1545 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 28),
1546 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 29),
1547 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 30),
1548 	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 31),
1549 };
1550 
1551 TRIALS_IMPL(mmap_flags)
1552 
1553 static void
1554 cleanup_mmap_flags_trials(mmap_flags_trials_t **trials)
1555 {
1556 	free_trials(*trials);
1557 }
1558 
1559 // allocate mmap_flag trials, and deallocate it at end of scope
1560 #define SMART_MMAP_FLAGS_TRIALS()                                               \
1561 	__attribute__((cleanup(cleanup_mmap_flags_trials)))             \
1562 	= allocate_mmap_flags_trials(countof(mmap_flags_trials_values));        \
1563 	append_trials(trials, mmap_flags_trials_values, countof(mmap_flags_trials_values))
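/*
 * Note: unlike the generate_*-based SMART macros, the append-style SMART
 * macros also expand to an append_trials() statement that hard-codes the
 * variable name `trials`, so the declared variable must use exactly that
 * name. A minimal sketch:
 *
 *     mmap_flags_trials_t *trials SMART_MMAP_FLAGS_TRIALS();   // compiles
 *     mmap_flags_trials_t *other SMART_MMAP_FLAGS_TRIALS();    // does not
 */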
1564 
1565 // generate generic flag trials
1566 
1567 typedef struct {
1568 	int flag;
1569 	const char *name;
1570 } generic_flag_trial_t;
1571 
1572 typedef struct {
1573 	unsigned count;
1574 	unsigned capacity;
1575 	generic_flag_trial_t list[];
1576 } generic_flag_trials_t;
1577 
1578 #define GENERIC_FLAG_TRIAL(new_flag)                                            \
1579 	(generic_flag_trial_t){ .flag = (int)(new_flag), .name = "generic flag "#new_flag }
1580 
1581 static generic_flag_trial_t generic_flag_trials_values[] = {
1582 	GENERIC_FLAG_TRIAL(0),
1583 	GENERIC_FLAG_TRIAL(1),
1584 	GENERIC_FLAG_TRIAL(2),
1585 	GENERIC_FLAG_TRIAL(3),
1586 	GENERIC_FLAG_TRIAL(4),
1587 	GENERIC_FLAG_TRIAL(5),
1588 	GENERIC_FLAG_TRIAL(6),
1589 	GENERIC_FLAG_TRIAL(7),
1590 	GENERIC_FLAG_TRIAL(1u << 3),
1591 	GENERIC_FLAG_TRIAL(1u << 4),
1592 	GENERIC_FLAG_TRIAL(1u << 5),
1593 	GENERIC_FLAG_TRIAL(1u << 6),
1594 	GENERIC_FLAG_TRIAL(1u << 7),
1595 	GENERIC_FLAG_TRIAL(1u << 8),
1596 	GENERIC_FLAG_TRIAL(1u << 9),
1597 	GENERIC_FLAG_TRIAL(1u << 10),
1598 	GENERIC_FLAG_TRIAL(1u << 11),
1599 	GENERIC_FLAG_TRIAL(1u << 12),
1600 	GENERIC_FLAG_TRIAL(1u << 13),
1601 	GENERIC_FLAG_TRIAL(1u << 14),
1602 	GENERIC_FLAG_TRIAL(1u << 15),
1603 	GENERIC_FLAG_TRIAL(1u << 16),
1604 	GENERIC_FLAG_TRIAL(1u << 17),
1605 	GENERIC_FLAG_TRIAL(1u << 18),
1606 	GENERIC_FLAG_TRIAL(1u << 19),
1607 	GENERIC_FLAG_TRIAL(1u << 20),
1608 	GENERIC_FLAG_TRIAL(1u << 21),
1609 	GENERIC_FLAG_TRIAL(1u << 22),
1610 	GENERIC_FLAG_TRIAL(1u << 23),
1611 	GENERIC_FLAG_TRIAL(1u << 24),
1612 	GENERIC_FLAG_TRIAL(1u << 25),
1613 	GENERIC_FLAG_TRIAL(1u << 26),
1614 	GENERIC_FLAG_TRIAL(1u << 27),
1615 	GENERIC_FLAG_TRIAL(1u << 28),
1616 	GENERIC_FLAG_TRIAL(1u << 29),
1617 	GENERIC_FLAG_TRIAL(1u << 30),
1618 	GENERIC_FLAG_TRIAL(1u << 31),
1619 };
1620 
1621 TRIALS_IMPL(generic_flag)
1622 
1623 static void
1624 cleanup_generic_flag_trials(generic_flag_trials_t **trials)
1625 {
1626 	free_trials(*trials);
1627 }
1628 
1629 // allocate generic flag trials, and deallocate it at end of scope
1630 #define SMART_GENERIC_FLAG_TRIALS()                                             \
1631 	__attribute__((cleanup(cleanup_generic_flag_trials)))           \
1632 	= allocate_generic_flag_trials(countof(generic_flag_trials_values));    \
1633 	append_trials(trials, generic_flag_trials_values, countof(generic_flag_trials_values))
1634 
1635 
1636 // generate vm_tag_t trials
1637 
1638 #ifndef KERNEL
1639 typedef int vm_tag_t;
1640 #endif /* !KERNEL */
1641 
1642 typedef struct {
1643 	vm_tag_t tag;
1644 	const char *name;
1645 } vm_tag_trial_t;
1646 
1647 typedef struct {
1648 	unsigned count;
1649 	unsigned capacity;
1650 	vm_tag_trial_t list[];
1651 } vm_tag_trials_t;
1652 
1653 #if KERNEL
1654 #define KERNEL_VM_TAG_TRIAL(new_tag)     \
1655 	(vm_tag_trial_t){ .tag = (vm_tag_t)(new_tag), .name = "vm_tag "#new_tag }
1656 
1657 #define VM_TAG_TRIAL KERNEL_VM_TAG_TRIAL
1658 #else
1659 #define USER_VM_TAG_TRIAL(new_tag)      \
1660 	(vm_tag_trial_t){ .tag = (vm_tag_t)0, .name = "vm_tag "#new_tag }
1661 
1662 #define VM_TAG_TRIAL USER_VM_TAG_TRIAL
1663 #endif
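/*
 * In the user-space build, USER_VM_TAG_TRIAL never evaluates its argument;
 * it records tag 0 and only stringifies the constant's name. For example,
 * VM_TAG_TRIAL(VM_KERN_MEMORY_BSD) yields
 * { .tag = 0, .name = "vm_tag VM_KERN_MEMORY_BSD" },
 * keeping trial names aligned between kernel and user test runs.
 */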
1664 
1665 static vm_tag_trial_t vm_tag_trials_values[] = {
1666 	VM_TAG_TRIAL(VM_KERN_MEMORY_NONE),
1667 	VM_TAG_TRIAL(VM_KERN_MEMORY_OSFMK),
1668 	VM_TAG_TRIAL(VM_KERN_MEMORY_BSD),
1669 	VM_TAG_TRIAL(VM_KERN_MEMORY_IOKIT),
1670 	VM_TAG_TRIAL(VM_KERN_MEMORY_LIBKERN),
1671 	VM_TAG_TRIAL(VM_KERN_MEMORY_OSKEXT),
1672 	VM_TAG_TRIAL(VM_KERN_MEMORY_KEXT),
1673 	VM_TAG_TRIAL(VM_KERN_MEMORY_IPC),
1674 	VM_TAG_TRIAL(VM_KERN_MEMORY_STACK),
1675 	VM_TAG_TRIAL(VM_KERN_MEMORY_CPU),
1676 	VM_TAG_TRIAL(VM_KERN_MEMORY_PMAP),
1677 	VM_TAG_TRIAL(VM_KERN_MEMORY_PTE),
1678 	VM_TAG_TRIAL(VM_KERN_MEMORY_ZONE),
1679 	VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC),
1680 	VM_TAG_TRIAL(VM_KERN_MEMORY_COMPRESSOR),
1681 	VM_TAG_TRIAL(VM_KERN_MEMORY_COMPRESSED_DATA),
1682 	VM_TAG_TRIAL(VM_KERN_MEMORY_PHANTOM_CACHE),
1683 	VM_TAG_TRIAL(VM_KERN_MEMORY_WAITQ),
1684 	VM_TAG_TRIAL(VM_KERN_MEMORY_DIAG),
1685 	VM_TAG_TRIAL(VM_KERN_MEMORY_LOG),
1686 	VM_TAG_TRIAL(VM_KERN_MEMORY_FILE),
1687 	VM_TAG_TRIAL(VM_KERN_MEMORY_MBUF),
1688 	VM_TAG_TRIAL(VM_KERN_MEMORY_UBC),
1689 	VM_TAG_TRIAL(VM_KERN_MEMORY_SECURITY),
1690 	VM_TAG_TRIAL(VM_KERN_MEMORY_MLOCK),
1691 	VM_TAG_TRIAL(VM_KERN_MEMORY_REASON),
1692 	VM_TAG_TRIAL(VM_KERN_MEMORY_SKYWALK),
1693 	VM_TAG_TRIAL(VM_KERN_MEMORY_LTABLE),
1694 	VM_TAG_TRIAL(VM_KERN_MEMORY_HV),
1695 	VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC_DATA),
1696 	VM_TAG_TRIAL(VM_KERN_MEMORY_RETIRED),
1697 	VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC_TYPE),
1698 	VM_TAG_TRIAL(VM_KERN_MEMORY_TRIAGE),
1699 	VM_TAG_TRIAL(VM_KERN_MEMORY_RECOUNT),
1700 };
1701 
1702 TRIALS_IMPL(vm_tag)
1703 
1704 static void
1705 cleanup_vm_tag_trials(vm_tag_trials_t **trials)
1706 {
1707 	free_trials(*trials);
1708 }
1709 
1710 #define SMART_VM_TAG_TRIALS()                                           \
1711 	__attribute__((cleanup(cleanup_vm_tag_trials)))         \
1712 	= allocate_vm_tag_trials(countof(vm_tag_trials_values));        \
1713 	append_trials(trials, vm_tag_trials_values, countof(vm_tag_trials_values))
1714 
1715 // END vm_tag_t
1716 
1717 // generate vm_prot_t trials
1718 
1719 typedef struct {
1720 	vm_prot_t prot;
1721 	const char *name;
1722 } vm_prot_trial_t;
1723 
1724 typedef struct {
1725 	unsigned count;
1726 	unsigned capacity;
1727 	vm_prot_trial_t list[];
1728 } vm_prot_trials_t;
1729 
1730 #define VM_PROT_TRIAL(new_prot)                                         \
1731 	(vm_prot_trial_t){ .prot = (vm_prot_t)(new_prot), .name = "vm_prot "#new_prot }
1732 
1733 static vm_prot_trial_t vm_prot_trials_values[] = {
1734 	// none
1735 	VM_PROT_TRIAL(VM_PROT_NONE),
1736 	// ordinary r-- / rw- / r-x
1737 	VM_PROT_TRIAL(VM_PROT_READ),
1738 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE),
1739 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE),
1740 	// rwx (w+x often disallowed)
1741 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE),
1742 	// VM_PROT_READ | VM_PROT_x for each other VM_PROT_x bit
1743 	// plus write and execute for some interesting cases
1744 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 3),
1745 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 4),
1746 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 5),
1747 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 6),
1748 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 7),
1749 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 7),
1750 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 7),
1751 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 8),
1752 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 8),
1753 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 8),
1754 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 9),
1755 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 10),
1756 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 11),
1757 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 12),
1758 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 13),
1759 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 14),
1760 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 15),
1761 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 16),
1762 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 16),
1763 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 16),
1764 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 17),
1765 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 18),
1766 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 19),
1767 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 20),
1768 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 21),
1769 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 22),
1770 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 23),
1771 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 23),
1772 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 24),
1773 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 25),
1774 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 25),
1775 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 25),
1776 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 26),
1777 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 27),
1778 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 28),
1779 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 29),
1780 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 30),
1781 	VM_PROT_TRIAL(VM_PROT_READ | 1u << 31),
1782 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 31),
1783 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 31),
1784 
1785 	// error case coverage in specific subfunctions
1786 	VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_ONLY | MAP_MEM_USE_DATA_ADDR),
1787 	VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_ONLY | MAP_MEM_4K_DATA_ADDR),
1788 	VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_NAMED_CREATE | MAP_MEM_USE_DATA_ADDR),
1789 	VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_NAMED_CREATE | MAP_MEM_4K_DATA_ADDR),
1790 	VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE),
1791 	VM_PROT_TRIAL(VM_PROT_NONE | MAP_MEM_VM_SHARE | VM_PROT_IS_MASK),
1792 
1793 	// interesting non-error cases for additional test coverage
1794 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE),
1795 	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | MAP_MEM_NAMED_CREATE |
1796     MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY),
1797 };
1798 
1799 TRIALS_IMPL(vm_prot)
1800 
1801 static void
1802 cleanup_vm_prot_trials(vm_prot_trials_t **trials)
1803 {
1804 	free_trials(*trials);
1805 }
1806 
1807 // allocate vm_prot trials, and deallocate it at end of scope
1808 #define SMART_VM_PROT_TRIALS()                                          \
1809 	__attribute__((cleanup(cleanup_vm_prot_trials)))                \
1810 	= allocate_vm_prot_trials(countof(vm_prot_trials_values));      \
1811 	append_trials(trials, vm_prot_trials_values, countof(vm_prot_trials_values))
1812 
1813 // Trials for pairs of vm_prot_t
1814 
1815 typedef struct {
1816 	vm_prot_t cur;
1817 	vm_prot_t max;
1818 	char * name;
1819 } vm_prot_pair_trial_t;
1820 
1821 typedef struct {
1822 	unsigned count;
1823 	unsigned capacity;
1824 	vm_prot_pair_trial_t list[];
1825 } vm_prot_pair_trials_t;
1826 
1827 TRIALS_IMPL(vm_prot_pair)
1828 
1829 #define VM_PROT_PAIR_TRIAL(new_cur, new_max, new_name) \
1830 (vm_prot_pair_trial_t){ .cur = (vm_prot_t)(new_cur), \
1831 	        .max = (vm_prot_t)(new_max), \
1832 	        .name = new_name,}
1833 
1834 vm_prot_pair_trials_t *
1835 generate_vm_prot_pair_trials()
1836 {
1837 	const unsigned D = countof(vm_prot_trials_values);
1838 	unsigned num_trials = D * D;
1839 
1840 	vm_prot_pair_trials_t * trials = allocate_vm_prot_pair_trials(num_trials);
1841 	for (size_t i = 0; i < D; i++) {
1842 		for (size_t j = 0; j < D; j++) {
1843 			vm_prot_t cur = vm_prot_trials_values[i].prot;
1844 			vm_prot_t max = vm_prot_trials_values[j].prot;
1845 			char *str;
1846 			kasprintf(&str, "cur: 0x%x, max: 0x%x", cur, max);
1847 			append_trial(trials, VM_PROT_PAIR_TRIAL(cur, max, str));
1848 		}
1849 	}
1850 	return trials;
1851 }
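/*
 * This is a full cross product: D entries in vm_prot_trials_values yield
 * D * D pair trials (a few thousand), one per (cur, max) combination,
 * each named like "cur: 0x1, max: 0x3".
 */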
1852 
1853 #define SMART_VM_PROT_PAIR_TRIALS()                                             \
1854 	__attribute__((cleanup(cleanup_vm_prot_pair_trials)))           \
1855 	= generate_vm_prot_pair_trials();
1856 
1857 static void
1858 cleanup_vm_prot_pair_trials(vm_prot_pair_trials_t **trials)
1859 {
1860 	for (size_t i = 0; i < (*trials)->count; i++) {
1861 		kfree_str((*trials)->list[i].name);
1862 	}
1863 	free_trials(*trials);
1864 }
1865 
1866 
1867 // vm_purgeable_t trial contents.
1868 typedef struct {
1869 	vm_purgable_t value;
1870 	char * name;
1871 } vm_purgeable_trial_t;
1872 
1873 #define VM_PURGEABLE_TRIAL(new_value) \
1874 	(vm_purgeable_trial_t) {.value = (vm_purgable_t)(new_value), .name = "vm_purgeable_t " #new_value}
1875 
1876 static vm_purgeable_trial_t vm_purgeable_trials_values[] = {
1877 	VM_PURGEABLE_TRIAL(VM_PURGABLE_SET_STATE),
1878 	VM_PURGEABLE_TRIAL(VM_PURGABLE_GET_STATE),
1879 	VM_PURGEABLE_TRIAL(VM_PURGABLE_PURGE_ALL),
1880 	VM_PURGEABLE_TRIAL(VM_PURGABLE_SET_STATE_FROM_KERNEL),
1881 	// end valid values
1882 	VM_PURGEABLE_TRIAL(VM_PURGABLE_SET_STATE_FROM_KERNEL + 1),
1883 	VM_PURGEABLE_TRIAL(VM_PURGABLE_SET_STATE_FROM_KERNEL + 2),
1884 	VM_PURGEABLE_TRIAL(0x12345),
1885 	VM_PURGEABLE_TRIAL(0xffffffff),
1886 };
1887 
1888 typedef struct {
1889 	int value;
1890 	char * name;
1891 } vm_purgeable_state_trial_t;
1892 
1893 #define VM_PURGEABLE_STATE_TRIAL(new_value) \
1894 	(vm_purgeable_state_trial_t) {.value = (int)(new_value), .name = "state " #new_value}
1895 
1896 static vm_purgeable_state_trial_t vm_purgeable_state_trials_values[] = {
1897 	VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_NO_AGING),
1898 	VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_DEBUG_EMPTY),
1899 	VM_PURGEABLE_STATE_TRIAL(VM_VOLATILE_GROUP_0),
1900 	VM_PURGEABLE_STATE_TRIAL(VM_VOLATILE_GROUP_7),
1901 	VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_BEHAVIOR_FIFO),
1902 	VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_ORDERING_NORMAL),
1903 	VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_EMPTY),
1904 	VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_DENY),
1905 	VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_NONVOLATILE),
1906 	VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_VOLATILE),
1907 	VM_PURGEABLE_STATE_TRIAL(0x12345),
1908 	VM_PURGEABLE_STATE_TRIAL(0xffffffff),
1909 };
1910 
1911 // Trials for vm_purgeable_t and state
1912 typedef struct {
1913 	vm_purgable_t control;
1914 	int state;
1915 	char * name;
1916 } vm_purgeable_and_state_trial_t;
1917 
1918 typedef struct {
1919 	unsigned count;
1920 	unsigned capacity;
1921 	vm_purgeable_and_state_trial_t list[];
1922 } vm_purgeable_and_state_trials_t;
1923 
1924 TRIALS_IMPL(vm_purgeable_and_state)
1925 
1926 #define VM_PURGEABLE_AND_STATE_TRIAL(new_control, new_state, new_name) \
1927 (vm_purgeable_and_state_trial_t){ .control = (vm_purgable_t)(new_control), \
1928 	        .state = (int)(new_state), \
1929 	        .name = new_name,}
1930 
1931 vm_purgeable_and_state_trials_t *
1932 generate_vm_purgeable_t_and_state_trials()
1933 {
1934 	const unsigned purgeable_trial_count = countof(vm_purgeable_trials_values);
1935 	const unsigned state_trial_count = countof(vm_purgeable_state_trials_values);
1936 	unsigned num_trials = purgeable_trial_count * state_trial_count;
1937 
1938 	vm_purgeable_and_state_trials_t * trials = allocate_vm_purgeable_and_state_trials(num_trials);
1939 	for (size_t i = 0; i < purgeable_trial_count; i++) {
1940 		for (size_t j = 0; j < state_trial_count; j++) {
1941 			vm_purgeable_trial_t control_trial = vm_purgeable_trials_values[i];
1942 			vm_purgeable_state_trial_t state_trial = vm_purgeable_state_trials_values[j];
1943 			char *str;
1944 			kasprintf(&str, "%s, %s", control_trial.name, state_trial.name);
1945 			append_trial(trials, VM_PURGEABLE_AND_STATE_TRIAL(control_trial.value, state_trial.value, str));
1946 		}
1947 	}
1948 	return trials;
1949 }
1950 
1951 #define SMART_VM_PURGEABLE_AND_STATE_TRIALS()                           \
1952 	__attribute__((cleanup(cleanup_vm_purgeable_t_and_state_trials))) \
1953 	= generate_vm_purgeable_t_and_state_trials();
1954 
1955 static void
1956 cleanup_vm_purgeable_t_and_state_trials(vm_purgeable_and_state_trials_t **trials)
1957 {
1958 	for (size_t i = 0; i < (*trials)->count; i++) {
1959 		kfree_str((*trials)->list[i].name);
1960 	}
1961 	free_trials(*trials);
1962 }
1963 
1964 // generate ledger tag trials
1965 
1966 typedef struct {
1967 	int tag;
1968 	const char *name;
1969 } ledger_tag_trial_t;
1970 
1971 typedef struct {
1972 	unsigned count;
1973 	unsigned capacity;
1974 	ledger_tag_trial_t list[];
1975 } ledger_tag_trials_t;
1976 
1977 #define LEDGER_TAG_TRIAL(new_tag)                            \
1978 	(ledger_tag_trial_t){ .tag = (int)(new_tag), .name = "ledger tag "#new_tag }
1979 
1980 static ledger_tag_trial_t ledger_tag_trials_values[] = {
1981 	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NONE),
1982 	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_DEFAULT),
1983 	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NETWORK),
1984 	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_MEDIA),
1985 	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_GRAPHICS),
1986 	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NEURAL),
1987 	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_MAX),
1988 	LEDGER_TAG_TRIAL(1u << 16),
1989 	LEDGER_TAG_TRIAL(1u << 17),
1990 	LEDGER_TAG_TRIAL(1u << 18),
1991 	LEDGER_TAG_TRIAL(1u << 19),
1992 	LEDGER_TAG_TRIAL(1u << 20),
1993 	LEDGER_TAG_TRIAL(1u << 21),
1994 	LEDGER_TAG_TRIAL(1u << 22),
1995 	LEDGER_TAG_TRIAL(1u << 23),
1996 	LEDGER_TAG_TRIAL(1u << 24),
1997 	LEDGER_TAG_TRIAL(1u << 25),
1998 	LEDGER_TAG_TRIAL(1u << 26),
1999 	LEDGER_TAG_TRIAL(1u << 27),
2000 	LEDGER_TAG_TRIAL(1u << 28),
2001 	LEDGER_TAG_TRIAL(1u << 29),
2002 	LEDGER_TAG_TRIAL(1u << 30),
2003 	LEDGER_TAG_TRIAL(1u << 31),
2004 	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_UNCHANGED),
2005 };
2006 
2007 TRIALS_IMPL(ledger_tag)
2008 
2009 static void
2010 cleanup_ledger_tag_trials(ledger_tag_trials_t **trials)
2011 {
2012 	free_trials(*trials);
2013 }
2014 
2015 // allocate ledger tag trials, and deallocate it at end of scope
2016 #define SMART_LEDGER_TAG_TRIALS()                                               \
2017 	__attribute__((cleanup(cleanup_ledger_tag_trials)))             \
2018 	= allocate_ledger_tag_trials(countof(ledger_tag_trials_values));        \
2019 	append_trials(trials, ledger_tag_trials_values, countof(ledger_tag_trials_values))
2020 
2021 
2022 // generate ledger flag trials
2023 
2024 typedef struct {
2025 	int flag;
2026 	const char *name;
2027 } ledger_flag_trial_t;
2028 
2029 typedef struct {
2030 	unsigned count;
2031 	unsigned capacity;
2032 	ledger_flag_trial_t list[];
2033 } ledger_flag_trials_t;
2034 
2035 #define LEDGER_FLAG_TRIAL(new_flag)                            \
2036 	(ledger_flag_trial_t){ .flag = (int)(new_flag), .name = "ledger flag "#new_flag }
2037 
2038 static ledger_flag_trial_t ledger_flag_trials_values[] = {
2039 	LEDGER_FLAG_TRIAL(0),
2040 	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_NO_FOOTPRINT),
2041 	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG),
2042 	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAGS_USER),
2043 	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_FROM_KERNEL),
2044 	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAGS_ALL),
2045 	LEDGER_FLAG_TRIAL(1u << 3),
2046 	LEDGER_FLAG_TRIAL(1u << 4),
2047 	LEDGER_FLAG_TRIAL(1u << 5),
2048 	LEDGER_FLAG_TRIAL(1u << 6),
2049 	LEDGER_FLAG_TRIAL(1u << 7),
2050 	LEDGER_FLAG_TRIAL(1u << 8),
2051 	LEDGER_FLAG_TRIAL(1u << 9),
2052 	LEDGER_FLAG_TRIAL(1u << 10),
2053 	LEDGER_FLAG_TRIAL(1u << 11),
2054 	LEDGER_FLAG_TRIAL(1u << 12),
2055 	LEDGER_FLAG_TRIAL(1u << 13),
2056 	LEDGER_FLAG_TRIAL(1u << 14),
2057 	LEDGER_FLAG_TRIAL(1u << 15),
2058 	LEDGER_FLAG_TRIAL(1u << 16),
2059 	LEDGER_FLAG_TRIAL(1u << 17),
2060 	LEDGER_FLAG_TRIAL(1u << 18),
2061 	LEDGER_FLAG_TRIAL(1u << 19),
2062 	LEDGER_FLAG_TRIAL(1u << 20),
2063 	LEDGER_FLAG_TRIAL(1u << 21),
2064 	LEDGER_FLAG_TRIAL(1u << 22),
2065 	LEDGER_FLAG_TRIAL(1u << 23),
2066 	LEDGER_FLAG_TRIAL(1u << 24),
2067 	LEDGER_FLAG_TRIAL(1u << 25),
2068 	LEDGER_FLAG_TRIAL(1u << 26),
2069 	LEDGER_FLAG_TRIAL(1u << 27),
2070 	LEDGER_FLAG_TRIAL(1u << 28),
2071 	LEDGER_FLAG_TRIAL(1u << 29),
2072 	LEDGER_FLAG_TRIAL(1u << 30),
2073 	LEDGER_FLAG_TRIAL(1u << 31),
2074 };
2075 
2076 TRIALS_IMPL(ledger_flag)
2077 
2078 static void
2079 cleanup_ledger_flag_trials(ledger_flag_trials_t **trials)
2080 {
2081 	free_trials(*trials);
2082 }
2083 
2084 // allocate ledger flag trials, and deallocate it at end of scope
2085 #define SMART_LEDGER_FLAG_TRIALS()                                              \
2086 	__attribute__((cleanup(cleanup_ledger_flag_trials)))            \
2087 	= allocate_ledger_flag_trials(countof(ledger_flag_trials_values));      \
2088 	append_trials(trials, ledger_flag_trials_values, countof(ledger_flag_trials_values))
2089 
2090 // generate address-parameter trials
2091 // where the address has no associated size
2092 // and the callee's arithmetic includes `round_page(addr)`
2093 
2094 typedef struct {
2095 	addr_t addr;
2096 	bool addr_is_absolute;
2097 	char *name;
2098 } addr_trial_t;
2099 
2100 typedef struct {
2101 	unsigned count;
2102 	unsigned capacity;
2103 	addr_trial_t list[];
2104 } addr_trials_t;
2105 
2106 #define ADDR_TRIAL(new_addr, new_absolute, new_name)                    \
2107 	(addr_trial_t){ .addr = (addr_t)(new_addr), .addr_is_absolute = new_absolute, .name = new_name }
2108 
2109 static addr_trial_t __attribute__((overloadable, used))
2110 slide_trial(addr_trial_t trial, mach_vm_address_t slide)
2111 {
2112 	addr_trial_t result = trial;
2113 	if (!trial.addr_is_absolute) {
2114 		result.addr += slide;
2115 	}
2116 	return result;
2117 }
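/*
 * Worked example (hypothetical values): a relative trial generated as
 * "base+1" with base 0x100000 holds addr 0x100001; slide_trial(t, 0x4000)
 * moves it to 0x104001, while an absolute trial such as addr -1 is
 * returned unchanged.
 */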
2118 
2119 static const offset_list_t *
2120 get_addr_trial_offsets(void)
2121 {
2122 	static offset_list_t *offsets;
2123 	addr_t test_page_size = adjust_page_size();
2124 	if (!offsets) {
2125 		offsets = allocate_offsets(20);
2126 		append_offset(offsets, true, 0);
2127 		append_offset(offsets, true, 1);
2128 		append_offset(offsets, true, 2);
2129 		append_offset(offsets, true, test_page_size - 2);
2130 		append_offset(offsets, true, test_page_size - 1);
2131 		append_offset(offsets, true, test_page_size);
2132 		append_offset(offsets, true, test_page_size + 1);
2133 		append_offset(offsets, true, test_page_size + 2);
2134 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 2);
2135 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 1);
2136 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size);
2137 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 1);
2138 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 2);
2139 		append_offset(offsets, true, -(mach_vm_address_t)2);
2140 		append_offset(offsets, true, -(mach_vm_address_t)1);
2141 
2142 		append_offset(offsets, false, 0);
2143 		append_offset(offsets, false, 1);
2144 		append_offset(offsets, false, 2);
2145 		append_offset(offsets, false, test_page_size - 2);
2146 		append_offset(offsets, false, test_page_size - 1);
2147 	}
2148 	return offsets;
2149 }
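/*
 * These offsets probe the boundaries that matter to round_page(addr):
 * around zero, around one page, and near the top of the address space
 * where rounding wraps to zero. The relative entries are rebased against
 * an allocation's address by generate_addr_trials() below.
 */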
2150 
2151 TRIALS_IMPL(addr)
2152 
2153 addr_trials_t *
2154 generate_addr_trials(addr_t base)
2155 {
2156 	const offset_list_t *offsets = get_addr_trial_offsets();
2157 	const unsigned ADDRS = offsets->count;
2158 	addr_trials_t *trials = allocate_addr_trials(ADDRS);
2159 
2160 	for (unsigned a = 0; a < ADDRS; a++) {
2161 		mach_vm_address_t addr_offset = offsets->list[a].offset;
2162 		mach_vm_address_t addr = addr_offset;
2163 		bool addr_is_absolute = offsets->list[a].is_absolute;
2164 		if (!addr_is_absolute) {
2165 			addr += base;
2166 		}
2167 
2168 		char *str;
2169 		kasprintf(&str, "addr: %s0x%llx",
2170 		    addr_is_absolute ? "" : "base+", addr_offset);
2171 		append_trial(trials, ADDR_TRIAL(addr, addr_is_absolute, str));
2172 	}
2173 	return trials;
2174 }
2175 
2176 static void
2177 cleanup_addr_trials(addr_trials_t **trials)
2178 {
2179 	for (size_t i = 0; i < (*trials)->count; i++) {
2180 		kfree_str((*trials)->list[i].name);
2181 	}
2182 	free_trials(*trials);
2183 }
2184 
2185 // allocate address trials around a base address
2186 // and deallocate it at end of scope
2187 #define SMART_ADDR_TRIALS(base)                                         \
2188 	__attribute__((cleanup(cleanup_addr_trials)))                   \
2189 	    = generate_addr_trials(base)
2190 
2191 
2192 /////////////////////////////////////////////////////
2193 // generate size-parameter trials
2194 // where the size is not associated with any base address
2195 // and the callee's arithmetic includes `round_page(size)`
2196 
2197 typedef struct {
2198 	addr_t size;
2199 	char *name;
2200 } size_trial_t;
2201 
2202 typedef struct {
2203 	unsigned count;
2204 	unsigned capacity;
2205 	size_trial_t list[];
2206 } size_trials_t;
2207 
2208 #define SIZE_TRIAL(new_size, new_name)                                          \
2209 	(size_trial_t){ .size = (addr_t)(new_size), .name = new_name }
2210 
2211 static const offset_list_t *
2212 get_size_trial_offsets(void)
2213 {
2214 	static offset_list_t *offsets;
2215 	addr_t test_page_size = adjust_page_size();
2216 	if (!offsets) {
2217 		offsets = allocate_offsets(15);
2218 		append_offset(offsets, true, 0);
2219 		append_offset(offsets, true, 1);
2220 		append_offset(offsets, true, 2);
2221 		append_offset(offsets, true, test_page_size - 2);
2222 		append_offset(offsets, true, test_page_size - 1);
2223 		append_offset(offsets, true, test_page_size);
2224 		append_offset(offsets, true, test_page_size + 1);
2225 		append_offset(offsets, true, test_page_size + 2);
2226 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 2);
2227 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 1);
2228 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size);
2229 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 1);
2230 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 2);
2231 		append_offset(offsets, true, -(mach_vm_address_t)2);
2232 		append_offset(offsets, true, -(mach_vm_address_t)1);
2233 	}
2234 	return offsets;
2235 }
2236 
2237 TRIALS_IMPL(size)
2238 
2239 size_trials_t *
2240 generate_size_trials(void)
2241 {
2242 	const offset_list_t *size_offsets = get_size_trial_offsets();
2243 	const unsigned SIZES = size_offsets->count;
2244 	size_trials_t *trials = allocate_size_trials(SIZES);
2245 
2246 	for (unsigned s = 0; s < SIZES; s++) {
2247 		mach_vm_size_t size = size_offsets->list[s].offset;
2248 
2249 		char *str;
2250 		kasprintf(&str, "size: 0x%llx", size);
2251 		append_trial(trials, SIZE_TRIAL(size, str));
2252 	}
2253 	return trials;
2254 }
2255 
2256 static void
2257 cleanup_size_trials(size_trials_t **trials)
2258 {
2259 	for (size_t i = 0; i < (*trials)->count; i++) {
2260 		kfree_str((*trials)->list[i].name);
2261 	}
2262 	free_trials(*trials);
2263 }
2264 
2265 // allocate size trials, and deallocate it at end of scope
2266 #define SMART_SIZE_TRIALS()                                             \
2267 	__attribute__((cleanup(cleanup_size_trials)))                   \
2268 	= generate_size_trials()
2269 
2270 /////////////////////////////////////////////////////
2271 // generate start/size trials
2272 // using absolute addresses or addresses around a given address
2273 // where `size` is the size of the thing at `start`
2274 // and the callee's arithmetic performs `start+size`
2275 
2276 typedef struct {
2277 	addr_t start;
2278 	addr_t size;
2279 	char *name;
2280 	bool start_is_absolute;  // start computation does not include any allocation's base address
2281 	bool size_is_absolute;   // size computation does not include start
2282 } start_size_trial_t;
2283 
2284 typedef struct {
2285 	unsigned count;
2286 	unsigned capacity;
2287 	start_size_trial_t list[];
2288 } start_size_trials_t;
2289 
2290 
2291 #define START_SIZE_TRIAL(new_start, start_absolute, new_size, size_absolute, new_name) \
2292 	(start_size_trial_t){ .start = (addr_t)(new_start), .size = (addr_t)(new_size), \
2293 	                .name = new_name,                                       \
2294 	                .start_is_absolute = start_absolute, .size_is_absolute = size_absolute }
2295 
2296 static const offset_list_t *
2297 get_start_size_trial_start_offsets(void)
2298 {
2299 	return get_addr_trial_offsets();
2300 }
2301 
2302 static const offset_list_t *
2303 get_start_size_trial_size_offsets(void)
2304 {
2305 	static offset_list_t *offsets;
2306 	if (!offsets) {
2307 		// use each size offset twice: once absolute and once relative
2308 		const offset_list_t *old_offsets = get_size_trial_offsets();
2309 		offsets = allocate_offsets(2 * old_offsets->count);
2310 		for (unsigned i = 0; i < old_offsets->count; i++) {
2311 			append_offset(offsets, true, old_offsets->list[i].offset);
2312 		}
2313 		for (unsigned i = 0; i < old_offsets->count; i++) {
2314 			append_offset(offsets, false, old_offsets->list[i].offset);
2315 		}
2316 	}
2317 	return offsets;
2318 }
2319 
2320 TRIALS_IMPL(start_size)
2321 
2322 // Return a new start/size trial which is offset by `slide` bytes
2323 // Only "relative" start and size values get slid.
2324 // "absolute" values don't change.
2325 static start_size_trial_t __attribute__((overloadable, used))
2326 slide_trial(start_size_trial_t trial, mach_vm_address_t slide)
2327 {
2328 	start_size_trial_t result = trial;
2329 	if (!result.start_is_absolute) {
2330 		result.start += slide;
2331 		if (!result.size_is_absolute) {
2332 			result.size -= slide;
2333 		}
2334 	}
2335 	return result;
2336 }
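/*
 * A relative size encodes a fixed sum: generate_start_size_trials() below
 * computes size = size_offset - start, so start + size == size_offset, and
 * slide_trial() subtracts the slide from size to keep that sum unchanged.
 * For example, start = base+1 with size "-start+page" always satisfies
 * start + size == page, before and after sliding.
 */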
2337 
2338 start_size_trials_t *
2339 generate_start_size_trials(addr_t base)
2340 {
2341 	const offset_list_t *start_offsets = get_start_size_trial_start_offsets();
2342 	const offset_list_t *size_offsets = get_start_size_trial_size_offsets();
2343 
2344 	const unsigned ADDRS = start_offsets->count;
2345 	const unsigned SIZES = size_offsets->count;
2346 
2347 	start_size_trials_t *trials = allocate_start_size_trials(ADDRS * SIZES);
2348 
2349 	for (unsigned a = 0; a < ADDRS; a++) {
2350 		for (unsigned s = 0; s < SIZES; s++) {
2351 			mach_vm_address_t start_offset = start_offsets->list[a].offset;
2352 			mach_vm_address_t start = start_offset;
2353 			bool start_is_absolute = start_offsets->list[a].is_absolute;
2354 			if (!start_is_absolute) {
2355 				start += base;
2356 			}
2357 
2358 			mach_vm_size_t size_offset = size_offsets->list[s].offset;
2359 			mach_vm_size_t size = size_offset;
2360 			bool size_is_absolute = size_offsets->list[s].is_absolute;
2361 			if (!size_is_absolute) {
2362 				size = -start + size;
2363 			}
2364 
2365 			char *str;
2366 			kasprintf(&str, "start: %s0x%llx, size: %s0x%llx",
2367 			    start_is_absolute ? "" : "base+", start_offset,
2368 			    size_is_absolute ? "" :"-start+", size_offset);
2369 			append_trial(trials, START_SIZE_TRIAL(start, start_is_absolute, size, size_is_absolute, str));
2370 		}
2371 	}
2372 	return trials;
2373 }
2374 
2375 static void
2376 cleanup_start_size_trials(start_size_trials_t **trials)
2377 {
2378 	for (size_t i = 0; i < (*trials)->count; i++) {
2379 		kfree_str((*trials)->list[i].name);
2380 	}
2381 	free_trials(*trials);
2382 }
2383 
2384 // allocate start/size trials around a base address
2385 // and deallocate it at end of scope
2386 #define SMART_START_SIZE_TRIALS(base)                                   \
2387 	__attribute__((cleanup(cleanup_start_size_trials)))             \
2388 	= generate_start_size_trials(base)
2389 
2390 // Trials for start/size/offset/object tuples
2391 
2392 typedef struct {
2393 	mach_vm_address_t start;
2394 	mach_vm_size_t size;
2395 	vm_object_offset_t offset;
2396 	mach_vm_size_t obj_size;
2397 	bool start_is_absolute;
2398 	bool size_is_absolute;
2399 	char * name;
2400 } start_size_offset_object_trial_t;
2401 
2402 typedef struct {
2403 	unsigned count;
2404 	unsigned capacity;
2405 	start_size_offset_object_trial_t list[];
2406 } start_size_offset_object_trials_t;
2407 
2408 TRIALS_IMPL(start_size_offset_object)
2409 
2410 #define START_SIZE_OFFSET_OBJECT_TRIAL(new_start, new_size, new_offset, new_obj_size, new_start_is_absolute, new_size_is_absolute, new_name) \
2411 (start_size_offset_object_trial_t){ .start = (mach_vm_address_t)(new_start), \
2412 	        .size = (mach_vm_size_t)(new_size), \
2413 	        .offset = (vm_object_offset_t)(new_offset), \
2414 	        .obj_size = (mach_vm_size_t)(new_obj_size), \
2415 	        .start_is_absolute = (bool)(new_start_is_absolute), \
2416 	        .size_is_absolute = (bool)(new_size_is_absolute), \
2417 	        .name = new_name,}
2418 
2419 bool
2420 obj_size_is_ok(mach_vm_size_t obj_size)
2421 {
2422 	addr_t test_page_size = adjust_page_size();
2423 	if (round_up_page(obj_size, test_page_size) == 0) {
2424 		return false;
2425 	}
2426 	/* in Rosetta, PAGE_SIZE is 4K but rounding to 16K also panics */
2427 	if (!kern_trialname_generation && isRosetta() && round_up_page(obj_size, KB16) == 0) {
2428 		return false;
2429 	}
2430 	return true;
2431 }
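/*
 * Example: obj_size = (mach_vm_size_t)-1 rounds up to 0 in round_up_page()
 * and is rejected; under Rosetta the same rejection is applied to sizes
 * that round to 0 at the 16K page size as well.
 */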
2432 
2433 static start_size_offset_object_trial_t __attribute__((overloadable, used))
2434 slide_trial(start_size_offset_object_trial_t trial, mach_vm_address_t slide)
2435 {
2436 	start_size_offset_object_trial_t result = trial;
2437 
2438 	if (!trial.start_is_absolute) {
2439 		result.start += slide;
2440 		if (!trial.size_is_absolute) {
2441 			result.size -= slide;
2442 		}
2443 	}
2444 	return result;
2445 }
2446 
2447 static offset_list_t *
2448 get_ssoo_absolute_offsets()
2449 {
2450 	static offset_list_t *offsets;
2451 	addr_t test_page_size = adjust_page_size();
2452 	if (!offsets) {
2453 		offsets = allocate_offsets(20);
2454 		append_offset(offsets, true, 0);
2455 		append_offset(offsets, true, 1);
2456 		append_offset(offsets, true, 2);
2457 		append_offset(offsets, true, test_page_size - 2);
2458 		append_offset(offsets, true, test_page_size - 1);
2459 		append_offset(offsets, true, test_page_size);
2460 		append_offset(offsets, true, test_page_size + 1);
2461 		append_offset(offsets, true, test_page_size + 2);
2462 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 2);
2463 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 1);
2464 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size);
2465 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 1);
2466 		append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 2);
2467 		append_offset(offsets, true, -(mach_vm_address_t)2);
2468 		append_offset(offsets, true, -(mach_vm_address_t)1);
2469 	}
2470 	return offsets;
2471 }
2472 
2473 static offset_list_t *
2474 get_ssoo_absolute_and_relative_offsets()
2475 {
2476 	static offset_list_t *offsets;
2477 	addr_t test_page_size = adjust_page_size();
2478 	if (!offsets) {
2479 		const offset_list_t *old_offsets = get_ssoo_absolute_offsets();
2480 		offsets = allocate_offsets(old_offsets->count + 5);
2481 		// absolute offsets
2482 		for (unsigned i = 0; i < old_offsets->count; i++) {
2483 			append_offset(offsets, true, old_offsets->list[i].offset);
2484 		}
2485 		// relative offsets
2486 		append_offset(offsets, false, 0);
2487 		append_offset(offsets, false, 1);
2488 		append_offset(offsets, false, 2);
2489 		append_offset(offsets, false, test_page_size - 2);
2490 		append_offset(offsets, false, test_page_size - 1);
2491 	}
2492 	return offsets;
2493 }
2494 
2495 start_size_offset_object_trials_t *
2496 generate_start_size_offset_object_trials()
2497 {
2498 	const offset_list_t *start_offsets = get_ssoo_absolute_and_relative_offsets();
2499 	const offset_list_t *size_offsets  = get_ssoo_absolute_and_relative_offsets();
2500 	const offset_list_t *offset_values = get_ssoo_absolute_offsets();
2501 	const offset_list_t *object_sizes  = get_ssoo_absolute_offsets();
2502 
2503 	unsigned num_trials = 0;
2504 	for (size_t d = 0; d < object_sizes->count; d++) {
2505 		mach_vm_size_t obj_size = object_sizes->list[d].offset;
2506 		if (!obj_size_is_ok(obj_size)) { // make_a_mem_object would fail
2507 			continue;
2508 		}
2509 		num_trials++;
2510 	}
2511 	num_trials *= start_offsets->count * size_offsets->count * offset_values->count;
2512 
2513 	start_size_offset_object_trials_t * trials = allocate_start_size_offset_object_trials(num_trials);
2514 	for (size_t a = 0; a < start_offsets->count; a++) {
2515 		for (size_t b = 0; b < size_offsets->count; b++) {
2516 			for (size_t c = 0; c < offset_values->count; c++) {
2517 				for (size_t d = 0; d < object_sizes->count; d++) {
2518 					bool start_is_absolute = start_offsets->list[a].is_absolute;
2519 					bool size_is_absolute = size_offsets->list[b].is_absolute;
2520 					mach_vm_address_t start = start_offsets->list[a].offset;
2521 					mach_vm_size_t size = size_offsets->list[b].offset;
2522 					vm_object_offset_t offset = offset_values->list[c].offset;
2523 					mach_vm_size_t obj_size = object_sizes->list[d].offset;
2524 					if (!obj_size_is_ok(obj_size)) { // make_a_mem_object would fail
2525 						continue;
2526 					}
2527 					char *str;
2528 					kasprintf(&str, "start: %s0x%llx, size: %s0x%llx, offset: 0x%llx, obj_size: 0x%llx",
2529 					    start_is_absolute ? "" : "base+", start,
2530 					    size_is_absolute ? "" :"-start+", size,
2531 					    offset,
2532 					    obj_size);
2533 					append_trial(trials, START_SIZE_OFFSET_OBJECT_TRIAL(start, size, offset, obj_size, start_is_absolute, size_is_absolute, str));
2534 				}
2535 			}
2536 		}
2537 	}
2538 	return trials;
2539 }
2540 
2541 #define SMART_START_SIZE_OFFSET_OBJECT_TRIALS()                                         \
2542 	__attribute__((cleanup(cleanup_start_size_offset_object_trials)))               \
2543 	= generate_start_size_offset_object_trials();
2544 
2545 static void
2546 cleanup_start_size_offset_object_trials(start_size_offset_object_trials_t **trials)
2547 {
2548 	for (size_t i = 0; i < (*trials)->count; i++) {
2549 		kfree_str((*trials)->list[i].name);
2550 	}
2551 	free_trials(*trials);
2552 }
2553 
2554 
2555 // Trials for start/size/start/size tuples
2556 
2557 typedef struct {
2558 	mach_vm_address_t start;
2559 	mach_vm_size_t size;
2560 	mach_vm_address_t second_start;
2561 	mach_vm_size_t second_size;
2562 	bool start_is_absolute;
2563 	bool size_is_absolute;
2564 	bool second_start_is_absolute;
2565 	bool second_size_is_absolute;
2566 	char * name;
2567 } start_size_start_size_trial_t;
2568 
2569 typedef struct {
2570 	unsigned count;
2571 	unsigned capacity;
2572 	start_size_start_size_trial_t list[];
2573 } start_size_start_size_trials_t;
2574 
2575 TRIALS_IMPL(start_size_start_size)
2576 
2577 #define START_SIZE_START_SIZE_TRIAL(new_start, new_size, new_second_start, new_second_size, new_start_is_absolute, \
2578 	    new_size_is_absolute, new_second_start_is_absolute, new_second_size_is_absolute, new_name) \
2579 (start_size_start_size_trial_t){ .start = (mach_vm_address_t)(new_start), \
2580 	        .size = (mach_vm_size_t)(new_size), \
2581 	        .second_start = (mach_vm_address_t)(new_second_start), \
2582 	        .second_size = (mach_vm_size_t)(new_second_size), \
2583 	        .start_is_absolute = (bool)(new_start_is_absolute), \
2584 	        .size_is_absolute = (bool)(new_size_is_absolute), \
2585 	        .second_start_is_absolute = (bool)(new_second_start_is_absolute), \
2586 	        .second_size_is_absolute = (bool)(new_second_size_is_absolute),\
2587 	        .name = new_name,}
2588 
2589 static start_size_start_size_trial_t __attribute__((overloadable, used))
2590 slide_trial(start_size_start_size_trial_t trial, mach_vm_address_t slide, mach_vm_address_t second_slide)
2591 {
2592 	start_size_start_size_trial_t result = trial;
2593 
2594 	if (!trial.start_is_absolute) {
2595 		result.start += slide;
2596 		if (!trial.size_is_absolute) {
2597 			result.size -= slide;
2598 		}
2599 	}
2600 	if (!trial.second_start_is_absolute) {
2601 		result.second_start += second_slide;
2602 		if (!trial.second_size_is_absolute) {
2603 			result.second_size -= second_slide;
2604 		}
2605 	}
2606 	return result;
2607 }
2608 
2609 start_size_start_size_trials_t *
2610 generate_start_size_start_size_trials()
2611 {
2612 	/*
2613 	 * Reuse the starts/sizes from start/size/offset/object
2614 	 */
2615 	const offset_list_t *start_offsets        = get_ssoo_absolute_and_relative_offsets();
2616 	const offset_list_t *size_offsets         = get_ssoo_absolute_and_relative_offsets();
2617 	const offset_list_t *second_start_offsets = get_ssoo_absolute_and_relative_offsets();
2618 	const offset_list_t *second_size_offsets  = get_ssoo_absolute_and_relative_offsets();
2619 
2620 	unsigned num_trials = start_offsets->count * size_offsets->count
2621 	    * second_start_offsets->count * second_size_offsets->count;
2622 
2623 	start_size_start_size_trials_t * trials = allocate_start_size_start_size_trials(num_trials);
2624 	for (size_t a = 0; a < start_offsets->count; a++) {
2625 		for (size_t b = 0; b < size_offsets->count; b++) {
2626 			for (size_t c = 0; c < second_start_offsets->count; c++) {
2627 				for (size_t d = 0; d < second_size_offsets->count; d++) {
2628 					bool start_is_absolute = start_offsets->list[a].is_absolute;
2629 					bool size_is_absolute = size_offsets->list[b].is_absolute;
2630 					bool second_start_is_absolute = second_start_offsets->list[c].is_absolute;
2631 					bool second_size_is_absolute = second_size_offsets->list[d].is_absolute;
2632 					mach_vm_address_t start = start_offsets->list[a].offset;
2633 					mach_vm_size_t size = size_offsets->list[b].offset;
2634 					mach_vm_address_t second_start = second_start_offsets->list[c].offset;
2635 					mach_vm_size_t second_size = second_size_offsets->list[d].offset;
2636 
2637 					char *str;
2638 					kasprintf(&str, "start: %s0x%llx, size: %s0x%llx, second_start: %s0x%llx, second_size: %s0x%llx",
2639 					    start_is_absolute ? "" : "base+", start,
2640 					    size_is_absolute ? "" :"-start+", size,
2641 					    second_start_is_absolute ? "" : "base+", second_start,
2642 					    second_size_is_absolute ? "" : "-start+", second_size);
2643 					append_trial(trials, START_SIZE_START_SIZE_TRIAL(start, size, second_start, second_size,
2644 					    start_is_absolute, size_is_absolute,
2645 					    second_start_is_absolute, second_size_is_absolute, str));
2646 				}
2647 			}
2648 		}
2649 	}
2650 	return trials;
2651 }
2652 
2653 #define SMART_START_SIZE_START_SIZE_TRIALS()                                            \
2654 	__attribute__((cleanup(cleanup_start_size_start_size_trials)))                  \
2655 	= generate_start_size_start_size_trials();
2656 
2657 static void __attribute__((used))
2658 cleanup_start_size_start_size_trials(start_size_start_size_trials_t **trials)
2659 {
2660 	for (size_t i = 0; i < (*trials)->count; i++) {
2661 		kfree_str((*trials)->list[i].name);
2662 	}
2663 	free_trials(*trials);
2664 }
2665 
2666 
2667 // start/size/offset: test start+size and a second independent address
2668 // consider src/dst/size instead if the size may be added to both addresses
2669 
2670 typedef struct {
2671 	mach_vm_address_t start;
2672 	mach_vm_size_t size;
2673 	vm_object_offset_t offset;
2674 	bool start_is_absolute;
2675 	bool size_is_absolute;
2676 	char * name;
2677 } start_size_offset_trial_t;
2678 
2679 typedef struct {
2680 	unsigned count;
2681 	unsigned capacity;
2682 	start_size_offset_trial_t list[];
2683 } start_size_offset_trials_t;
2684 
2685 TRIALS_IMPL(start_size_offset)
2686 
2687 #define START_SIZE_OFFSET_TRIAL(new_start, new_size, new_offset, new_start_is_absolute, new_size_is_absolute, new_name) \
2688 (start_size_offset_trial_t){ .start = (mach_vm_address_t)(new_start), \
2689 	        .size = (mach_vm_size_t)(new_size), \
2690 	        .offset = (vm_object_offset_t)(new_offset), \
2691 	        .start_is_absolute = (bool)(new_start_is_absolute), \
2692 	        .size_is_absolute = (bool)(new_size_is_absolute), \
2693 	        .name = new_name,}
2694 
2695 
2696 static start_size_offset_trial_t __attribute__((overloadable, used))
2697 slide_trial(start_size_offset_trial_t trial, mach_vm_address_t slide)
2698 {
2699 	start_size_offset_trial_t result = trial;
2700 
2701 	if (!trial.start_is_absolute) {
2702 		result.start += slide;
2703 		if (!trial.size_is_absolute) {
2704 			result.size -= slide;
2705 		}
2706 	}
2707 	return result;
2708 }
2709 
2710 start_size_offset_trials_t *
2711 generate_start_size_offset_trials()
2712 {
2713 	const offset_list_t *start_offsets = get_ssoo_absolute_and_relative_offsets();
2714 	const offset_list_t *offset_values = get_ssoo_absolute_offsets();
2715 	const offset_list_t *size_offsets  = get_ssoo_absolute_and_relative_offsets();
2716 
2717 	// output is actually ordered start - offset - size
2718 	// because it pretty-prints better than start - size - offset
2719 	unsigned num_trials = start_offsets->count * offset_values->count * size_offsets->count;
2720 	start_size_offset_trials_t * trials = allocate_start_size_offset_trials(num_trials);
2721 	for (size_t a = 0; a < start_offsets->count; a++) {
2722 		for (size_t b = 0; b < offset_values->count; b++) {
2723 			for (size_t c = 0; c < size_offsets->count; c++) {
2724 				bool start_is_absolute = start_offsets->list[a].is_absolute;
2725 				bool size_is_absolute = size_offsets->list[c].is_absolute;
2726 				mach_vm_address_t start = start_offsets->list[a].offset;
2727 				vm_object_offset_t offset = offset_values->list[b].offset;
2728 				mach_vm_size_t size = size_offsets->list[c].offset;
2729 
2730 				char *str;
2731 				kasprintf(&str, "start: %s0x%llx, offset: 0x%llx, size: %s0x%llx",
2732 				    start_is_absolute ? "" : "base+", start,
2733 				    offset,
2734 				    size_is_absolute ? "" :"-start+", size);
2735 				append_trial(trials, START_SIZE_OFFSET_TRIAL(start, size, offset, start_is_absolute, size_is_absolute, str));
2736 			}
2737 		}
2738 	}
2739 	return trials;
2740 }
2741 
2742 #define SMART_START_SIZE_OFFSET_TRIALS()                                        \
2743 	__attribute__((cleanup(cleanup_start_size_offset_trials)))              \
2744 	= generate_start_size_offset_trials();
2745 
2746 static void
2747 cleanup_start_size_offset_trials(start_size_offset_trials_t **trials)
2748 {
2749 	for (size_t i = 0; i < (*trials)->count; i++) {
2750 		kfree_str((*trials)->list[i].name);
2751 	}
2752 	free_trials(*trials);
2753 }
2754 
2755 // src/dst/size: test a source address, a dest address,
2756 // and a common size that may be added to both addresses
2757 
2758 typedef struct {
2759 	addr_t src;
2760 	addr_t dst;
2761 	addr_t size;
2762 	char *name;
2763 	bool src_is_absolute;  // src computation does not include any allocation's base address
2764 	bool dst_is_absolute;  // dst computation does not include any allocation's base address
2765 	bool size_is_src_relative;   // size computation includes src
2766 	bool size_is_dst_relative;   // size computation includes dst
2767 } src_dst_size_trial_t;
2768 
2769 typedef struct {
2770 	unsigned count;
2771 	unsigned capacity;
2772 	src_dst_size_trial_t list[];
2773 } src_dst_size_trials_t;
2774 
2775 TRIALS_IMPL(src_dst_size)
2776 
2777 #define SRC_DST_SIZE_TRIAL(new_src, new_dst, new_size, new_name, src_absolute, dst_absolute, size_src_rel, size_dst_rel) \
2778 	(src_dst_size_trial_t){                                         \
2779 	        .src = (addr_t)(new_src),                               \
2780 	        .dst = (addr_t)(new_dst),                               \
2781 	        .size = (addr_t)(new_size),                             \
2782 	        .name = new_name,                                       \
2783 	        .src_is_absolute = src_absolute,                        \
2784 	        .dst_is_absolute = dst_absolute,                        \
2785 	        .size_is_src_relative = size_src_rel,                   \
2786 	        .size_is_dst_relative = size_dst_rel,                   \
2787 	}
2788 
2789 src_dst_size_trials_t * __attribute__((overloadable))
2790 generate_src_dst_size_trials(const char *srcname, const char *dstname)
2791 {
2792 	const offset_list_t *addr_offsets = get_addr_trial_offsets();
2793 	const offset_list_t *size_offsets = get_size_trial_offsets();
2794 	unsigned src_count = addr_offsets->count;
2795 	unsigned dst_count = src_count;
2796 	unsigned size_count = 3 * size_offsets->count;
2797 	unsigned num_trials = src_count * dst_count * size_count;
2798 	src_dst_size_trials_t * trials = allocate_src_dst_size_trials(num_trials);
2799 
2800 	// each size is used three times:
2801 	// once absolute, once src-relative, and once dst-relative
2802 	unsigned size_part = size_count / 3;
2803 
2804 	for (size_t i = 0; i < src_count; i++) {
2805 		bool rebase_src = !addr_offsets->list[i].is_absolute;
2806 		addr_t src_offset = addr_offsets->list[i].offset;
2807 
2808 		for (size_t j = 0; j < dst_count; j++) {
2809 			bool rebase_dst = !addr_offsets->list[j].is_absolute;
2810 			addr_t dst_offset = addr_offsets->list[j].offset;
2811 
2812 			for (size_t k = 0; k < size_count; k++) {
2813 				bool rebase_size_from_src = false;
2814 				bool rebase_size_from_dst = false;
2815 				addr_t size_offset;
2816 				if (k < size_part) {
2817 					size_offset = size_offsets->list[k].offset;
2818 				} else if (k < 2 * size_part) {
2819 					size_offset = size_offsets->list[k - size_part].offset;
2820 					rebase_size_from_src = true;
2821 					rebase_size_from_dst = false;
2822 				} else {
2823 					size_offset = size_offsets->list[k - 2 * size_part].offset;
2824 					rebase_size_from_src = false;
2825 					rebase_size_from_dst = true;
2826 				}
2827 
2828 				addr_t size;
2829 				char *desc;
2830 				if (rebase_size_from_src) {
2831 					size = -src_offset + size_offset;
2832 					kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: -%s%+lli",
2833 					    srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
2834 					    dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
2835 					    srcname, (int64_t)size_offset);
2836 				} else if (rebase_size_from_dst) {
2837 					size = -dst_offset + size_offset;
2838 					kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: -%s%+lli",
2839 					    srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
2840 					    dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
2841 					    dstname, (int64_t)size_offset);
2842 				} else {
2843 					size = size_offset;
2844 					kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: %lli",
2845 					    srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
2846 					    dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
2847 					    (int64_t)size_offset);
2848 				}
2849 				assert(desc);
2850 				append_trial(trials, SRC_DST_SIZE_TRIAL(src_offset, dst_offset, size, desc,
2851 				    !rebase_src, !rebase_dst, rebase_size_from_src, rebase_size_from_dst));
2852 			}
2853 		}
2854 	}
2855 	return trials;
2856 }
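/*
 * Example of the three size modes for a single (src, dst) pair: with
 * size_offset 0x4000 the trial is generated once with size = 0x4000
 * (absolute), once with size = -src + 0x4000 (so src + size == 0x4000
 * after sliding), and once with size = -dst + 0x4000 (so dst + size ==
 * 0x4000 after sliding).
 */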
2857 
2858 src_dst_size_trials_t * __attribute__((overloadable))
2859 generate_src_dst_size_trials(void)
2860 {
2861 	return generate_src_dst_size_trials("src", "dst");
2862 }
2863 #define SMART_SRC_DST_SIZE_TRIALS()                                     \
2864 	__attribute__((cleanup(cleanup_src_dst_size_trials)))           \
2865 	= generate_src_dst_size_trials();
2866 
2867 #define SMART_FILEOFF_DST_SIZE_TRIALS()                                 \
2868 	__attribute__((cleanup(cleanup_src_dst_size_trials)))           \
2869 	= generate_src_dst_size_trials("fileoff", "dst");
2870 
2871 static void
2872 cleanup_src_dst_size_trials(src_dst_size_trials_t **trials)
2873 {
2874 	for (size_t i = 0; i < (*trials)->count; i++) {
2875 		kfree_str((*trials)->list[i].name);
2876 	}
2877 	free_trials(*trials);
2878 }
2879 
2880 static src_dst_size_trial_t __attribute__((overloadable, used))
2881 slide_trial_src(src_dst_size_trial_t trial, mach_vm_address_t slide)
2882 {
2883 	src_dst_size_trial_t result = trial;
2884 
2885 	if (!trial.src_is_absolute) {
2886 		result.src += slide;
2887 		if (trial.size_is_src_relative) {
2888 			result.size -= slide;
2889 		}
2890 	}
2891 	return result;
2892 }
2893 
2894 static src_dst_size_trial_t __attribute__((overloadable, used))
2895 slide_trial_dst(src_dst_size_trial_t trial, mach_vm_address_t slide)
2896 {
2897 	src_dst_size_trial_t result = trial;
2898 
2899 	if (!trial.dst_is_absolute) {
2900 		result.dst += slide;
2901 		if (trial.size_is_dst_relative) {
2902 			result.size -= slide;
2903 		}
2904 	}
2905 	return result;
2906 }
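/*
 * Illustration (added commentary; field values are hypothetical): for a
 * src-relative size, the generator stores size = -src_offset + size_offset,
 * so after slide_trial_src() the sum src + size still lands on the absolute
 * size_offset chosen for the trial:
 *
 *   src_dst_size_trial_t t = {
 *           .src = 0x10, .size = (addr_t)(-0x10 + 0x1000),
 *           .src_is_absolute = false, .size_is_src_relative = true,
 *   };
 *   t = slide_trial_src(t, 0x7000);
 *   // t.src == 0x7010, and t.src + t.size == 0x1000 (mod 2^64)
 */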
2907 
2908 #if !KERNEL
2909 // shared_file_np / shared_file_mapping_slide_np tests
2910 
2911 // copied from bsd/vm/vm_unix.c
2912 #define _SR_FILE_MAPPINGS_MAX_FILES     256
2913 #define SFM_MAX (_SR_FILE_MAPPINGS_MAX_FILES * 8)
2914 
2915 // From Rosetta dyld
2916 #define kNumSharedCacheMappings 4
2917 #define kMaxSubcaches 16
2918 
2919 typedef struct {
2920 	uint32_t files_count;
2921 	struct shared_file_np *files;
2922 	char *name;
2923 } shared_file_np_trial_t;
2924 
2925 typedef struct {
2926 	unsigned count;
2927 	unsigned capacity;
2928 	shared_file_np_trial_t list[];
2929 } shared_file_np_trials_t;
2930 
2931 TRIALS_IMPL(shared_file_np)
2932 
2933 #define SHARED_FILE_NP_TRIAL(new_files_count, new_files, new_name) \
2934 (shared_file_np_trial_t){ .files_count = (uint32_t)(new_files_count), \
2935 	    .files = (struct shared_file_np *)(new_files), \
2936 	    .name = "files_count="#new_files_count new_name }
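/*
 * Example expansion (for illustration): the stringized count and the name
 * argument are adjacent string literals, so
 *   SHARED_FILE_NP_TRIAL(1, NULL, " (NULL files)")
 * yields a trial whose .name is the single literal
 *   "files_count=" "1" " (NULL files)"  ==  "files_count=1 (NULL files)"
 */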
2937 
2938 struct shared_file_np *
2939 alloc_shared_file_np(uint32_t files_count)
2940 {
2941 	struct shared_file_np *files;
2942 #if KERNEL
2943 	files = kalloc_type(struct shared_file_np, files_count, Z_WAITOK | Z_ZERO);
2944 #else
2945 	files = calloc(files_count, sizeof(struct shared_file_np));
2946 #endif
2947 	return files;
2948 }
2949 
2950 void
2951 free_shared_file_np(shared_file_np_trial_t *trial)
2952 {
2953 #if KERNEL
2954 	// some trials have files_count > 0 but null files.
2955 	if (trial->files) {
2956 		kfree_type(struct shared_file_np, trial->files_count, trial->files);
2957 	}
2958 #else
2959 	free(trial->files);
2960 #endif
2961 }
2962 
2963 static int get_fd(void);
2964 
2965 shared_file_np_trials_t *
2966 get_shared_file_np_trials(uint64_t dyld_fd)
2967 {
2968 	struct shared_file_np * files = NULL;
2969 	shared_file_np_trials_t *trials = allocate_shared_file_np_trials(11);
2970 	append_trial(trials, SHARED_FILE_NP_TRIAL(0, NULL, " (NULL files)"));
2971 	append_trial(trials, SHARED_FILE_NP_TRIAL(1, NULL, " (NULL files)"));
2972 	append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES - 1, NULL, " (NULL files)"));
2973 	append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES, NULL, " (NULL files)"));
2974 	append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES + 1, NULL, " (NULL files)"));
2975 	files = alloc_shared_file_np(1);
2976 	append_trial(trials, SHARED_FILE_NP_TRIAL(1, files, ""));
2977 	files = alloc_shared_file_np(_SR_FILE_MAPPINGS_MAX_FILES - 1);
2978 	append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES - 1, files, ""));
2979 	files = alloc_shared_file_np(_SR_FILE_MAPPINGS_MAX_FILES);
2980 	append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES, files, ""));
2981 	files = alloc_shared_file_np(_SR_FILE_MAPPINGS_MAX_FILES + 1);
2982 	append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES + 1, files, ""));
2983 	files = alloc_shared_file_np(1);
2984 	files->sf_fd = get_fd();
2985 	files->sf_slide = 4096;
2986 	files->sf_mappings_count = 1;
2987 	append_trial(trials, SHARED_FILE_NP_TRIAL(1, files, " non-zero shared_file_np"));
2988 	files = alloc_shared_file_np(2);
2989 	files[0].sf_fd = (int)dyld_fd;
2990 	files[0].sf_mappings_count = 1;
2991 	files[1].sf_fd = files[0].sf_fd;
2992 	files[1].sf_mappings_count = 4;
2993 	append_trial(trials, SHARED_FILE_NP_TRIAL(2, files, " checks shared_file_np"));
2994 	return trials;
2995 }
2996 
2997 static void
2998 cleanup_shared_file_np_trials(shared_file_np_trials_t **trials)
2999 {
3000 	for (size_t i = 0; i < (*trials)->count; i++) {
3001 		free_shared_file_np(&(*trials)->list[i]);
3002 	}
3003 	free_trials(*trials);
3004 }
3005 
3006 typedef struct {
3007 	uint32_t mappings_count;
3008 	struct shared_file_mapping_slide_np *mappings;
3009 	char *name;
3010 } shared_file_mapping_slide_np_trial_t;
3011 
3012 typedef struct {
3013 	unsigned count;
3014 	unsigned capacity;
3015 	shared_file_mapping_slide_np_trial_t list[];
3016 } shared_file_mapping_slide_np_trials_t;
3017 
3018 TRIALS_IMPL(shared_file_mapping_slide_np)
3019 
3020 #define SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(new_mappings_count, new_mappings, new_name) \
3021 (shared_file_mapping_slide_np_trial_t){ .mappings_count = (uint32_t)(new_mappings_count), \
3022 	    .mappings = (struct shared_file_mapping_slide_np *)(new_mappings), \
3023 	    .name = "mappings_count="#new_mappings_count new_name }
3024 
3025 struct shared_file_mapping_slide_np *
3026 alloc_shared_file_mapping_slide_np(uint32_t mappings_count)
3027 {
3028 	struct shared_file_mapping_slide_np *mappings;
3029 #if KERNEL
3030 	mappings = kalloc_type(struct shared_file_mapping_slide_np, mappings_count, Z_WAITOK | Z_ZERO);
3031 #else
3032 	mappings = calloc(mappings_count, sizeof(struct shared_file_mapping_slide_np));
3033 #endif
3034 	return mappings;
3035 }
3036 
3037 void
3038 free_shared_file_mapping_slide_np(shared_file_mapping_slide_np_trial_t *trial)
3039 {
3040 #if KERNEL
3041 	// some trials have mappings_count > 0 but null mappings.
3042 	if (trial->mappings) {
3043 		kfree_type(struct shared_file_mapping_slide_np, trial->mappings_count, trial->mappings);
3044 	}
3045 #else
3046 	free(trial->mappings);
3047 #endif
3048 }
3049 
3050 typedef enum { MP_NORMAL = 0, MP_ADDR_SIZE = 1, MP_OFFSET_SIZE, MP_PROTS } mapping_slide_np_test_style_t;
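/*
 * Added summary: each style makes mappings[0] probe one validation path.
 * MP_NORMAL is a sane baseline; MP_ADDR_SIZE makes sms_address + sms_size
 * wrap around; MP_OFFSET_SIZE pairs an extreme sms_file_offset with a zero
 * sms_size; MP_PROTS passes an out-of-range sms_init_prot.
 */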
3051 
3052 static inline struct shared_file_mapping_slide_np *
3053 alloc_and_fill_shared_file_mappings(uint32_t num_mappings, mapping_slide_np_test_style_t style)
3054 {
3055 	assert(num_mappings > 0);
3056 	struct shared_file_mapping_slide_np *mappings = alloc_shared_file_mapping_slide_np(num_mappings);
3057 
3058 	// Checks happen in a for-loop, so it is desirable to differentiate the first mapping.
3059 	switch (style) {
3060 	case MP_NORMAL:
3061 		mappings[0].sms_slide_size = KB4;
3062 		mappings[0].sms_slide_start = KB4;
3063 		mappings[0].sms_max_prot = VM_PROT_DEFAULT;
3064 		mappings[0].sms_init_prot = VM_PROT_DEFAULT;
3065 		break;
3066 	case MP_ADDR_SIZE:
3067 		mappings[0].sms_address = 1;
3068 		mappings[0].sms_size = UINT64_MAX;
3069 		mappings[0].sms_file_offset = 0;
3070 		mappings[0].sms_slide_size = KB4;
3071 		mappings[0].sms_slide_start = KB4;
3072 		mappings[0].sms_max_prot = VM_PROT_DEFAULT;
3073 		mappings[0].sms_init_prot = VM_PROT_DEFAULT;
3074 		break;
3075 	case MP_OFFSET_SIZE:
3076 		mappings[0].sms_size = 0;
3077 		mappings[0].sms_file_offset = UINT64_MAX;
3078 		mappings[0].sms_slide_size = KB4;
3079 		mappings[0].sms_slide_start = KB4;
3080 		mappings[0].sms_max_prot = VM_PROT_DEFAULT;
3081 		mappings[0].sms_init_prot = VM_PROT_DEFAULT;
3082 		break;
3083 	case MP_PROTS:
3084 		mappings[0].sms_slide_size = KB4;
3085 		mappings[0].sms_slide_start = KB4;
3086 		mappings[0].sms_max_prot = VM_PROT_DEFAULT;
3087 		mappings[0].sms_init_prot = INT_MAX;
3088 		break;
3089 	default:
3090 		assert(0);
3091 		break;
3092 	}
3093 
3094 	for (size_t idx = 1; idx < num_mappings; idx++) {
3095 		size_t i = idx % 4;
3096 		switch (i) {
3097 		case 0:
3098 			mappings[idx].sms_slide_size = KB4;
3099 			mappings[idx].sms_slide_start = KB4;
3100 			mappings[idx].sms_max_prot = VM_PROT_DEFAULT;
3101 			mappings[idx].sms_init_prot = VM_PROT_DEFAULT;
3102 			break;
3103 		case 1:
3104 			mappings[idx].sms_slide_size = KB4;
3105 			mappings[idx].sms_slide_start = UINT64_MAX;
3106 			mappings[idx].sms_max_prot = VM_PROT_DEFAULT;
3107 			mappings[idx].sms_init_prot = VM_PROT_DEFAULT;
3108 			break;
3109 		case 2:
3110 			mappings[idx].sms_slide_size = 0;
3111 			mappings[idx].sms_slide_start = UINT64_MAX;
3112 			mappings[idx].sms_max_prot = VM_PROT_DEFAULT;
3113 			mappings[idx].sms_init_prot = INT_MAX;
3114 			break;
3115 		case 3:
3116 			mappings[idx].sms_slide_size = KB4;
3117 			mappings[idx].sms_slide_start = 0;
3118 			mappings[idx].sms_max_prot = INT_MAX;
3119 			mappings[idx].sms_init_prot = VM_PROT_DEFAULT;
3120 			break;
3121 		default:
3122 			assert(0);
3123 			break;
3124 		}
3125 	}
3126 	return mappings;
3127 }
3128 
3129 shared_file_mapping_slide_np_trials_t *
3130 get_shared_file_mapping_slide_np_trials(void)
3131 {
3132 	struct shared_file_mapping_slide_np *mappings = NULL;
3133 	shared_file_mapping_slide_np_trials_t *trials = allocate_shared_file_mapping_slide_np_trials(14);
3134 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(0, NULL, " (NULL mappings)"));
3135 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, NULL, " (NULL mappings)"));
3136 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX - 1, NULL, " (NULL mappings)"));
3137 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX, NULL, " (NULL mappings)"));
3138 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX + 1, NULL, " (NULL mappings)"));
3139 	mappings = alloc_and_fill_shared_file_mappings(1, MP_NORMAL);
3140 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, mappings, " (normal)"));
3141 	mappings = alloc_and_fill_shared_file_mappings(1, MP_ADDR_SIZE);
3142 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, mappings, " (sms_address+sms_size check)"));
3143 	mappings = alloc_and_fill_shared_file_mappings(1, MP_OFFSET_SIZE);
3144 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, mappings, " (sms_file_offset+sms_size check)"));
3145 	mappings = alloc_and_fill_shared_file_mappings(1, MP_PROTS);
3146 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, mappings, " (sms_init_prot check)"));
3147 	mappings = alloc_and_fill_shared_file_mappings(SFM_MAX - 1, MP_NORMAL);
3148 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX - 1, mappings, ""));
3149 	mappings = alloc_and_fill_shared_file_mappings(SFM_MAX, MP_NORMAL);
3150 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX, mappings, ""));
3151 	mappings = alloc_and_fill_shared_file_mappings(SFM_MAX + 1, MP_NORMAL);
3152 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX + 1, mappings, ""));
3153 	mappings = alloc_and_fill_shared_file_mappings(kNumSharedCacheMappings, MP_NORMAL);
3154 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(kNumSharedCacheMappings, mappings, ""));
3155 	mappings = alloc_and_fill_shared_file_mappings(2 * kNumSharedCacheMappings, MP_NORMAL);
3156 	append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(2 * kNumSharedCacheMappings, mappings, ""));
3157 
3158 	return trials;
3159 }
3160 
3161 static void
3162 cleanup_shared_file_mapping_slide_np_trials(shared_file_mapping_slide_np_trials_t **trials)
3163 {
3164 	for (size_t i = 0; i < (*trials)->count; i++) {
3165 		free_shared_file_mapping_slide_np(&(*trials)->list[i]);
3166 	}
3167 	free_trials(*trials);
3168 }
3169 
3170 typedef struct {
3171 	uint32_t files_count;
3172 	struct shared_file_np *files;
3173 	uint32_t mappings_count;
3174 	struct shared_file_mapping_slide_np *mappings;
3175 	char *name;
3176 } shared_region_map_and_slide_2_trial_t;
3177 
3178 typedef struct {
3179 	unsigned count;
3180 	unsigned capacity;
3181 	shared_file_np_trials_t *shared_files_trials;
3182 	shared_file_mapping_slide_np_trials_t *shared_mappings_trials;
3183 	shared_region_map_and_slide_2_trial_t list[];
3184 } shared_region_map_and_slide_2_trials_t;
3185 
3186 TRIALS_IMPL(shared_region_map_and_slide_2)
3187 
3188 #define SHARED_REGION_MAP_AND_SLIDE_2_TRIAL(new_files_count, new_files, new_mappings_count, new_mappings, new_name) \
3189 (shared_region_map_and_slide_2_trial_t){ .files_count = (uint32_t)(new_files_count), \
3190 	    .files = (struct shared_file_np *)(new_files), \
3191 	    .mappings_count = (uint32_t)(new_mappings_count), \
3192 	    .mappings = (struct shared_file_mapping_slide_np *)(new_mappings), \
3193 	    .name = new_name }
3194 
3195 shared_region_map_and_slide_2_trials_t *
3196 generate_shared_region_map_and_slide_2_trials(uint64_t dyld_fd)
3197 {
3198 	shared_file_np_trials_t *shared_files = get_shared_file_np_trials(dyld_fd);
3199 	shared_file_mapping_slide_np_trials_t *shared_mappings = get_shared_file_mapping_slide_np_trials();
3200 	unsigned num_trials = shared_files->count * shared_mappings->count;
3201 	shared_region_map_and_slide_2_trials_t *trials = allocate_shared_region_map_and_slide_2_trials(num_trials);
3202 	trials->shared_files_trials = shared_files;
3203 	trials->shared_mappings_trials = shared_mappings;
3204 	for (size_t i = 0; i < shared_files->count; i++) {
3205 		for (size_t j = 0; j < shared_mappings->count; j++) {
3206 			char *buf;
3207 			shared_file_np_trial_t shared_file = shared_files->list[i];
3208 			shared_file_mapping_slide_np_trial_t shared_mapping = shared_mappings->list[j];
3209 			kasprintf(&buf, "%s, %s", shared_file.name, shared_mapping.name);
3210 			append_trial(trials, SHARED_REGION_MAP_AND_SLIDE_2_TRIAL(shared_file.files_count, shared_file.files, shared_mapping.mappings_count, shared_mapping.mappings, buf));
3211 		}
3212 	}
3213 	return trials;
3214 }
3215 
3216 #define SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(dyld_fd)    \
3217 	__attribute__((cleanup(cleanup_shared_region_map_and_slide_2_trials))) \
3218 	= generate_shared_region_map_and_slide_2_trials(dyld_fd);
3219 
3220 static void __attribute__((used))
3221 cleanup_shared_region_map_and_slide_2_trials(shared_region_map_and_slide_2_trials_t **trials)
3222 {
3223 	for (size_t i = 0; i < (*trials)->count; i++) {
3224 		kfree_str((*trials)->list[i].name);
3225 	}
3226 	cleanup_shared_file_np_trials(&(*trials)->shared_files_trials);
3227 	cleanup_shared_file_mapping_slide_np_trials(&(*trials)->shared_mappings_trials);
3228 	free_trials(*trials);
3229 }
3230 #endif // !KERNEL
3231 
3232 /////////////////////////////////////////////////////
3233 // utility code
3234 
3235 // Return true if flags has VM_FLAGS_FIXED
3236 // This is non-trivial because VM_FLAGS_FIXED is zero;
3237 // the real value is the absence of VM_FLAGS_ANYWHERE.
3238 static inline bool
3239 is_fixed(int flags)
3240 {
3241 	static_assert(VM_FLAGS_FIXED == 0, "this test requires VM_FLAGS_FIXED be zero");
3242 	static_assert(VM_FLAGS_ANYWHERE != 0, "this test requires VM_FLAGS_ANYWHERE be nonzero");
3243 	return !(flags & VM_FLAGS_ANYWHERE);
3244 }
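/*
 * For illustration, with the standard flag values:
 *   is_fixed(VM_FLAGS_FIXED)                      == true
 *   is_fixed(VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE) == true
 *   is_fixed(VM_FLAGS_ANYWHERE)                   == false
 * A naive (flags & VM_FLAGS_FIXED) test would always be false.
 */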
3245 
3246 // Return true if flags has VM_FLAGS_FIXED and VM_FLAGS_OVERWRITE set.
3247 static inline bool
3248 is_fixed_overwrite(int flags)
3249 {
3250 	return is_fixed(flags) && (flags & VM_FLAGS_OVERWRITE);
3251 }
3252 
3253 
3254 // Return true if flags has VM_FLAGS_ANYWHERE and VM_FLAGS_RANDOM_ADDR set.
3255 static inline bool
3256 is_random_anywhere(int flags)
3257 {
3258 	static_assert(VM_FLAGS_ANYWHERE != 0, "this test requires VM_FLAGS_ANYWHERE be nonzero");
3259 	return (flags & VM_FLAGS_RANDOM_ADDR) && (flags & VM_FLAGS_ANYWHERE);
3260 }
3261 
3262 // Deallocate [start, start+size).
3263 // Don't deallocate if the allocator failed (allocator_kr)
3264 // Don't deallocate if flags include FIXED | OVERWRITE (in which case
3265 //   the memory is a pre-existing allocation and should be left alone)
3266 static void
3267 deallocate_if_not_fixed_overwrite(kern_return_t allocator_kr, MAP_T map,
3268     mach_vm_address_t start, mach_vm_size_t size, int flags)
3269 {
3270 	if (is_fixed_overwrite(flags)) {
3271 		// fixed-overwrite with pre-existing allocation, don't deallocate
3272 	} else if (allocator_kr != 0) {
3273 		// allocator failed, don't deallocate
3274 	} else {
3275 		(void)mach_vm_deallocate(map, start, size);
3276 	}
3277 }
3278 
3279 // PPL is inefficient at deallocations of very large address ranges.
3280 // Skip those trials to avoid test timeouts.
3281 // We assume that tests on other devices will cover any testing gaps.
3282 static inline bool
3283 dealloc_would_time_out(
3284 	mach_vm_address_t addr __unused,
3285 	mach_vm_size_t size __unused,
3286 	vm_map_t map __unused)
3287 {
3288 #if CONFIG_SPTM
3289 	/* not PPL - okay */
3290 	return false;
3291 #elif !(__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)
3292 	/* PPL but small pmap address space - okay */
3293 	return false;
3294 #else
3295 	/*
3296 	 * PPL with large pmap address space - bad
3297 	 * Pre-empt trials of very large allocations.
3298 	 */
3299 	return size > 0x8000000000;
3300 #endif
3301 }
3302 
3303 #if !KERNEL
3304 
3305 // SMART_MAP is mach_task_self() in userspace and a new empty map in kernel
3306 #define SMART_MAP = mach_task_self()
3307 
3308 // CURRENT_MAP is mach_task_self() in userspace and current_map() in kernel
3309 #define CURRENT_MAP = mach_task_self()
3310 
3311 #else
3312 
3313 static inline vm_map_t
3314 create_map(mach_vm_address_t map_start, mach_vm_address_t map_end)
3315 {
3316 	ledger_t ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
3317 	pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
3318 	assert(pmap);
3319 	ledger_dereference(ledger);  // now retained by pmap
3320 	vm_map_t map = vm_map_create_options(pmap, map_start, map_end, VM_MAP_CREATE_PAGEABLE);
3321 	assert(map);
3322 
3323 	return map;
3324 }
3325 
3326 static inline void
3327 cleanup_map(vm_map_t *map)
3328 {
3329 	assert(*map);
3330 	kern_return_t kr = vm_map_terminate(*map);
3331 	assert(kr == 0);
3332 	vm_map_deallocate(*map);  // also destroys pmap
3333 }
3334 
3335 // kernel: create a new vm_map and deallocate it at end of scope
3336 // fixme choose a user-like and a kernel-like address range
3337 #define SMART_MAP                                                       \
3338 	__attribute__((cleanup(cleanup_map))) = create_map(0, 0xffffffffffffffff)
3339 
3340 // This map has a map_offset that matches what a user would get. This allows
3341 // vm_map_user_ranges to work properly when tested from the kernel
3342 #define SMART_RANGE_MAP                                                       \
3343 	__attribute__((cleanup(cleanup_map))) = create_map(0, vm_compute_max_offset(true))
3344 
3345 #define CURRENT_MAP = current_map()
3346 
3347 #endif
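/*
 * Usage sketch (illustrative): these macros are spliced after a declarator,
 * so in the kernel build
 *   MAP_T map SMART_MAP;
 * expands to roughly
 *   MAP_T map __attribute__((cleanup(cleanup_map))) = create_map(0, ~0ull);
 * giving the map scope-bound construction and destruction.
 */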
3348 
3349 // Allocate with an address hint.
3350 // Important for kernel tests' empty vm_maps
3351 // to avoid allocating near address 0 and ~0.
3352 static kern_return_t
3353 allocate_away_from_zero(
3354 	MAP_T               map,
3355 	mach_vm_address_t  *address,
3356 	mach_vm_size_t      size,
3357 	mach_vm_size_t      align_mask,
3358 	int                 additional_map_flags)
3359 {
3360 	*address = 2ull * 1024 * 1024 * 1024; // 2 GB address hint
3361 	return mach_vm_map(map, address, size, align_mask,
3362 	           VM_FLAGS_ANYWHERE | additional_map_flags, 0, 0, 0,
3363 	           VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
3364 }
3365 
3366 // allocate a purgeable VM region with size and permissions
3367 // and deallocate it at end of scope
3368 #define SMART_ALLOCATE_PURGEABLE_VM(map, size, perm)                              \
3369     __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, 0, perm, false, VM_FLAGS_PURGABLE)
3370 
3371 // allocate a VM region with size and permissions
3372 // and deallocate it at end of scope
3373 #define SMART_ALLOCATE_VM(map, size, perm)                              \
3374     __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, 0, perm, false, 0)
3375 
3376 // allocate a VM region with size and permissions and alignment
3377 // and deallocate it at end of scope
3378 #define SMART_ALLOCATE_ALIGNED_VM(map, size, align_mask, perm)          \
3379     __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, align_mask, perm, false, 0)
3380 
3381 // allocate a VM region with size and permissions
3382 // and deallocate it at end of scope
3383 // If no such region could be allocated, return {.addr = 0}
3384 #define SMART_TRY_ALLOCATE_VM(map, size, perm)                              \
3385     __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, 0, perm, true, 0)
3386 
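/*
 * Usage sketch (illustrative): the allocation is torn down when the
 * variable goes out of scope.
 *   allocation_t a SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
 *   // [a.addr, a.addr + a.size) is mapped, surrounded by unallocated
 *   // pages and prot-none guard pages; cleanup_allocation() runs at
 *   // end of scope.
 */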
3387 // a VM allocation with unallocated pages around it
3388 typedef struct {
3389 	MAP_T map;
3390 	addr_t guard_size;
3391 	addr_t guard_prefix;        // guard_size bytes
3392 	addr_t unallocated_prefix;  // guard_size bytes
3393 	addr_t addr;
3394 	addr_t size;
3395 	addr_t unallocated_suffix;  // guard_size bytes
3396 	addr_t guard_suffix;        // guard_size bytes
3397 } allocation_t;
3398 
3399 static allocation_t
3400 create_allocation(MAP_T new_map, mach_vm_address_t new_size, mach_vm_size_t align_mask,
3401     vm_prot_t perm, bool allow_failure, int additional_map_flags)
3402 {
3403 	// allocations in address order:
3404 	// 16K guard_prefix (allocated, prot none)
3405 	// 16K unallocated_prefix (unallocated)
3406 	// N   addr..addr+size
3407 	// 16K unallocated_suffix (unallocated)
3408 	// 16K guard_suffix (allocated, prot none)
3409 
3410 	// allocate new_size + 4 * 16K bytes
3411 	// then carve it up into our regions
3412 
3413 	allocation_t result;
3414 
3415 	result.map = new_map;
3416 
3417 	// this implementation only works with some alignment values
3418 	assert(align_mask == 0 || align_mask == KB4 - 1 || align_mask == KB16 - 1);
3419 
3420 	result.guard_size = KB16;
3421 	result.size = round_up_page(new_size, KB16);
3422 	if (result.size == 0 && allow_failure) {
3423 		return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
3424 	}
3425 	assert(result.size != 0);
3426 
3427 	mach_vm_address_t allocated_base;
3428 	mach_vm_size_t allocated_size = result.size;
3429 	if (__builtin_add_overflow(result.size, result.guard_size * 4, &allocated_size)) {
3430 		if (allow_failure) {
3431 			return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
3432 		} else {
3433 			assert(false);
3434 		}
3435 	}
3436 
3437 	kern_return_t kr;
3438 	kr = allocate_away_from_zero(result.map, &allocated_base, allocated_size,
3439 	    align_mask, additional_map_flags);
3440 	if (kr != 0 && allow_failure) {
3441 		return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
3442 	}
3443 	assert(kr == 0);
3444 
3445 	result.guard_prefix = (addr_t)allocated_base;
3446 	result.unallocated_prefix = result.guard_prefix + result.guard_size;
3447 	result.addr = result.unallocated_prefix + result.guard_size;
3448 	result.unallocated_suffix = result.addr + result.size;
3449 	result.guard_suffix = result.unallocated_suffix + result.guard_size;
3450 
3451 	kr = mach_vm_protect(result.map, result.addr, result.size, false, perm);
3452 	assert(kr == 0);
3453 	kr = mach_vm_protect(result.map, result.guard_prefix, result.guard_size, true, VM_PROT_NONE);
3454 	assert(kr == 0);
3455 	kr = mach_vm_protect(result.map, result.guard_suffix, result.guard_size, true, VM_PROT_NONE);
3456 	assert(kr == 0);
3457 	kr = mach_vm_deallocate(result.map, result.unallocated_prefix, result.guard_size);
3458 	assert(kr == 0);
3459 	kr = mach_vm_deallocate(result.map, result.unallocated_suffix, result.guard_size);
3460 	assert(kr == 0);
3461 
3462 	return result;
3463 }
3464 
3465 // Mark this allocation as deallocated by something else.
3466 // This means cleanup_allocation() won't deallocate it twice.
3467 // cleanup_allocation() will still free the guard pages.
3468 static void
3469 set_already_deallocated(allocation_t *allocation)
3470 {
3471 	allocation->addr = 0;
3472 	allocation->size = 0;
3473 }
3474 
3475 static void
3476 cleanup_allocation(allocation_t *allocation)
3477 {
3478 	// fixme verify allocations and unallocated spaces still exist where we expect
3479 	if (allocation->size) {
3480 		(void)mach_vm_deallocate(allocation->map, allocation->addr, allocation->size);
3481 	}
3482 	if (allocation->guard_size) {
3483 		(void)mach_vm_deallocate(allocation->map, allocation->guard_prefix, allocation->guard_size);
3484 		(void)mach_vm_deallocate(allocation->map, allocation->guard_suffix, allocation->guard_size);
3485 	}
3486 }
3487 
3488 
3489 // unallocate a VM region with size
3490 // and deallocate it at end of scope
3491 #define SMART_UNALLOCATE_VM(map, size)                                  \
3492 	__attribute__((cleanup(cleanup_unallocation))) = create_unallocation(map, size)
3493 
3494 // unallocate a VM region with size
3495 // and deallocate it at end of scope
3496 // If no such region could be allocated, return {.addr = 0}
3497 #define SMART_TRY_UNALLOCATE_VM(map, size)                                  \
3498 	__attribute__((cleanup(cleanup_unallocation))) = create_unallocation(map, size, true)
3499 
3500 // a VM space with allocated pages around it
3501 typedef struct {
3502 	MAP_T map;
3503 	addr_t guard_size;
3504 	addr_t guard_prefix;  // 16K
3505 	addr_t addr;
3506 	addr_t size;
3507 	addr_t guard_suffix;  // 16K
3508 } unallocation_t;
3509 
3510 static unallocation_t __attribute__((overloadable))
3511 create_unallocation(MAP_T new_map, mach_vm_address_t new_size, bool allow_failure)
3512 {
3513 	// allocations in address order:
3514 	// 16K guard_prefix (allocated, prot none)
3515 	// N   addr..addr+size (unallocated)
3516 	// 16K guard_suffix (allocated, prot none)
3517 
3518 	// allocate new_size + 2 * 16K bytes
3519 	// then carve it up into our regions
3520 
3521 	unallocation_t result;
3522 
3523 	result.map = new_map;
3524 
3525 	result.guard_size = KB16;
3526 	result.size = round_up_page(new_size, KB16);
3527 	if (result.size == 0 && allow_failure) {
3528 		return (unallocation_t){new_map, 0, 0, 0, 0, 0};
3529 	}
3530 	assert(result.size != 0);
3531 
3532 	mach_vm_address_t allocated_base;
3533 	mach_vm_size_t allocated_size = result.size;
3534 	if (__builtin_add_overflow(result.size, result.guard_size * 2, &allocated_size)) {
3535 		if (allow_failure) {
3536 			return (unallocation_t){new_map, 0, 0, 0, 0, 0};
3537 		} else {
3538 			assert(false);
3539 		}
3540 	}
3541 	kern_return_t kr;
3542 	kr = allocate_away_from_zero(result.map, &allocated_base, allocated_size, 0, 0);
3543 	if (kr != 0 && allow_failure) {
3544 		return (unallocation_t){new_map, 0, 0, 0, 0, 0};
3545 	}
3546 	assert(kr == 0);
3547 
3548 	result.guard_prefix = (addr_t)allocated_base;
3549 	result.addr = result.guard_prefix + result.guard_size;
3550 	result.guard_suffix = result.addr + result.size;
3551 
3552 	kr = mach_vm_deallocate(result.map, result.addr, result.size);
3553 	assert(kr == 0);
3554 	kr = mach_vm_protect(result.map, result.guard_prefix, result.guard_size, true, VM_PROT_NONE);
3555 	assert(kr == 0);
3556 	kr = mach_vm_protect(result.map, result.guard_suffix, result.guard_size, true, VM_PROT_NONE);
3557 	assert(kr == 0);
3558 
3559 	return result;
3560 }
3561 
3562 static unallocation_t __attribute__((overloadable))
3563 create_unallocation(MAP_T new_map, mach_vm_address_t new_size)
3564 {
3565 	return create_unallocation(new_map, new_size, false /*allow_failure*/);
3566 }
3567 
3568 static void
3569 cleanup_unallocation(unallocation_t *unallocation)
3570 {
3571 	// fixme verify allocations and unallocated spaces still exist where we expect
3572 	if (unallocation->guard_size) {
3573 		(void)mach_vm_deallocate(unallocation->map, unallocation->guard_prefix, unallocation->guard_size);
3574 		(void)mach_vm_deallocate(unallocation->map, unallocation->guard_suffix, unallocation->guard_size);
3575 	}
3576 }
3577 
3578 
3579 // vm_deferred_reclamation_buffer_init_internal tests
3580 typedef struct {
3581 	task_t task;
3582 	mach_vm_address_t address;
3583 	mach_vm_size_t size;
3584 	char *name;
3585 } reclamation_buffer_init_trial_t;
3586 
3587 typedef struct {
3588 	unsigned count;
3589 	unsigned capacity;
3590 	reclamation_buffer_init_trial_t list[];
3591 } reclamation_buffer_init_trials_t;
3592 
3593 TRIALS_IMPL(reclamation_buffer_init)
3594 
3595 #define RECLAMATION_BUFFER_INIT_TRIAL(new_task, new_address, new_size, new_name) \
3596 (reclamation_buffer_init_trial_t){ .task = (task_t)(new_task), \
3597 	    .address = (mach_vm_address_t)(new_address), \
3598 	    .size = (mach_vm_size_t)(new_size), \
3599 	    .name = new_name }
3600 
3601 /* fixme reclaim struct declarations unavailable outside __LP64__ */
3602 #if __LP64__
3603 #define VM_TEST_RECLAIM_BUFFER_SIZE (sizeof(struct mach_vm_reclaim_buffer_v1_s) + 2 * sizeof(struct mach_vm_reclaim_entry_v1_s))
3604 #else
3605 #define VM_TEST_RECLAIM_BUFFER_SIZE     64
3606 #endif
3607 /* __LP64__ */
3608 
3609 #define RECLAMATION_BUFFER_INIT_EXTRA_TRIALS   7
3610 
3611 reclamation_buffer_init_trials_t *
3612 generate_reclamation_buffer_init_trials(void)
3613 {
3614 	MAP_T map SMART_MAP;
3615 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3616 	addr_trials_t *addr_trials SMART_ADDR_TRIALS(0);
3617 	reclamation_buffer_init_trials_t *trials = allocate_reclamation_buffer_init_trials(addr_trials->count + RECLAMATION_BUFFER_INIT_EXTRA_TRIALS);
3618 	for (size_t i = 0; i < addr_trials->count; i++) {
3619 		char *buf;
3620 		mach_vm_size_t size = VM_TEST_RECLAIM_BUFFER_SIZE * i * PAGE_SIZE;
3621 		kasprintf(&buf, "%s, size: 0x%llx", addr_trials->list[i].name, size);
3622 		append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), addr_trials->list[i].addr, size, buf));
3623 	}
3624 
3625 	append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, 0, "size: 0"));
3626 	append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, UINT64_MAX - 1, "size: UINT64_MAX - 1"));
3627 	append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, UINT64_MAX, "size: UINT64_MAX"));
3628 	append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, UINT64_MAX - PAGE_SIZE + 1, "size: UINT64_MAX - PAGE_SIZE + 1"));
3629 	append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(NULL, NULL, 0, "null task, null address, size: 0"));
3630 	append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), NULL, 0, "null address, size: 0"));
3631 	append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, VM_TEST_RECLAIM_BUFFER_SIZE, "valid arguments to test KERN_NOT_SUPPORTED"));
3632 
3633 	return trials;
3634 }
3635 
3636 #define SMART_RECLAMATION_BUFFER_INIT_TRIALS()    \
3637 	__attribute__((cleanup(cleanup_reclamation_buffer_init_trials))) \
3638 	= generate_reclamation_buffer_init_trials();
3639 
3640 static void __attribute__((used))
3641 cleanup_reclamation_buffer_init_trials(reclamation_buffer_init_trials_t **trials)
3642 {
3643 	for (size_t i = 0; i < (*trials)->count - RECLAMATION_BUFFER_INIT_EXTRA_TRIALS; i++) {
3644 		kfree_str((*trials)->list[i].name);
3645 	}
3646 	free_trials(*trials);
3647 }
3648 
3649 static kern_return_t
3650 call_mach_vm_deferred_reclamation_buffer_init(task_t task, mach_vm_address_t address, mach_vm_size_t size)
3651 {
3652 	kern_return_t kr = 0;
3653 	mach_vm_address_t saved_address = address;
3654 	if (task && size > 0 && address == 0) {
3655 		// prevent assert3u(*address, !=, 0)
3656 		return PANIC;
3657 	}
3658 
3659 	kr = mach_vm_deferred_reclamation_buffer_init(task, &address, size);
3660 
3661 	// Out-param validation: failure shouldn't change the inout address; success must update it.
3662 	if (kr != KERN_SUCCESS && saved_address != address) {
3663 		kr = OUT_PARAM_BAD;
3664 	}
3665 	if (kr == KERN_SUCCESS && saved_address == address) {
3666 		kr = OUT_PARAM_BAD;
3667 	}
3668 
3669 	return kr;
3670 }
3671 
3672 
3673 // mach_vm_remap_external/vm_remap_external/vm32_remap/mach_vm_remap_new_external infra
3674 // mach_vm_remap/mach_vm_remap_new_kernel infra
3675 
3676 /*
3677  * This comment describes the testing approach that was fleshed out through
3678  * writing the tests for the map family of functions, and more fully realized
3679  * for the remap family of functions.
3680  *
3681  * This method attempts to radically minimize code duplication, at the expense of
3682  * decreased navigability (cmd+click is unlikely to work for you for this code)
3683  * and increased upfront costs for understanding this code. Maintainability
3684  * should be better in most cases: if a fix needs to happen, it can be
3685  * implemented in the right place once and doesn’t need to be copy-and-pasted
3686  * in multiple duplicated functions. There may however be cases where the
3687  * change you want to make doesn’t fit the spirit of this approach (for
3688  * instance changing the behavior of the test for only one function in the
3689  * family).
3690  *
3691  * The framework is built around the idea that there are three types of
3692  * parameters:
3693  * 1. Parameters that will be fixed for all calls to the function (e.g. some
3694  *    uncommon type specific to the function that doesn’t impact the input
3695  *    validation flow)
3696  * 2. Parameters that cause input validation to change significantly (typically
3697  *    flags, e.g. fixed vs anywhere). For those we basically want to treat
3698  *    different values of the flags as calling into different functions (for
3699  *    the purpose of input validation).
3700  * 3. Parameters that can be tested. For every test this is further broken down
3701  *    into 2 subtypes:
3702  *        A. Parameters being iterated over during the test (e.g. start+size)
3703  *        B. Parameters that should stay fixed during this test (e.g. pick a
3704  *           sane value of prot and pass that same value for all values of
3705  *           start/size)
3706  *
3707  * Often, many functions have very similar signatures (they are in the same
3708  * function family). We want to avoid copy/pasting tests for each function in
3709  * the family.
3710  *
3711  * Here is the flow used for the remap family of functions:
3712  * 1. Typedef a function type with shared parameters (see remap_fn_t)
3713  * 2. Define function wrappers that fit the above typedef for each function
3714  *    in the family (see e.g. mach_vm_remap_new_kernel_wrapped). These might
3715  *    set values for “type 1” params.
3716  * 3. Define “helper” functions that take in parameters of types 2 and 3.A.,
3717  *    and call the wrapper, filling in type 3.B. params. See, e.g.,
3718  *    help_call_remap_fn__src_size. For remap, all helpers can easily be
3719  *    implemented as a single call to a core helper function
3720  *    help_call_remap_fn__src_size_etc.
3721  * 4. Define generic “caller” functions that take in a wrapper and parameters
3722  *    of type 3.A. and call the helper. Macros are used to mass implement these
3723  *    for all values of type 2 parameters and for all functions in the family.
3724  *    See, e.g., `IMPL_FROM_HELPER(dst_size);`.
3725  * 5. Specialize the above "caller" functions for each wrapper in the family,
3726  *    again using macros. See `#define IMPL(remap_fn)` and its uses below.
3727  *    This results in a number of specialized caller functions that is the
3728  *    product of the number of functions in the family by the number of
3729  *    variants induced by type 2 parameters.
3730  * 6. Use macros to call test harnesses on caller functions en masse at test
3731  *    time for all functions. See the call sites in `vm_parameter_validation.c`
3732  *    e.g. `RUN_ALL(mach_vm_remap_new_user, , mach_vm_remap_new);`.
3733  */
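/*
 * Miniature sketch of the flow above (hypothetical names, for illustration
 * only; the real machinery for the remap family follows below):
 *
 *   // 1. shared signature for the family
 *   typedef kern_return_t (*fn_t)(MAP_T map, mach_vm_address_t addr, int flags);
 *   // 2. per-function wrapper fixing "type 1" params
 *   //      static kern_return_t f_wrapped(MAP_T, mach_vm_address_t, int);
 *   // 3. helper taking type 2 and 3.A. params, filling in type 3.B. params
 *   //      static kern_return_t help_call_fn__addr(fn_t, MAP_T, int flags, mach_vm_address_t);
 *   // 4+5. macro-generated callers, one per (type 2 variant x wrapper):
 *   //      call_f_wrapped__fixed__addr(MAP_T, mach_vm_address_t)
 *   // 6. the test harness iterates interesting trial values over a caller
 */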
3734 
3735 typedef kern_return_t (*remap_fn_t)(vm_map_t target_task,
3736     mach_vm_address_t *target_address,
3737     mach_vm_size_t size,
3738     mach_vm_offset_t mask,
3739     int flags,
3740     vm_map_t src_task,
3741     mach_vm_address_t src_address,
3742     boolean_t copy,
3743     vm_prot_t *cur_protection,
3744     vm_prot_t *max_protection,
3745     vm_inherit_t inheritance);
3746 
3747 // helpers that call a provided function with certain sets of params
3748 
3749 static kern_return_t
3750 help_call_remap_fn__src_size_etc(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_prot_t cur, vm_prot_t max, vm_inherit_t inherit)
3751 {
3752 	kern_return_t kr;
3753 #if KERNEL
3754 	if (is_random_anywhere(flags)) {
3755 		// RANDOM_ADDR is likely to fall outside pmap's range
3756 		return PANIC;
3757 	}
3758 #endif
3759 	if (is_fixed_overwrite(flags)) {
3760 		// Try to allocate a dest for vm_remap to fixed-overwrite at.
3761 		allocation_t dst_alloc SMART_TRY_ALLOCATE_VM(map, size, VM_PROT_DEFAULT);
3762 		mach_vm_address_t out_addr = dst_alloc.addr;
3763 		if (out_addr == 0) {
3764 			// Failed to allocate. Clear VM_FLAGS_OVERWRITE
3765 			// to prevent wild mappings.
3766 			flags &= ~VM_FLAGS_OVERWRITE;
3767 		}
3768 		kr = fn(map, &out_addr, size, 0, flags,
3769 		    map, src, copy, &cur, &max, inherit);
3770 	} else {
3771 		// vm_remap will allocate anywhere. Deallocate if it succeeds.
3772 		mach_vm_address_t out_addr = 0;
3773 		kr = fn(map, &out_addr, size, 0, flags,
3774 		    map, src, copy, &cur, &max, inherit);
3775 		if (kr == 0) {
3776 			(void)mach_vm_deallocate(map, out_addr, size);
3777 		}
3778 	}
3779 	return kr;
3780 }
3781 
3782 static kern_return_t
3783 help_call_remap_fn__src_size(remap_fn_t fn, MAP_T map, int unused_flags __unused, bool copy, mach_vm_address_t src, mach_vm_size_t size)
3784 {
3785 	assert(unused_flags == 0);
3786 	return help_call_remap_fn__src_size_etc(fn, map, VM_FLAGS_ANYWHERE, copy, src, size, 0, 0, VM_INHERIT_NONE);
3787 }
3788 
3789 static kern_return_t
3790 help_call_remap_fn__dst_size(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t dst, mach_vm_size_t size)
3791 {
3792 	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3793 	mach_vm_address_t out_addr = dst;
3794 	vm_prot_t cur = 0;
3795 	vm_prot_t max = 0;
3796 	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
3797 	    map, src.addr, copy, &cur, &max, VM_INHERIT_NONE);
3798 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
3799 	return kr;
3800 }
3801 
3802 static kern_return_t
3803 help_call_remap_fn__inherit(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_inherit_t inherit)
3804 {
3805 	return help_call_remap_fn__src_size_etc(fn, map, flags, copy, src, size, 0, 0, inherit);
3806 }
3807 
3808 static kern_return_t
3809 help_call_remap_fn__flags(remap_fn_t fn, MAP_T map, int unused_flags __unused, bool copy, mach_vm_address_t src, mach_vm_size_t size, int trial_flags)
3810 {
3811 	assert(unused_flags == 0);
3812 	return help_call_remap_fn__src_size_etc(fn, map, trial_flags, copy, src, size, 0, 0, VM_INHERIT_NONE);
3813 }
3814 
3815 static kern_return_t
3816 help_call_remap_fn__prot_pairs(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_prot_t cur, vm_prot_t max)
3817 {
3818 	return help_call_remap_fn__src_size_etc(fn, map, flags, copy, src, size, cur, max, VM_INHERIT_NONE);
3819 }
3820 
3821 static kern_return_t
3822 help_call_remap_fn__src_dst_size(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst)
3823 {
3824 	mach_vm_address_t out_addr = dst;
3825 	vm_prot_t cur = 0;
3826 	vm_prot_t max = 0;
3827 	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
3828 	    map, src, copy, &cur, &max, VM_INHERIT_NONE);
3829 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
3830 	return kr;
3831 }
3832 
3833 #define GET_INSTANCE(_0, _1, _2, _3, _4, _5, _6, _7, _8, NAME, ...) NAME
3834 
3835 #define DROP_TYPES_8(a, b, ...) , b DROP_TYPES_6(__VA_ARGS__)
3836 #define DROP_TYPES_6(a, b, ...) , b DROP_TYPES_4(__VA_ARGS__)
3837 #define DROP_TYPES_4(a, b, ...) , b DROP_TYPES_2(__VA_ARGS__)
3838 #define DROP_TYPES_2(a, b, ...) , b
3839 #define DROP_TYPES_0()
3840 
3841 // Parses lists of "type1, arg1, type2, arg2" into "arg1, arg2"
3842 #define DROP_TYPES(...) GET_INSTANCE(_0 __VA_OPT__(,) __VA_ARGS__, DROP_TYPES_8, DROP_TYPES_8, DROP_TYPES_6, DROP_TYPES_6, DROP_TYPES_4, DROP_TYPES_4, DROP_TYPES_2, DROP_TYPES_2, DROP_TYPES_0, DROP_TYPES_0)(__VA_ARGS__)
3843 
3844 #define DROP_COMMAS_8(a, b, ...) , a b DROP_COMMAS_6(__VA_ARGS__)
3845 #define DROP_COMMAS_6(a, b, ...) , a b DROP_COMMAS_4(__VA_ARGS__)
3846 #define DROP_COMMAS_4(a, b, ...) , a b DROP_COMMAS_2(__VA_ARGS__)
3847 #define DROP_COMMAS_2(a, b) , a b
3848 #define DROP_COMMAS_0()
3849 
3850 // Parses lists of "type1, arg1, type2, arg2" into "type1 arg1, type2 arg2"
3851 #define DROP_COMMAS(...) GET_INSTANCE(_0 __VA_OPT__(,) __VA_ARGS__, DROP_COMMAS_8, DROP_COMMAS_8, DROP_COMMAS_6, DROP_COMMAS_6, DROP_COMMAS_4, DROP_COMMAS_4, DROP_COMMAS_2, DROP_COMMAS_2, DROP_COMMAS_0)(__VA_ARGS__)
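/*
 * Example expansions (illustrative):
 *   DROP_COMMAS(vm_inherit_t, inherit)  ->  , vm_inherit_t inherit
 *   DROP_TYPES(vm_inherit_t, inherit)   ->  , inherit
 * so a generated function can be declared with
 * "... mach_vm_size_t size DROP_COMMAS(__VA_ARGS__))" and forward its extra
 * arguments with "... size DROP_TYPES(__VA_ARGS__))". With no arguments,
 * both expand to nothing.
 */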
3852 
3853 // specialize helpers into implementations of call functions that are still agnostic to the remap function
3854 
3855 #define IMPL_ONE_FROM_HELPER(type, variant, flags, copy, ...)                                                                                           \
3856 	static kern_return_t                                                                                                                            \
3857 	call_remap_fn ## __ ## variant ## __ ## type(remap_fn_t fn, MAP_T map, mach_vm_address_t src, mach_vm_size_t size DROP_COMMAS(__VA_ARGS__)) {   \
3858 	        return help_call_remap_fn__ ## type(fn, map, flags, copy, src, size DROP_TYPES(__VA_ARGS__));                                           \
3859 	}
3860 
3861 #define IMPL_FROM_HELPER(type, ...) \
3862 	IMPL_ONE_FROM_HELPER(type, fixed, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, ##__VA_ARGS__)         \
3863 	IMPL_ONE_FROM_HELPER(type, fixed_copy, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, ##__VA_ARGS__)     \
3864 	IMPL_ONE_FROM_HELPER(type, anywhere, VM_FLAGS_ANYWHERE, false, ##__VA_ARGS__)   \
3865 
3866 IMPL_FROM_HELPER(dst_size);
3867 IMPL_FROM_HELPER(inherit, vm_inherit_t, inherit);
3868 IMPL_FROM_HELPER(prot_pairs, vm_prot_t, cur, vm_prot_t, max);
3869 IMPL_FROM_HELPER(src_dst_size, mach_vm_address_t, dst);
3870 
3871 IMPL_ONE_FROM_HELPER(flags, nocopy, 0 /*ignored*/, false, int, flag)
3872 IMPL_ONE_FROM_HELPER(flags, copy, 0 /*ignored*/, true, int, flag)
3873 
3874 IMPL_ONE_FROM_HELPER(src_size, nocopy, 0 /*ignored*/, false)
3875 IMPL_ONE_FROM_HELPER(src_size, copy, 0 /*ignored*/, true)
3876 
3877 #undef IMPL_FROM_HELPER
3878 #undef IMPL_ONE_FROM_HELPER
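/*
 * For illustration, IMPL_FROM_HELPER(dst_size) above generated (among
 * others) this caller:
 *   static kern_return_t
 *   call_remap_fn__fixed__dst_size(remap_fn_t fn, MAP_T map,
 *       mach_vm_address_t src, mach_vm_size_t size)
 *   {
 *           return help_call_remap_fn__dst_size(fn, map,
 *               VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, src, size);
 *   }
 */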
3879 
3880 // define call functions that are specific to the remap function, and rely on implementations above under the hood
3881 
3882 #define IMPL_REMAP_FN_HELPER(remap_fn, instance, type, ...)                                             \
3883     static kern_return_t                                                                                \
3884     call_ ## remap_fn ## __ ## instance ## __ ## type(MAP_T map DROP_COMMAS(__VA_ARGS__))               \
3885     {                                                                                                   \
3886 	return call_remap_fn__ ## instance ## __ ## type(remap_fn, map DROP_TYPES(__VA_ARGS__));        \
3887     }
3888 
3889 #define IMPL_REMAP_FN_SRC_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, src_size, mach_vm_address_t, src, mach_vm_size_t, size)
3890 #define IMPL_REMAP_FN_DST_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, dst_size, mach_vm_address_t, src, mach_vm_size_t, size)
3891 #define IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, src_dst_size, mach_vm_address_t, src, mach_vm_size_t, size, mach_vm_address_t, dst)
3892 #define IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, inherit, mach_vm_address_t, src, mach_vm_size_t, size, vm_inherit_t, inherit)
3893 #define IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, flags, mach_vm_address_t, src, mach_vm_size_t, size, int, flags)
3894 #define IMPL_REMAP_FN_PROT_PAIRS(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, prot_pairs, mach_vm_address_t, src, mach_vm_size_t, size, vm_prot_t, cur, vm_prot_t, max)
3895 
3896 #define IMPL(remap_fn)                                          \
3897 	IMPL_REMAP_FN_SRC_SIZE(remap_fn, nocopy);               \
3898 	IMPL_REMAP_FN_SRC_SIZE(remap_fn, copy);                 \
3899                                                                 \
3900 	IMPL_REMAP_FN_DST_SIZE(remap_fn, fixed);                \
3901 	IMPL_REMAP_FN_DST_SIZE(remap_fn, fixed_copy);           \
3902 	IMPL_REMAP_FN_DST_SIZE(remap_fn, anywhere);             \
3903                                                                 \
3904 	IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, fixed);        \
3905 	IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, fixed_copy);   \
3906 	IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, anywhere);     \
3907                                                                 \
3908 	IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, nocopy);         \
3909 	IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, copy);           \
3910                                                                 \
3911 	IMPL_REMAP_FN_PROT_PAIRS(remap_fn, fixed);              \
3912 	IMPL_REMAP_FN_PROT_PAIRS(remap_fn, fixed_copy);         \
3913 	IMPL_REMAP_FN_PROT_PAIRS(remap_fn, anywhere);           \
3914                                                                 \
3915 	IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, fixed);            \
3916 	IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, fixed_copy);       \
3917 	IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, anywhere);         \
3918 
3919 static inline void
3920 check_mach_vm_map_outparam_changes(kern_return_t * kr, mach_vm_address_t addr, mach_vm_address_t saved_addr,
3921     int flags, MAP_T map)
3922 {
3923 	if (*kr == KERN_SUCCESS) {
3924 		if (is_fixed(flags)) {
3925 			if (addr != truncate_vm_map_addr_with_flags(map, saved_addr, flags)) {
3926 				*kr = OUT_PARAM_BAD;
3927 			}
3928 		}
3929 	} else {
3930 		if (addr != saved_addr) {
3931 			*kr = OUT_PARAM_BAD;
3932 		}
3933 	}
3934 }
3935 
3936 static inline void
3937 check_mach_vm_remap_outparam_changes(kern_return_t * kr, mach_vm_address_t addr, mach_vm_address_t saved_addr,
3938     int flags, vm_prot_t cur_prot, vm_prot_t saved_cur_prot, vm_prot_t max_prot, vm_prot_t saved_max_prot, MAP_T map,
3939     mach_vm_address_t src_addr)
3940 {
3941 	if (*kr == KERN_SUCCESS) {
3942 		if (is_fixed(flags)) {
3943 			mach_vm_address_t expected_misalignment = get_expected_remap_misalignment(map, src_addr, flags);
3944 			if (addr != trunc_down_map(map, saved_addr) + expected_misalignment) {
3945 				*kr = OUT_PARAM_BAD;
3946 			}
3947 		}
3948 	} else {
3949 		if ((addr != saved_addr) || (cur_prot != saved_cur_prot) ||
3950 		    (max_prot != saved_max_prot)) {
3951 			*kr = OUT_PARAM_BAD;
3952 		}
3953 	}
3954 }
3955 
3956 #if KERNEL
3957 
3958 static inline kern_return_t
3959 mach_vm_remap_wrapped_kern(vm_map_t target_task,
3960     mach_vm_address_t *target_address,
3961     mach_vm_size_t size,
3962     mach_vm_offset_t mask,
3963     int flags,
3964     vm_map_t src_task,
3965     mach_vm_address_t src_address,
3966     boolean_t copy,
3967     vm_prot_t *cur_protection,
3968     vm_prot_t *max_protection,
3969     vm_inherit_t inheritance)
3970 {
3971 	if (dealloc_would_time_out(*target_address, size, target_task)) {
3972 		return ACCEPTABLE;
3973 	}
3974 
3975 	mach_vm_address_t saved_addr = *target_address;
3976 	vm_prot_t saved_cur_prot = *cur_protection;
3977 	vm_prot_t saved_max_prot = *max_protection;
3978 	kern_return_t kr = mach_vm_remap(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
3979 	check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags,
3980 	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
3981 	return kr;
3982 }
3983 IMPL(mach_vm_remap_wrapped_kern)
3984 
3985 static inline kern_return_t
3986 mach_vm_remap_new_kernel_wrapped(vm_map_t target_task,
3987     mach_vm_address_t *target_address,
3988     mach_vm_size_t size,
3989     mach_vm_offset_t mask,
3990     int flags,
3991     vm_map_t src_task,
3992     mach_vm_address_t src_address,
3993     boolean_t copy,
3994     vm_prot_t *cur_protection,
3995     vm_prot_t *max_protection,
3996     vm_inherit_t inheritance)
3997 {
3998 	if (dealloc_would_time_out(*target_address, size, target_task)) {
3999 		return ACCEPTABLE;
4000 	}
4001 
4002 	mach_vm_address_t saved_addr = *target_address;
4003 	vm_prot_t saved_cur_prot = *cur_protection;
4004 	vm_prot_t saved_max_prot = *max_protection;
4005 	kern_return_t kr = mach_vm_remap_new_kernel(target_task, target_address, size, mask, FLAGS_AND_TAG(flags, VM_KERN_MEMORY_OSFMK), src_task, src_address, copy, cur_protection, max_protection, inheritance);
4006 	// remap_new sets VM_FLAGS_RETURN_DATA_ADDR
4007 	check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags | VM_FLAGS_RETURN_DATA_ADDR,
4008 	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4009 	return kr;
4010 }
4011 IMPL(mach_vm_remap_new_kernel_wrapped)
4012 
4013 #else /* !KERNEL */
4014 
4015 static inline kern_return_t
4016 mach_vm_remap_user(vm_map_t target_task,
4017     mach_vm_address_t *target_address,
4018     mach_vm_size_t size,
4019     mach_vm_offset_t mask,
4020     int flags,
4021     vm_map_t src_task,
4022     mach_vm_address_t src_address,
4023     boolean_t copy,
4024     vm_prot_t *cur_protection,
4025     vm_prot_t *max_protection,
4026     vm_inherit_t inheritance)
4027 {
4028 	mach_vm_address_t saved_addr = *target_address;
4029 	vm_prot_t saved_cur_prot = *cur_protection;
4030 	vm_prot_t saved_max_prot = *max_protection;
4031 	kern_return_t kr = mach_vm_remap(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
4032 	check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags,
4033 	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4034 	return kr;
4035 }
4036 IMPL(mach_vm_remap_user)
4037 
4038 static inline kern_return_t
4039 mach_vm_remap_new_user(vm_map_t target_task,
4040     mach_vm_address_t *target_address,
4041     mach_vm_size_t size,
4042     mach_vm_offset_t mask,
4043     int flags,
4044     vm_map_t src_task,
4045     mach_vm_address_t src_address,
4046     boolean_t copy,
4047     vm_prot_t *cur_protection,
4048     vm_prot_t *max_protection,
4049     vm_inherit_t inheritance)
4050 {
4051 	mach_vm_address_t saved_addr = *target_address;
4052 	vm_prot_t saved_cur_prot = *cur_protection;
4053 	vm_prot_t saved_max_prot = *max_protection;
4054 	kern_return_t kr = mach_vm_remap_new(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
4055 	// remap_new sets VM_FLAGS_RETURN_DATA_ADDR
4056 	check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags | VM_FLAGS_RETURN_DATA_ADDR,
4057 	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4058 	return kr;
4059 }
4060 IMPL(mach_vm_remap_new_user)
4061 
4062 #if TEST_OLD_STYLE_MACH
4063 static inline kern_return_t
4064 vm_remap_retyped(vm_map_t target_task,
4065     mach_vm_address_t *target_address,
4066     mach_vm_size_t size,
4067     mach_vm_offset_t mask,
4068     int flags,
4069     vm_map_t src_task,
4070     mach_vm_address_t src_address,
4071     boolean_t copy,
4072     vm_prot_t *cur_protection,
4073     vm_prot_t *max_protection,
4074     vm_inherit_t inheritance)
4075 {
4076 	vm_address_t addr = (vm_address_t)*target_address;
4077 	vm_prot_t saved_cur_prot = *cur_protection;
4078 	vm_prot_t saved_max_prot = *max_protection;
4079 	kern_return_t kr = vm_remap(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, src_task, (vm_address_t)src_address, copy, cur_protection, max_protection, inheritance);
4080 	check_mach_vm_remap_outparam_changes(&kr, addr, (vm_address_t) *target_address, flags,
4081 	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4082 	*target_address = addr;
4083 	return kr;
4084 }
4085 
4086 IMPL(vm_remap_retyped)
4087 
4088 #endif /* TEST_OLD_STYLE_MACH */
4089 #endif /* !KERNEL */
4090 
4091 #undef IMPL
4092 #undef IMPL_REMAP_FN_SRC_SIZE
4093 #undef IMPL_REMAP_FN_DST_SIZE
4094 #undef IMPL_REMAP_FN_SRC_DST_SIZE
4095 #undef IMPL_REMAP_FN_SRC_SIZE_INHERIT
4096 #undef IMPL_REMAP_FN_SRC_SIZE_FLAGS
4097 #undef IMPL_REMAP_FN_PROT_PAIRS
4098 #undef IMPL_REMAP_FN_HELPER
4099 
4100 
4101 /////////////////////////////////////////////////////
4102 // Test runners for functions with commonly-used parameter types and setup code.
4103 
4104 #define IMPL(NAME, T)                                                   \
4105 	/* Test a Mach function */                                      \
4106 	/* Run each trial with an allocated vm region and start/size parameters that reference it. */ \
4107 	typedef kern_return_t (*NAME ## mach_with_start_size_fn)(MAP_T map, T start, T size); \
4108                                                                         \
4109 	/* ...and the allocation has a specified minimum alignment */   \
4110 	static results_t * __attribute__((used))                        \
4111 	test_ ## NAME ## mach_with_allocated_aligned_start_size(NAME ## mach_with_start_size_fn fn, T align_mask, const char *testname) \
4112 	{                                                               \
4113 	        MAP_T map SMART_MAP;                                    \
4114 	        allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT); \
4115 	        start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr); \
4116 	        results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, base.addr, trials->count); \
4117                                                                         \
4118 	        for (unsigned i = 0; i < trials->count; i++) {          \
4119 	                T start = (T)trials->list[i].start;             \
4120 	                T size = (T)trials->list[i].size;               \
4121 	                kern_return_t ret = fn(map, start, size);       \
4122 	                append_result(results, ret, trials->list[i].name); \
4123 	        }                                                       \
4124 	        return results;                                         \
4125 	}                                                               \
4126                                                                         \
4127 	/* ...and the allocation gets default alignment */              \
4128 	static results_t * __attribute__((used))                        \
4129 	test_ ## NAME ## mach_with_allocated_start_size(NAME ## mach_with_start_size_fn fn, const char *testname) \
4130 	{                                                               \
4131 	        return test_ ## NAME ## mach_with_allocated_aligned_start_size(fn, 0, testname); \
4132 	}                                                               \
4133                                                                         \
4134 	/* Test a Mach function. */                                     \
4135 	/* Run each trial with an allocated vm region and an addr parameter that references it. */ \
4136 	typedef kern_return_t (*NAME ## mach_with_addr_fn)(MAP_T map, T addr); \
4137                                                                         \
4138 	static results_t * __attribute__((used))                        \
4139 	test_ ## NAME ## mach_with_allocated_addr_of_size_n(NAME ## mach_with_addr_fn fn, size_t obj_size, const char *testname) \
4140 	{                                                               \
4141 	        MAP_T map SMART_MAP;                                    \
4142 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4143 	        addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);     \
4144 	        /* Do all the addr trials plus one extra trial chosen so that obj_size + addr == 0 */ \
4145 	        uint64_t trial_args[TRIALSARGUMENTS_SIZE] = {base.addr, obj_size}; \
4146 	        results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, trial_args, TRIALSARGUMENTS_SIZE, trials->count + 1); \
4147                                                                         \
4148 	        for (unsigned i = 0; i < trials->count; i++) {          \
4149 	                T addr = (T)trials->list[i].addr;               \
4150 	                kern_return_t ret = fn(map, addr);              \
4151 	                append_result(results, ret, trials->list[i].name); \
4152 	        }                                                       \
4153 	        kern_return_t ret = fn(map, -((T)obj_size));            \
4154 	        char *trial_desc;                                       \
4155 	        kasprintf(&trial_desc, "addr: -0x%lx", obj_size);       \
4156 	        append_result(results, ret, trial_desc);                \
4157 	        kfree_str(trial_desc);                                  \
4158 	        return results;                                         \
4159 	}                                                               \
4160                                                                         \
4161 	/* Test a Mach function. */                                     \
4162 	/* Run each trial with an allocated vm region and an addr parameter that references it. */ \
4164                                                                         \
4165 	static results_t * __attribute__((used))                        \
4166 	test_ ## NAME ## mach_with_allocated_addr(NAME ## mach_with_addr_fn fn, const char *testname) \
4167 	{                                                               \
4168 	        MAP_T map SMART_MAP;                                    \
4169 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4170 	        addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);     \
4171 	        results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count); \
4172                                                                         \
4173 	        for (unsigned i = 0; i < trials->count; i++) {          \
4174 	                T addr = (T)trials->list[i].addr;               \
4175 	                kern_return_t ret = fn(map, addr);              \
4176 	                append_result(results, ret, trials->list[i].name); \
4177 	        }                                                       \
4178 	        return results;                                         \
4179 	}                                                               \
4180                                                                         \
4181 	static results_t * __attribute__((used))                        \
4182 	test_ ## NAME ## mach_with_allocated_purgeable_addr(NAME ## mach_with_addr_fn fn, const char *testname) \
4183 	{                                                               \
4184 	        MAP_T map SMART_MAP;                                    \
4185 	        allocation_t base SMART_ALLOCATE_PURGEABLE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4186 	        addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);     \
4187 	        results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count); \
4188                                                                         \
4189 	        for (unsigned i = 0; i < trials->count; i++) {          \
4190 	                T addr = (T)trials->list[i].addr;               \
4191 	                kern_return_t ret = fn(map, addr);              \
4192 	                append_result(results, ret, trials->list[i].name); \
4193 	        }                                                       \
4194 	        return results;                                         \
4195 	}                                                               \
4196                                                                         \
4197 	/* Test a Mach function. */                                     \
4198 	/* Run each trial with a size parameter. */                     \
4199 	typedef kern_return_t (*NAME ## mach_with_size_fn)(MAP_T map, T size); \
4200                                                                         \
4201 	static results_t * __attribute__((used))                        \
4202 	test_ ## NAME ## mach_with_size(NAME ## mach_with_size_fn fn, const char *testname) \
4203 	{                                                               \
4204 	        MAP_T map SMART_MAP;                                    \
4205 	        size_trials_t *trials SMART_SIZE_TRIALS();              \
4206 	        results_t *results = alloc_results(testname, eSMART_SIZE_TRIALS, trials->count); \
4207                                                                         \
4208 	        for (unsigned i = 0; i < trials->count; i++) {          \
4209 	                T size = (T)trials->list[i].size;               \
4210 	                kern_return_t ret = fn(map, size);              \
4211 	                append_result(results, ret, trials->list[i].name); \
4212 	        }                                                       \
4213 	        return results;                                         \
4214 	}                                                               \
4215                                                                         \
4216 	/* Test a Mach function. */                                     \
4217 	/* Run each trial with an allocated vm region and start/size/offset/object-size parameters. */ \
4218 	typedef kern_return_t (*NAME ## mach_with_start_size_offset_object_fn)(MAP_T map, T addr, T size, T offset, T obj_size); \
4219                                                                         \
4220 	static results_t * __attribute__((used))                        \
4221 	test_ ## NAME ## mach_with_allocated_start_size_offset_object(NAME ## mach_with_start_size_offset_object_fn fn, const char *testname) \
4222 	{                                                               \
4223 	        MAP_T map SMART_MAP;                                    \
4224 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4225 	        start_size_offset_object_trials_t *trials SMART_START_SIZE_OFFSET_OBJECT_TRIALS(); \
4226 	        results_t *results = alloc_results(testname, eSMART_START_SIZE_OFFSET_OBJECT_TRIALS, trials->count); \
4227                                                                         \
4228 	        for (unsigned i = 0; i < trials->count; i++) {          \
4229 	                start_size_offset_object_trial_t trial = slide_trial(trials->list[i], base.addr); \
4230 	                T start = (T)trial.start;                       \
4231 	                T size = (T)trial.size;                         \
4232 	                T offset = (T)trial.offset;                     \
4233 	                T obj_size = (T)trial.obj_size;                 \
4234 	                kern_return_t ret = fn(map, start, size, offset, obj_size); \
4235 	                append_result(results, ret, trials->list[i].name); \
4236 	        }                                                       \
4237 	        return results;                                         \
4238 	}                                                               \
4239 	/* Test a Mach function. */                                     \
4240 	/* Run each trial with an allocated vm region and start/size/offset parameters. */ \
4241 	typedef kern_return_t (*NAME ## mach_with_start_size_offset_fn)(MAP_T map, T addr, T size, T offset, T obj_size); \
4242                                                                         \
4243 	static results_t * __attribute__((used))                        \
4244 	test_ ## NAME ## mach_with_allocated_start_size_offset(NAME ## mach_with_start_size_offset_fn fn, const char *testname) \
4245 	{                                                               \
4246 	        MAP_T map SMART_MAP;                                    \
4247 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4248 	        start_size_offset_trials_t *trials SMART_START_SIZE_OFFSET_TRIALS(); \
4249 	        results_t *results = alloc_results(testname, eSMART_START_SIZE_OFFSET_TRIALS, trials->count); \
4250                                                                         \
4251 	        for (unsigned i = 0; i < trials->count; i++) {          \
4252 	                start_size_offset_trial_t trial = slide_trial(trials->list[i], base.addr); \
4253 	                T start = (T)trial.start;                       \
4254 	                T size = (T)trial.size;                         \
4255 	                T offset = (T)trial.offset;                     \
4256 	                kern_return_t ret = fn(map, start, size, offset, 1); \
4257 	                append_result(results, ret, trials->list[i].name); \
4258 	        }                                                       \
4259 	        return results;                                         \
4260 	}                                                               \
4261                                                                         \
4262 	/* Test a Mach function. */                                     \
4263 	/* Run each trial with an allocated vm region and a set of mmap flags. */ \
4264 	typedef kern_return_t (*NAME ## mach_with_allocated_mmap_flags_fn)(MAP_T map, T addr, T size, int flags); \
4265                                                                         \
4266 	static results_t * __attribute__((used))                        \
4267 	test_ ## NAME ## mach_with_allocated_mmap_flags(NAME ## mach_with_allocated_mmap_flags_fn fn, const char *testname) \
4268 	{                                                               \
4269 	        MAP_T map SMART_MAP;                                    \
4270 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4271 	        mmap_flags_trials_t *trials SMART_MMAP_FLAGS_TRIALS();  \
4272 	        results_t *results = alloc_results(testname, eSMART_MMAP_FLAGS_TRIALS, trials->count); \
4273                                                                         \
4274 	        for (unsigned i = 0; i < trials->count; i++) {          \
4275 	                int flags = trials->list[i].flags;              \
4276 	                kern_return_t ret = fn(map, (T)base.addr, (T)base.size, flags); \
4277 	                append_result(results, ret, trials->list[i].name); \
4278 	        }                                                       \
4279 	        return results;                                         \
4280 	}                                                               \
4281                                                                         \
4282 	/* Test a Mach function. */                                     \
4283 	/* Run each trial with an allocated vm region and a generic 32-bit flag. */ \
4284 	typedef kern_return_t (*NAME ## mach_with_allocated_generic_flag)(MAP_T map, T addr, T size, int flag); \
4285                                                                         \
4286 	static results_t * __attribute__((used))                        \
4287 	test_ ## NAME ## mach_with_allocated_generic_flag(NAME ## mach_with_allocated_generic_flag fn, const char *testname) \
4288 	{                                                               \
4289 	        MAP_T map SMART_MAP;                                    \
4290 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4291 	        generic_flag_trials_t *trials SMART_GENERIC_FLAG_TRIALS();      \
4292 	        results_t *results = alloc_results(testname, eSMART_GENERIC_FLAG_TRIALS, trials->count); \
4293                                                                         \
4294 	        for (unsigned i = 0; i < trials->count; i++) {          \
4295 	                int flag = trials->list[i].flag;                \
4296 	                kern_return_t ret = fn(map, (T)base.addr, (T)base.size, flag); \
4297 	                append_result(results, ret, trials->list[i].name); \
4298 	        }                                                       \
4299 	        return results;                                         \
4300 	}                                                               \
4301                                                                         \
4302 	/* Test a Mach function. */                                     \
4303 	/* Run each trial with a vm_prot_t. */                          \
4304 	typedef kern_return_t (*NAME ## mach_with_prot_fn)(MAP_T map, T size, vm_prot_t prot); \
4305                                                                         \
4306 	static results_t * __attribute__((used))                        \
4307 	test_ ## NAME ## mach_vm_prot(NAME ## mach_with_prot_fn fn, const char *testname) \
4308 	{                                                               \
4309 	        MAP_T map SMART_MAP;                                    \
4310 	        vm_prot_trials_t *trials SMART_VM_PROT_TRIALS();        \
4311 	        results_t *results = alloc_results(testname, eSMART_VM_PROT_TRIALS, trials->count); \
4312                                                                         \
4313 	        for (unsigned i = 0; i < trials->count; i++) {          \
4314 	                kern_return_t ret = fn(map, TEST_ALLOC_SIZE, trials->list[i].prot); \
4315 	                append_result(results, ret, trials->list[i].name); \
4316 	        }                                                       \
4317 	        return results;                                         \
4318 	}                                                               \
4319                                                                         \
4320 	/* Test a Mach function. */                                     \
4321 	/* Run each trial with a pair of vm_prot_t's. */                \
4322 	typedef kern_return_t (*NAME ## mach_with_prot_pair_fn)(MAP_T map, vm_prot_t cur, vm_prot_t max); \
4323                                                                         \
4324 	static results_t * __attribute__((used))                        \
4325 	test_ ## NAME ## mach_vm_prot_pair(NAME ## mach_with_prot_pair_fn fn, const char *testname) \
4326 	{                                                               \
4327 	        MAP_T map SMART_MAP;                                    \
4328 	        vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS();      \
4329 	        results_t *results = alloc_results(testname, eSMART_VM_PROT_PAIR_TRIALS, trials->count); \
4330                                                                         \
4331 	        for (unsigned i = 0; i < trials->count; i++) {          \
4332 	                kern_return_t ret = fn(map, trials->list[i].cur, trials->list[i].max); \
4333 	                append_result(results, ret, trials->list[i].name); \
4334 	        }                                                       \
4335 	        return results;                                         \
4336 	}                                                               \
4337                                                                         \
4338 	/* Test a Mach function. */                                     \
4339 	/* Run each trial with an allocated vm region and a pair of vm_prot_t's. */ \
4340 	typedef kern_return_t (*NAME ## mach_with_allocated_prot_pair_fn)(MAP_T map, T addr, T size, vm_prot_t cur, vm_prot_t max); \
4341                                                                         \
4342 	static results_t * __attribute__((used))                        \
4343 	test_ ## NAME ## mach_with_allocated_vm_prot_pair(NAME ## mach_with_allocated_prot_pair_fn fn, const char *testname) \
4344 	{                                                               \
4345 	        MAP_T map SMART_MAP;                                    \
4346 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4347 	        vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS(); \
4348 	        results_t *results = alloc_results(testname, eSMART_VM_PROT_PAIR_TRIALS, trials->count); \
4349                                                                         \
4350 	        for (unsigned i = 0; i < trials->count; i++) {          \
4351 	                kern_return_t ret = fn(map, (T)base.addr, (T)base.size, trials->list[i].cur, trials->list[i].max); \
4352 	                append_result(results, ret, trials->list[i].name); \
4353 	        }                                                       \
4354 	        return results;                                         \
4355 	}                                                               \
4356                                                                         \
4357 	/* Test a Mach function. */                                     \
4358 	/* Run each trial with an allocated vm region and a vm_prot_t. */ \
4359 	typedef kern_return_t (*NAME ## mach_with_allocated_prot_fn)(MAP_T map, T addr, T size, vm_prot_t prot); \
4360                                                                         \
4361 	static results_t * __attribute__((used))                        \
4362 	test_ ## NAME ## mach_with_allocated_vm_prot_t(NAME ## mach_with_allocated_prot_fn fn, const char *testname) \
4363 	{                                                               \
4364 	        MAP_T map SMART_MAP;                                    \
4365 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4366 	        vm_prot_trials_t *trials SMART_VM_PROT_TRIALS();        \
4367 	        results_t *results = alloc_results(testname, eSMART_VM_PROT_TRIALS, trials->count); \
4368                                                                         \
4369 	        for (unsigned i = 0; i < trials->count; i++) {          \
4370 	                vm_prot_t prot = trials->list[i].prot;          \
4371 	                kern_return_t ret = fn(map, (T)base.addr, (T)base.size, prot); \
4372 	                append_result(results, ret, trials->list[i].name); \
4373 	        }                                                       \
4374 	        return results;                                         \
4375 	}                                                               \
4376                                                                         \
4377 	/* Test a Mach function. */                                     \
4378 	/* Run each trial with a ledger flag. */ \
4379 	typedef kern_return_t (*NAME ## mach_ledger_flag_fn)(MAP_T map, int ledger_flag); \
4380                                                                         \
4381 	static results_t * __attribute__((used))                        \
4382 	test_ ## NAME ## mach_with_ledger_flag(NAME ## mach_ledger_flag_fn fn, const char *testname) \
4383 	{                                                               \
4384 	        MAP_T map SMART_MAP;                                    \
4385 	        ledger_flag_trials_t *trials SMART_LEDGER_FLAG_TRIALS();        \
4386 	        results_t *results = alloc_results(testname, eSMART_LEDGER_FLAG_TRIALS, trials->count); \
4387                                                                         \
4388 	        for (unsigned i = 0; i < trials->count; i++) {          \
4389 	                kern_return_t ret = fn(map, trials->list[i].flag); \
4390 	                append_result(results, ret, trials->list[i].name); \
4391 	        }                                                       \
4392 	        return results;                                         \
4393 	}                                                               \
4394 	/* Test a Mach function. */                                     \
4395 	/* Run each trial with a ledger tag. */                         \
4396 	typedef kern_return_t (*NAME ## mach_ledger_tag_fn)(MAP_T map, int ledger_tag); \
4397                                                                         \
4398 	static results_t * __attribute__((used))                        \
4399 	test_ ## NAME ## mach_with_ledger_tag(NAME ## mach_ledger_tag_fn fn, const char *testname) \
4400 	{                                                               \
4401 	        MAP_T map SMART_MAP;                                    \
4402 	        ledger_tag_trials_t *trials SMART_LEDGER_TAG_TRIALS();  \
4403 	        results_t *results = alloc_results(testname, eSMART_LEDGER_TAG_TRIALS, trials->count); \
4404                                                                         \
4405 	        for (unsigned i = 0; i < trials->count; i++) {          \
4406 	                kern_return_t ret = fn(map, trials->list[i].tag); \
4407 	                append_result(results, ret, trials->list[i].name); \
4408 	        }                                                       \
4409 	        return results;                                         \
4410 	}                                                               \
4411                                                                         \
4412 	/* Test a Mach function. */                                     \
4413 	/* Run each trial with an allocated region and a vm_inherit_t. */ \
4414 	typedef kern_return_t (*NAME ## mach_inherit_fn)(MAP_T map, T addr, T size, vm_inherit_t inherit); \
4415                                                                         \
4416 	static results_t * __attribute__((used))                        \
4417 	test_ ## NAME ## mach_with_allocated_vm_inherit_t(NAME ## mach_inherit_fn fn, const char * testname) { \
4418 	        MAP_T map SMART_MAP;                                    \
4419 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4420 	        vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();  \
4421 	        results_t *results = alloc_results(testname, eSMART_VM_INHERIT_TRIALS, trials->count); \
4422                                                                         \
4423 	        for (unsigned i = 0; i < trials->count; i++) {          \
4424 	                vm_inherit_trial_t trial = trials->list[i];     \
4425 	                int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
4426 	                append_result(results, ret, trial.name); \
4427 	        }                                                       \
4428 	        return results;                                         \
4429 	}                                                               \
4430 	/* Test a Mach function. */                                     \
4431 	/* Run each trial with an allocated vm region and start/end parameters that reference it. */ \
4432 	typedef kern_return_t (*NAME ## with_start_end_fn)(MAP_T map, T addr, T end); \
4433                                                                         \
4434 	static results_t * __attribute__((used))                        \
4435 	test_ ## NAME ## mach_with_allocated_start_end(NAME ## with_start_end_fn fn, const char *testname) \
4436 	{                                                               \
4437 	        MAP_T map SMART_MAP;                                    \
4438 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4439 	        start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr); \
4440 	        results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, base.addr, trials->count); \
4441                                                                         \
4442 	        for (unsigned i = 0; i < trials->count; i++) {          \
4443 	                T start = (T)trials->list[i].start;             \
4444 	                T size = (T)trials->list[i].size;               \
4445 	                kern_return_t ret = fn(map, start, start + size);       \
4446 	                append_result(results, ret, trials->list[i].name); \
4447 	        }                                                       \
4448 	        return results;                                         \
4449 	}                                                               \
4450 	/* Test a Mach function. */                                     \
4451 	/* Run each trial with an allocated vm region and a vm_tag_t. */ \
4452 	typedef kern_return_t (*NAME ## with_tag_fn)(MAP_T map, T addr, T end, vm_tag_t tag); \
4453                                                                         \
4454 	static results_t * __attribute__((used))                        \
4455 	test_ ## NAME ## mach_with_allocated_tag(NAME ## with_tag_fn fn, const char *testname) \
4456 	{                                                               \
4457 	        MAP_T map SMART_MAP;                                    \
4458 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4459 	        vm_tag_trials_t *trials SMART_VM_TAG_TRIALS();  \
4460 	        results_t *results = alloc_results(testname, eSMART_VM_TAG_TRIALS, trials->count); \
4461                                                                         \
4462 	        for (unsigned i = 0; i < trials->count; i++) {          \
4463 	                kern_return_t ret = fn(map, (T)base.addr, (T)(base.addr + base.size), trials->list[i].tag); \
4464 	                append_result(results, ret, trials->list[i].name); \
4465 	        }                                                       \
4466 	        return results;                                         \
4467 	}                                                               \
4468 	/* Test a Mach function. */                                     \
4469 	/* Run each trial with an allocated region and a vm_behavior_t. */ \
4470 	typedef kern_return_t (*NAME ## mach_behavior_fn)(MAP_T map, T addr, T size, vm_behavior_t behavior); \
4471                                                                         \
4472 	static results_t * __attribute__((used))                        \
4473 	test_ ## NAME ## mach_with_allocated_aligned_vm_behavior_t(NAME ## mach_behavior_fn fn, mach_vm_size_t align_mask, const char * testname) { \
4474 	        MAP_T map SMART_MAP;                                    \
4475 	        allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT); \
4476 	        vm_behavior_trials_t *trials SMART_VM_BEHAVIOR_TRIALS();  \
4477 	        results_t *results = alloc_results(testname, eSMART_VM_BEHAVIOR_TRIALS, trials->count); \
4478                                                                         \
4479 	        for (unsigned i = 0; i < trials->count; i++) {          \
4480 	                vm_behavior_trial_t trial = trials->list[i];     \
4481 	                int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
4482 	                append_result(results, ret, trial.name); \
4483 	        }                                                       \
4484 	        return results;                                         \
4485 	}                                                               \
4486                                                                         \
4487 	static results_t * __attribute__((used))                        \
4488 	test_ ## NAME ## mach_with_allocated_vm_behavior_t(NAME ## mach_behavior_fn fn, const char * testname) { \
4489 	        return test_ ## NAME ## mach_with_allocated_aligned_vm_behavior_t(fn, 0, testname); \
4490 	}                                                               \
4491                                                                         \
4492 	/* Test a Mach function. */                                     \
4493 	/* Run each trial with an allocated region and a vm_sync_t. */ \
4494 	typedef kern_return_t (*NAME ## mach_sync_fn)(MAP_T map, T addr, T size, vm_sync_t behavior); \
4495                                                                         \
4496 	static results_t * __attribute__((used))                        \
4497 	test_ ## NAME ## mach_with_allocated_vm_sync_t(NAME ## mach_sync_fn fn, const char * testname) { \
4498 	        MAP_T map SMART_MAP;                                    \
4499 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4500 	        vm_sync_trials_t *trials SMART_VM_SYNC_TRIALS(); \
4501 	        results_t *results = alloc_results(testname, eSMART_VM_SYNC_TRIALS, trials->count); \
4502                                                                         \
4503 	        for (unsigned i = 0; i < trials->count; i++) {          \
4504 	                vm_sync_trial_t trial = trials->list[i];    \
4505 	                int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
4506 	                append_result(results, ret, trial.name);        \
4507 	        }                                                       \
4508 	        return results;                                         \
4509 	}                                                               \
4510 	/* Test a Mach function. */                                     \
4511 	/* Run each trial with an allocated region and a vm_machine_attribute_t. */ \
4512 	typedef kern_return_t (*NAME ## mach_attribute_fn)(MAP_T map, T addr, T size, vm_machine_attribute_t attr); \
4513                                                                         \
4514 	static results_t * __attribute__((used))                        \
4515 	test_ ## NAME ## mach_with_allocated_vm_machine_attribute_t(NAME ## mach_attribute_fn fn, const char * testname) { \
4516 	        MAP_T map SMART_MAP;                                    \
4517 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4518 	        vm_machine_attribute_trials_t *trials SMART_VM_MACHINE_ATTRIBUTE_TRIALS(); \
4519 	        results_t *results = alloc_results(testname, eSMART_VM_MACHINE_ATTRIBUTE_TRIALS, trials->count); \
4520                                                                         \
4521 	        for (unsigned i = 0; i < trials->count; i++) {          \
4522 	                vm_machine_attribute_trial_t trial = trials->list[i];    \
4523 	                int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
4524 	                append_result(results, ret, trial.name);        \
4525 	        }                                                       \
4526 	        return results;                                         \
4527 	}                                                               \
4528 	/* Test a Mach function. */                                     \
4529 	/* Run each trial with an allocated region and a purgeable trial. */ \
4530 	typedef kern_return_t (*NAME ## mach_purgable_fn)(MAP_T map, T addr, vm_purgable_t control, int state); \
4531                                                                         \
4532 	static results_t * __attribute__((used))                        \
4533 	test_ ## NAME ## mach_with_allocated_purgeable_and_state(NAME ## mach_purgable_fn fn, const char * testname) { \
4534 	        MAP_T map SMART_MAP;                                    \
4535 	        vm_purgeable_and_state_trials_t *trials SMART_VM_PURGEABLE_AND_STATE_TRIALS(); \
4536 	        results_t *results = alloc_results(testname, eSMART_VM_PURGEABLE_AND_STATE_TRIALS, trials->count); \
4537                                                                         \
4538 	        for (unsigned i = 0; i < trials->count; i++) {          \
4539 	                allocation_t base SMART_ALLOCATE_PURGEABLE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4540 	                vm_purgeable_and_state_trial_t trial = trials->list[i];    \
4541 	                int ret = fn(map, (T)base.addr, trial.control, trial.state); \
4542 	                append_result(results, ret, trial.name);        \
4543 	        }                                                       \
4544 	        return results;                                         \
4545 	}
4546 
4547 IMPL(, uint64_t)
4548 #if TEST_OLD_STYLE_MACH
4549 IMPL(old, uint32_t)
4550 #endif /* TEST_OLD_STYLE_MACH */
4551 #undef IMPL
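
/*
 * Editorial sketch, not part of the original header: how the runners
 * generated above are typically paired with a call_ harness. The wrapper
 * name and the use of mach_vm_protect() here are hypothetical, and the
 * block is kept under #if 0 so it remains illustration only.
 */
#if 0
static kern_return_t
call_mach_vm_protect__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	/* fixed, benign protection value; the runner sweeps the start/size trials */
	return mach_vm_protect(map, start, size, false, VM_PROT_READ);
}

static results_t *
example_run(void)
{
	/* uses the NAME-empty, T == uint64_t instantiation generated above */
	return test_mach_with_allocated_start_size(call_mach_vm_protect__start_size,
	           "mach_vm_protect (start/size)");
}
#endif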
4552 
4553 #if KERNEL && CONFIG_MAP_RANGES
4554 /*
4555  * The vm_range_create tests assume that no range_create expected to succeed
4556  * spans more than 2 * PAGE_SIZE. This enforces that assumption.
4557  */
4558 void
4559 verify_largest_valid_trial_size_fits(start_size_start_size_trial_t trial)
4560 {
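	/* Any trial size above 2 * PAGE_SIZE must be one of the enormous (> 0xfffffffffffffff) values that can never be created. */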
4561 	if (trial.size > 2 * PAGE_SIZE) {
4562 		assert(trial.size > 0xfffffffffffffff);
4563 	}
4564 	if (trial.second_size > 2 * PAGE_SIZE) {
4565 		assert(trial.second_size > 0xfffffffffffffff);
4566 	}
4567 }
4568 
4569 /* Run each trial with start/size/start/size parameters. */
4570 typedef kern_return_t (mach_with_start_size_start_size_fn)(MAP_T map, mach_vm_address_t addr,
4571     mach_vm_size_t size, mach_vm_address_t second_addr, mach_vm_size_t second_size);
4572 
4573 static results_t * __attribute__((used))
4574 test_mach_vm_range_create(mach_with_start_size_start_size_fn fn, const char *testname)
4575 {
4576 	start_size_start_size_trials_t *trials SMART_START_SIZE_START_SIZE_TRIALS();
4577 	results_t *results = alloc_results(testname, eSMART_START_SIZE_START_SIZE_TRIALS, trials->count);
4578 
4579 	for (unsigned i = 0; i < trials->count; i++) {
4580 		/*
4581 		 * Allocate and configure a new map for every trial so that the map has no user ranges.
4582 		 */
4583 		MAP_T map SMART_RANGE_MAP;
4584 		bool has_ranges = vm_map_range_configure(map, false) == KERN_SUCCESS;
4585 		bool has_space_in_ranges = false;
4586 
4587 		struct mach_vm_range void1 = {
4588 			.min_address = map->default_range.max_address,
4589 			.max_address = map->data_range.min_address,
4590 		};
4591 		struct mach_vm_range void2 = {
4592 			.min_address = map->data_range.max_address,
4593 			.max_address = vm_map_max(map),
4594 		};
4595 		struct mach_vm_range range_to_test = { 0, 0 };  /* stays zeroed if neither void range has room */
4596 
4597 		/*
4598 		 * For the good cases to succeed, while still triggering failures
4599 		 * when the ranges overlap, we need:
4600 		 * range1 = {.start = addr}, range2 = {.start = addr + PAGE_SIZE * 2}.
4601 		 * We also want at least 2 * PAGE_SIZE available after the start of range2,
4602 		 * and we start the first range 2 * PAGE_SIZE past the start of the void range (hence the PAGE_SIZE * 6 check below).
4603 		 */
4604 		if (void1.min_address + (PAGE_SIZE * 6) < void1.max_address) {
4605 			range_to_test = void1;
4606 			has_space_in_ranges = true;
4607 		} else if (void2.min_address + (PAGE_SIZE * 6) < void2.max_address) {
4608 			range_to_test = void2;
4609 			has_space_in_ranges = true;
4610 		}
4611 
4612 		mach_vm_address_t addr_base = range_to_test.min_address + PAGE_SIZE * 2;
4613 		if (has_ranges && has_space_in_ranges) {
4614 			mach_vm_address_t second_addr_base = addr_base + PAGE_SIZE * 2;
4615 
4616 			start_size_start_size_trial_t trial = slide_trial(trials->list[i], addr_base, second_addr_base);
4617 
4618 			verify_largest_valid_trial_size_fits(trial);
4619 
4620 			mach_vm_address_t start = trial.start;
4621 			mach_vm_size_t size = trial.size;
4622 			mach_vm_address_t second_start = trial.second_start;
4623 			mach_vm_size_t second_size = trial.second_size;
4624 			kern_return_t ret = fn(map, start, size, second_start, second_size);
4625 			append_result(results, ret, trials->list[i].name);
4626 		} else {
4627 			append_result(results, IGNORED, trials->list[i].name);
4628 		}
4629 	}
4630 	return results;
4631 }
4632 #endif /* KERNEL && CONFIG_MAP_RANGES */
4633 
4634 // Test a mach allocation function with a start/size
4635 static results_t *
4636 test_mach_allocation_func_with_start_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size), const char * testname)
4637 {
4638 	MAP_T map SMART_MAP;
4639 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(0);
4640 	results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, 0, trials->count);
4641 
4642 	for (unsigned i = 0; i < trials->count; i++) {
4643 		unallocation_t dst SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
4644 		start_size_trial_t trial = slide_trial(trials->list[i], dst.addr);
4645 		mach_vm_address_t addr = trial.start;
4646 		kern_return_t ret = func(map, &addr, trial.size);
4647 		if (ret == 0) {
4648 			(void)mach_vm_deallocate(map, addr, trial.size);
4649 		}
4650 		append_result(results, ret, trial.name);
4651 	}
4652 	return results;
4653 }
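
/*
 * Editorial sketch (hypothetical wrapper name): mach_vm_allocate() takes a
 * flags argument, so a thin adapter gives it the 3-argument shape that
 * test_mach_allocation_func_with_start_size() expects. User-space call site;
 * kept under #if 0 as illustration only.
 */
#if 0
static kern_return_t
call_mach_vm_allocate__fixed(MAP_T map, mach_vm_address_t *start, mach_vm_size_t size)
{
	/* VM_FLAGS_FIXED so the trial's start address is honored rather than replaced */
	return mach_vm_allocate(map, start, size, VM_FLAGS_FIXED);
}
/* usage: test_mach_allocation_func_with_start_size(call_mach_vm_allocate__fixed, "mach_vm_allocate (fixed)"); */
#endif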
4654 
4655 // Test a mach allocation function with a vm_map_kernel_flags_t
4656 static results_t *
4657 test_mach_allocation_func_with_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags), const char * testname)
4658 {
4659 	MAP_T map SMART_MAP;
4660 	vm_map_kernel_flags_trials_t * trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
4661 	results_t *results = alloc_results(testname, eSMART_VM_MAP_KERNEL_FLAGS_TRIALS, trials->count);
4662 
4663 	for (unsigned i = 0; i < trials->count; i++) {
4664 		allocation_t fixed_overwrite_dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
4665 		vm_map_kernel_flags_trial_t trial = trials->list[i];
4666 #if KERNEL
4667 		if (is_random_anywhere(trial.flags)) {
4668 			// RANDOM_ADDR is likely to fall outside pmap's range
4669 			append_result(results, PANIC, trial.name);
4670 			continue;
4671 		}
4672 #endif
4673 		mach_vm_address_t addr = 0;
4674 		if (is_fixed_overwrite(trial.flags)) {
4675 			// use a pre-existing destination for fixed-overwrite
4676 			addr = fixed_overwrite_dst.addr;
4677 		}
4678 		kern_return_t ret = func(map, &addr, TEST_ALLOC_SIZE, trial.flags);
4679 		deallocate_if_not_fixed_overwrite(ret, map, addr, TEST_ALLOC_SIZE, trial.flags);
4680 		append_result(results, ret, trial.name);
4681 	}
4682 	return results;
4683 }
4684 
4685 static results_t *
4686 test_mach_with_allocated_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, int flags), const char * testname)
4687 {
4688 	MAP_T map SMART_MAP;
4689 
4690 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
4691 	vm_map_kernel_flags_trials_t * trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
4692 	results_t *results = alloc_results(testname, eSMART_VM_MAP_KERNEL_FLAGS_TRIALS, trials->count);
4693 
4694 	for (unsigned i = 0; i < trials->count; i++) {
4695 		kern_return_t ret = func(map, base.addr, base.size, trials->list[i].flags);
4696 		append_result(results, ret, trials->list[i].name);
4697 	}
4698 	return results;
4699 }
4700 
4701 static results_t *
4702 test_unix_with_allocated_vm_prot_t(int (*func)(void * start, size_t size, int flags), const char * testname)
4703 {
4704 	MAP_T map CURRENT_MAP;
4705 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
4706 	vm_prot_trials_t * trials SMART_VM_PROT_TRIALS();
4707 	results_t *results = alloc_results(testname, eSMART_VM_PROT_TRIALS, trials->count);
4708 
4709 	for (unsigned i = 0; i < trials->count; i++) {
4710 		int ret = func((void *) base.addr, (size_t) base.size, (int) trials->list[i].prot);
4711 		append_result(results, ret, trials->list[i].name);
4712 	}
4713 	return results;
4714 }
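
/*
 * Editorial note: in user space, mprotect() already matches the
 * int (*)(void *, size_t, int) shape this runner expects, so a prot sweep
 * could (hypothetically) be driven directly:
 *
 *     results_t *r = test_unix_with_allocated_vm_prot_t(mprotect, "mprotect");
 */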
4715 
4716 // Test a Unix function.
4717 // Run each trial with an allocated vm region and start/size parameters that reference it.
4718 typedef int (*unix_with_start_size_fn)(void *start, size_t size);
4719 
4720 static results_t * __unused
4721 test_unix_with_allocated_aligned_start_size(unix_with_start_size_fn fn, mach_vm_size_t align_mask, const char *testname)
4722 {
4723 	MAP_T map CURRENT_MAP;
4724 	allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT);
4725 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr);
4726 	results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, base.addr, trials->count);
4727 
4728 	for (unsigned i = 0; i < trials->count; i++) {
4729 		addr_t start = trials->list[i].start;
4730 		addr_t size = trials->list[i].size;
4731 		int ret = fn((void*)(uintptr_t)start, (size_t)size);
4732 		append_result(results, ret, trials->list[i].name);
4733 	}
4734 	return results;
4735 }
4736 
4737 static results_t * __unused
4738 test_unix_with_allocated_start_size(unix_with_start_size_fn fn, const char *testname)
4739 {
4740 	return test_unix_with_allocated_aligned_start_size(fn, 0, testname);
4741 }
4742 
4743 #if KERNEL
4744 static results_t * __unused
4745 test_kext_unix_with_allocated_start_size(unix_with_start_size_fn fn, const char *testname)
4746 {
4747 	MAP_T map CURRENT_MAP;
4748 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
4749 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr);
4750 	results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, base.addr, trials->count);
4751 
4752 	for (unsigned i = 0; i < trials->count; i++) {
4753 		addr_t start = trials->list[i].start;
4754 		addr_t size = trials->list[i].size;
4755 		int ret = fn((void*)(uintptr_t)start, (size_t)size);
4756 		append_result(results, ret, trials->list[i].name);
4757 	}
4758 	return results;
4759 }
4760 
4761 /* Test a Kext function requiring memory allocated with a specific tag. */
4762 /* Run each trial with an allocated vm region and an addr parameter that references it. */
4763 
4764 static results_t * __attribute__((used))
4765 test_kext_tagged_with_allocated_addr(kern_return_t (*func)(MAP_T map, mach_vm_address_t addr), const char *testname)
4766 {
4767 	MAP_T map CURRENT_MAP;
4768 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
4769 	addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);
4770 	results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count);
4771 
4772 	for (unsigned i = 0; i < trials->count; i++) {
4773 		mach_vm_address_t addr = (mach_vm_address_t)trials->list[i].addr;
4774 		kern_return_t ret = func(map, addr);
4775 		append_result(results, ret, trials->list[i].name);
4776 	}
4777 	return results;
4778 }
4779 #endif /* KERNEL */
4780 
4781 static results_t * __attribute__((used))
4782 test_with_int64(kern_return_t (*func)(int64_t), const char *testname)
4783 {
4784 	size_trials_t *trials SMART_SIZE_TRIALS();
4785 	results_t *results = alloc_results(testname, eSMART_SIZE_TRIALS, trials->count);
4786 
4787 	for (unsigned i = 0; i < trials->count; i++) {
4788 		int64_t val = (int64_t)trials->list[i].size;
4789 		kern_return_t ret = func(val);
4790 		append_result(results, ret, trials->list[i].name);
4791 	}
4792 	return results;
4793 }
4794 
4795 
4796 #if !KERNEL
4797 
4798 // For deallocators like munmap and vm_deallocate.
4799 // Return a non-zero error code if we should avoid performing this trial.
4800 // Call this BEFORE sliding the trial to a non-zero base address.
4801 extern
4802 kern_return_t
4803 short_circuit_deallocator(MAP_T map, start_size_trial_t trial);
4804 
4805 // implemented in vm_parameter_validation.c
4806 
4807 #else /* KERNEL */
4808 
4809 static inline
4810 kern_return_t
4811 short_circuit_deallocator(MAP_T map __unused, start_size_trial_t trial __unused)
4812 {
4813 	// Kernel tests run with an empty vm_map so we're free to deallocate whatever we want.
4814 	return 0;
4815 }
4816 
4817 #endif /* KERNEL */
4818 
4819 
4820 // Test mach_vm_deallocate or munmap.
4821 // Similar to test_mach_with_allocated_start_size, but mach_vm_deallocate is destructive,
4822 // so we can't test all values and we must re-allocate the vm region for each trial.
4823 static results_t *
4824 test_deallocator(kern_return_t (*func)(MAP_T map, mach_vm_address_t start, mach_vm_size_t size), const char *testname)
4825 {
4826 	MAP_T map SMART_MAP;
4827 
4828 	// allocate trials relative to address zero
4829 	// later we slide them to each allocation's address
4830 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(0);
4831 
4832 	results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, 0, trials->count);
4833 
4834 	for (unsigned i = 0; i < trials->count; i++) {
4835 		start_size_trial_t trial = trials->list[i];
4836 		allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
4837 
4838 		// Avoid trials that might deallocate wildly.
4839 		// Check this BEFORE sliding the trial.
4840 		kern_return_t ret = short_circuit_deallocator(map, trial);
4841 		if (ret == 0) {
4842 			// Adjust start and/or size, if that value includes the allocated address
4843 			trial = slide_trial(trial, base.addr);
4844 
4845 			ret = func(map, trial.start, trial.size);
4846 			if (ret == 0) {
4847 				// Deallocation succeeded. Don't deallocate again.
4848 				set_already_deallocated(&base);
4849 			}
4850 		}
4851 		append_result(results, ret, trial.name);
4852 	}
4853 
4854 	return results;
4855 }
4856 
4857 static results_t *
4858 test_allocated_src_unallocated_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst), const char * testname)
4859 {
4860 	MAP_T map SMART_MAP;
4861 	allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
4862 	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
4863 	results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
4864 
4865 	for (unsigned i = 0; i < trials->count; i++) {
4866 		src_dst_size_trial_t trial = trials->list[i];
4867 		unallocation_t dst_base SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
4868 		trial = slide_trial_src(trial, src_base.addr);
4869 		trial = slide_trial_dst(trial, dst_base.addr);
4870 		int ret = func(map, trial.src, trial.size, trial.dst);
4871 		// func deallocates its own allocation
4872 		append_result(results, ret, trial.name);
4873 	}
4874 	return results;
4875 }
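
/*
 * Editorial sketch (hypothetical wrapper name): mach_vm_remap() fits this
 * runner's src/size/dst shape once the extra remap arguments are pinned, and,
 * per the "func deallocates its own allocation" convention above, the wrapper
 * releases the mapping it created. Kept under #if 0 as illustration only.
 */
#if 0
static kern_return_t
call_mach_vm_remap__sketch(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst)
{
	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
	kern_return_t kr = mach_vm_remap(map, &dst, size, 0, VM_FLAGS_FIXED,
	    map, src, false, &cur, &max, VM_INHERIT_NONE);
	if (kr == 0) {
		/* remap created a new mapping at dst; deallocate it ourselves */
		(void)mach_vm_deallocate(map, dst, size);
	}
	return kr;
}
#endif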
4876 
4877 
4878 static inline void
4879 check_mach_vm_allocate_outparam_changes(kern_return_t * kr, mach_vm_address_t addr, mach_vm_size_t size,
4880     mach_vm_address_t saved_start, int flags, MAP_T map)
4881 {
4882 	if (*kr == KERN_SUCCESS) {
4883 		if (size == 0) {
4884 			if (addr != 0) {
4885 				*kr = OUT_PARAM_BAD;
4886 			}
4887 		} else {
4888 			if (is_fixed(flags)) {
4889 				if (addr != trunc_down_map(map, saved_start)) {
4890 					*kr = OUT_PARAM_BAD;
4891 				}
4892 			}
4893 		}
4894 	} else {
4895 		if (saved_start != addr) {
4896 			*kr = OUT_PARAM_BAD;
4897 		}
4898 	}
4899 }
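
/*
 * Editorial sketch (hypothetical name): the save/call/check shape that pairs
 * with check_mach_vm_allocate_outparam_changes() above. Kept under #if 0 as
 * illustration only.
 */
#if 0
static kern_return_t
call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, int flags)
{
	mach_vm_address_t addr = start;  /* keep the requested start for the outparam check */
	kern_return_t kr = mach_vm_allocate(map, &addr, size, flags);
	check_mach_vm_allocate_outparam_changes(&kr, addr, size, start, flags, map);
	if (kr == 0) {
		(void)mach_vm_deallocate(map, addr, size);
	}
	return kr;
}
#endif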
4900 
4901 static kern_return_t
4902 call_mach_vm_behavior_set__start_size__default(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
4903 {
4904 	kern_return_t kr = mach_vm_behavior_set(map, start, size, VM_BEHAVIOR_DEFAULT);
4905 	return kr;
4906 }
4907 
4908 /*
4909  * VM_BEHAVIOR_CAN_REUSE is additionally tested as it uses slightly different page rounding semantics
4910  */
4911 static kern_return_t
4912 call_mach_vm_behavior_set__start_size__can_reuse(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
4913 {
4914 	kern_return_t kr = mach_vm_behavior_set(map, start, size, VM_BEHAVIOR_CAN_REUSE);
4915 	return kr;
4916 }
4917 
4918 static kern_return_t
4919 call_mach_vm_behavior_set__vm_behavior(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_behavior_t behavior)
4920 {
4921 	kern_return_t kr = mach_vm_behavior_set(map, start, size, behavior);
4922 	return kr;
4923 }
4924 
4925 static void
4926 check_mach_vm_purgable_control_outparam_changes(kern_return_t * kr, int state, int saved_state, int control)
4927 {
4928 	if (*kr == KERN_SUCCESS) {
4929 		if (control == VM_PURGABLE_PURGE_ALL || control == VM_PURGABLE_SET_STATE) {
4930 			if (state != saved_state) {
4931 				*kr = OUT_PARAM_BAD;
4932 			}
4933 		}
4934 		if (control == VM_PURGABLE_GET_STATE) {
4935 			/*
4936 			 * The default state is VM_PURGABLE_NONVOLATILE for a newly created region
4937 			 */
4938 			if (state != VM_PURGABLE_NONVOLATILE) {
4939 				*kr = OUT_PARAM_BAD;
4940 			}
4941 		}
4942 	} else {
4943 		if (state != saved_state) {
4944 			*kr = OUT_PARAM_BAD;
4945 		}
4946 	}
4947 }
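
/*
 * Editorial sketch (hypothetical name): pairing a call harness with
 * check_mach_vm_purgable_control_outparam_changes() above. Kept under #if 0
 * as illustration only.
 */
#if 0
static kern_return_t
call_mach_vm_purgable_control__sketch(MAP_T map, mach_vm_address_t addr, vm_purgable_t control, int state)
{
	int new_state = state;  /* saved_state stays the caller's requested value */
	kern_return_t kr = mach_vm_purgable_control(map, addr, control, &new_state);
	check_mach_vm_purgable_control_outparam_changes(&kr, new_state, state, control);
	return kr;
}
#endif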
4948 
4949 static void
4950 check_mach_vm_region_outparam_changes(kern_return_t * kr, MAP_T map, void * info, void * saved_info, size_t info_size,
4951     mach_port_t object_name, mach_port_t saved_object_name, mach_vm_address_t addr, mach_vm_address_t saved_addr,
4952     mach_vm_size_t size, mach_vm_size_t saved_size)
4953 {
4954 	if (*kr == KERN_SUCCESS) {
4955 		if (object_name != 0) {
4956 			*kr = OUT_PARAM_BAD;
4957 		}
4958 		if (addr < trunc_down_map(map, saved_addr)) {
4959 			*kr = OUT_PARAM_BAD;
4960 		}
4961 		if (size == saved_size) {
4962 			*kr = OUT_PARAM_BAD;
4963 		}
4964 		if (memcmp(info, saved_info, info_size) == 0) {
4965 			*kr = OUT_PARAM_BAD;
4966 		}
4967 	} else {
4968 		if (object_name != saved_object_name || addr != saved_addr || size != saved_size || memcmp(info, saved_info, info_size) != 0) {
4969 			*kr = OUT_PARAM_BAD;
4970 		}
4971 	}
4972 }
4973 
4974 static int
4975 call_mach_vm_region(MAP_T map, mach_vm_address_t addr)
4976 {
4977 	mach_vm_address_t addr_cpy = addr;
4978 	mach_vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
4979 	mach_vm_size_t saved_size = size_out;
4980 	mach_port_t object_name_out = UNLIKELY_INITIAL_MACH_PORT;
4981 	mach_port_t saved_name = object_name_out;
4982 	vm_region_basic_info_data_64_t info;
4983 	info.inheritance = INVALID_INHERIT;
4984 	vm_region_basic_info_data_64_t saved_info = info;
4985 
4986 	mach_msg_type_number_t infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
4987 	kern_return_t kr = mach_vm_region(map, &addr_cpy, &size_out, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
4988 	    &infoCnt, &object_name_out);
4989 	check_mach_vm_region_outparam_changes(&kr, map, &info, &saved_info, sizeof(info), object_name_out, saved_name, addr_cpy, addr, size_out, saved_size);
4990 
4991 	return kr;
4992 }
4993 
4994 #if TEST_OLD_STYLE_MACH || KERNEL
4995 static int
4996 call_vm_region(MAP_T map, vm_address_t addr)
4997 {
4998 	vm_address_t addr_cpy = addr;
4999 	vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
5000 	vm_size_t saved_size = size_out;
5001 	mach_port_t object_name_out = UNLIKELY_INITIAL_MACH_PORT;
5002 	mach_port_t saved_name = object_name_out;
5003 	vm_region_basic_info_data_64_t info;
5004 	info.inheritance = INVALID_INHERIT;
5005 	vm_region_basic_info_data_64_t saved_info = info;
5006 
5007 	mach_msg_type_number_t infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
5008 	kern_return_t kr = vm_region(map, &addr_cpy, &size_out, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
5009 	    &infoCnt, &object_name_out);
5010 	check_mach_vm_region_outparam_changes(&kr, map, &info, &saved_info, sizeof(info), object_name_out, saved_name, addr_cpy, addr, size_out, saved_size);
5011 
5012 	return kr;
5013 }
5014 #endif /* TEST_OLD_STYLE_MACH || KERNEL */
5015 
5016 static void
5017 check_mach_vm_page_info_outparam_changes(kern_return_t * kr, vm_page_info_basic_data_t info, vm_page_info_basic_data_t saved_info,
5018     mach_msg_type_number_t count, mach_msg_type_number_t saved_count)
5019 {
5020 	if (*kr == KERN_SUCCESS) {
5021 		if (memcmp(&info, &saved_info, sizeof(vm_page_info_basic_data_t)) == 0) {
5022 			*kr = OUT_PARAM_BAD;
5023 		}
5024 	} else {
5025 		if (memcmp(&info, &saved_info, sizeof(vm_page_info_basic_data_t)) != 0) {
5026 			*kr = OUT_PARAM_BAD;
5027 		}
5028 	}
5029 	if (count != saved_count) {
5030 		*kr = OUT_PARAM_BAD;
5031 	}
5032 }
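
/*
 * Editorial sketch (hypothetical name): a call harness for mach_vm_page_info()
 * in the style of call_mach_vm_region() above, seeding the outparams with
 * unlikely values before the check. Kept under #if 0 as illustration only.
 */
#if 0
static int
call_mach_vm_page_info__sketch(MAP_T map, mach_vm_address_t addr)
{
	vm_page_info_basic_data_t info = {0};
	info.offset = UNLIKELY_INITIAL_ADDRESS;
	vm_page_info_basic_data_t saved_info = info;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
	mach_msg_type_number_t saved_count = count;
	kern_return_t kr = mach_vm_page_info(map, addr, VM_PAGE_INFO_BASIC,
	    (vm_page_info_t)&info, &count);
	check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);
	return kr;
}
#endif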
5033 
5034 #pragma clang diagnostic pop
5035 
5037 #endif /* VM_PARAMETER_VALIDATION_H */
5038