1 #include <darwintest.h>
2 #include <mach/mach.h>
3 #include <sys/sysctl.h>
4 #include <stdio.h>
5 #include <stdbool.h>
6 #include <stdlib.h>
7 #include <unistd.h>
8 #include <inttypes.h>
9 #include <pthread.h>
10 #include <TargetConditionals.h>
11 #include "excserver.h"
12 #include "exc_helpers.h"
13 
// Private SPI: freeze (hibernate) a process's memory into the compressor.
// A pid of -2 targets the calling process; used below to force this test's
// dirtied pages through compression on embedded platforms.
extern int pid_hibernate(int pid);

// VM page size in bytes, read from the vm.pagesize sysctl in T_DECL setup.
static vm_address_t page_size;

T_GLOBAL_META(
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("arm"),
	T_META_OWNER("peter_newman"),
	T_META_REQUIRES_SYSCTL_EQ("hw.optional.wkdm_popcount", 1)
	);

// Base addresses of the malloc()ed buffers whose pages get dirtied,
// compressed, and then corrupted via vm.compressor_inject_error.
static vm_address_t *blocks;
static uint64_t block_count;
// Size of each buffer in bytes (8 MB).
static const uint64_t block_length = 0x800000;

// Page size as read by create_corrupted_regions() from vm.pagesize.
// NOTE(review): duplicates page_size above — both come from the same sysctl.
static uint32_t vm_pagesize;
30 
31 static void
dirty_page(const vm_address_t address)32 dirty_page(const vm_address_t address)
33 {
34 	assert((address & (page_size - 1)) == 0UL);
35 	uint32_t *const page_as_u32 = (uint32_t *)address;
36 	for (uint32_t i = 0; i < page_size / sizeof(uint32_t); i += 2) {
37 		page_as_u32[i + 0] = i % 4;
38 		page_as_u32[i + 1] = 0xcdcdcdcd;
39 	}
40 }
41 
// Ask the kernel to corrupt the compressed copy of the page backing page_va.
// The target address is passed as the sysctl's "new value"; a zero return
// from sysctlbyname means the kernel accepted the injection request.
static bool
try_to_corrupt_page(vm_address_t page_va)
{
	int old_value;
	size_t old_size = sizeof(old_value);
	const int rc = sysctlbyname("vm.compressor_inject_error", &old_value,
	    &old_size, &page_va, sizeof(page_va));
	return rc == 0;
}
51 
// Allocate and dirty enough anonymous memory to force a compressor sweep,
// then ask the kernel to corrupt the compressed copy of every page we can.
// On success, `blocks`/`block_count` describe the mapped buffers and at
// least one page has been corrupted; otherwise the test is skipped.
static void
create_corrupted_regions(void)
{
	uint64_t hw_memsize;

	size_t size = sizeof(vm_pagesize);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.pagesize", &vm_pagesize, &size,
	    NULL, 0), "read vm.pagesize");
	size = sizeof(hw_memsize);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &hw_memsize, &size,
	    NULL, 0), "read hw.memsize");

#if TARGET_OS_OSX
	const uint64_t max_memsize = 32ULL * 0x40000000ULL; // 32 GB
#else
	const uint64_t max_memsize = 8ULL * 0x100000ULL; // 8 MB
#endif
	const uint64_t effective_memsize = (hw_memsize > max_memsize) ?
	    max_memsize : hw_memsize;

	const uint64_t total_pages = effective_memsize / vm_pagesize;
	const uint64_t pages_per_block = block_length / vm_pagesize;

	// Map as much memory as we have physical memory to back. Dirtying all
	// of these pages will force a compressor sweep. The mapping is done using
	// the smallest number of malloc() calls to allocate the necessary VAs.
	block_count = total_pages / pages_per_block;

	blocks = (vm_address_t *)malloc(sizeof(*blocks) * block_count);
	T_QUIET; T_ASSERT_NOTNULL(blocks, "allocate block pointer array");
	for (uint64_t i = 0; i < block_count; i++) {
		void *bufferp = malloc(block_length);
		// Bail out cleanly rather than letting dirty_page() fault on NULL.
		T_QUIET; T_ASSERT_NOTNULL(bufferp, "allocate block %llu", i);
		blocks[i] = (vm_address_t)bufferp;
	}

	for (uint64_t i = 0; i < block_count; i++) {
		for (size_t buffer_offset = 0; buffer_offset < block_length;
		    buffer_offset += vm_pagesize) {
			dirty_page(blocks[i] + buffer_offset);
		}
	}

#if !TARGET_OS_OSX
	// We can't use a substantial amount of memory on embedded platforms, so
	// freeze the current process instead to cause everything to be compressed.
	T_ASSERT_POSIX_SUCCESS(pid_hibernate(-2), NULL);
	T_ASSERT_POSIX_SUCCESS(pid_hibernate(-2), NULL);
#endif

	// Injection only succeeds for pages that actually reside in the
	// compressor, so count how many requests the kernel accepted.
	uint32_t corrupt = 0;
	for (uint64_t i = 0; i < block_count; i++) {
		for (size_t buffer_offset = 0; buffer_offset < block_length;
		    buffer_offset += vm_pagesize) {
			if (try_to_corrupt_page(blocks[i] + buffer_offset)) {
				corrupt++;
			}
		}
	}

	T_LOG("corrupted %u/%llu pages. accessing...\n", corrupt, total_pages);
	if (corrupt == 0) {
		T_SKIP("no pages corrupted");
	}
}
115 
// Attempt a 32-bit store through `word` and report whether it completed
// normally. Returns true when both instructions below executed (val was
// zeroed); returns false when the exception handler advanced the PC past
// them, i.e. a decompression failure was detected on the faulting access.
// On non-arm64 builds this always returns false.
static bool
try_write(volatile uint32_t *word __unused)
{
#ifdef __arm64__
	uint64_t val = 1;
	// Two adjacent 4-byte instructions: the store that may fault, then a
	// mov that clears val. kern_memory_failure_handler() returns 8 to skip
	// BOTH on a KERN_MEMORY_FAILURE, leaving val == 1.
	__asm__ volatile (
             "str		%w0, %1\n"
             "mov		%0, 0\n"
             : "+r"(val) : "m"(*word));
	// The exception handler skips over the instruction that zeroes val when a
	// decompression failure is detected.
	return val == 0;
#else
	return false;
#endif
}
132 
133 static bool
read_blocks(void)134 read_blocks(void)
135 {
136 	for (uint32_t i = 0; i < block_count; i++) {
137 		for (size_t buffer_offset = 0; buffer_offset < block_length;
138 		    buffer_offset += vm_pagesize) {
139 			// Access pages until the fault is detected.
140 			if (!try_write((volatile uint32_t *)(blocks[i] + buffer_offset))) {
141 				T_LOG("test_thread breaking");
142 				return true;
143 			}
144 		}
145 	}
146 	return false;
147 }
148 
// Mach exception handler installed by the test. Verifies that the exception
// raised by the corrupted-page access is EXC_BAD_ACCESS with code
// KERN_MEMORY_FAILURE, then returns the number of bytes to advance the
// faulting thread's PC.
static size_t
kern_memory_failure_handler(
	__unused mach_port_t task,
	__unused mach_port_t thread,
	exception_type_t exception,
	mach_exception_data_t code)
{
	T_EXPECT_EQ(exception, EXC_BAD_ACCESS,
	    "Verified bad address exception");
	T_EXPECT_EQ((int)code[0], KERN_MEMORY_FAILURE, "caught KERN_MEMORY_FAILURE");
	T_PASS("received KERN_MEMORY_FAILURE from test thread");
	// Skip the next instruction as well so that the faulting code can detect
	// the exception.
	// (8 bytes = two 4-byte arm64 instructions: the str and the mov in
	// try_write(); leaving val non-zero is how try_write() reports the fault.)
	return 8;
}
164 
T_DECL(decompression_failure,
    "Confirm that exception is raised on decompression failure",
    // Disable software checks in development builds, as these would result in
    // panics.
    T_META_BOOTARGS_SET("vm_compressor_validation=0"),
    T_META_ASROOT(true),
    // This test intentionally corrupts pages backing heap memory, so it's
    // not practical for it to release all the buffers properly.
    T_META_CHECK_LEAKS(false))
{
	T_SETUPBEGIN;

#if !TARGET_OS_OSX
	// If the process can't be frozen, the compressor isn't available and
	// the test can't exercise decompression at all.
	if (pid_hibernate(-2) != 0) {
		T_SKIP("compressor not active");
	}
#endif

	// Probe for the error-injection sysctl; it only exists on kernels
	// built with compressor error injection support.
	int value;
	size_t size = sizeof(value);
	if (sysctlbyname("vm.compressor_inject_error", &value, &size, NULL, 0)
	    != 0) {
		T_SKIP("vm.compressor_inject_error not present");
	}

	// Reset size: the probe above may have rewritten it, and sysctlbyname
	// treats it as both input capacity and output length.
	size = sizeof(value);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.pagesize", &value, &size, NULL, 0),
	    NULL);
	T_ASSERT_EQ_ULONG(size, sizeof(value), NULL);
	page_size = (vm_address_t)value;

	// Install the handler before touching the corrupted pages so the
	// EXC_BAD_ACCESS raised by the sweep is caught, not fatal.
	mach_port_t exc_port = create_exception_port(EXC_MASK_BAD_ACCESS);
	create_corrupted_regions();
	T_SETUPEND;

	run_exception_handler(exc_port, kern_memory_failure_handler);

	if (!read_blocks()) {
		T_SKIP("no faults");
	}
}
205