/* xref: /xnu-11215/tests/vm/mach_vm_map.c (revision 4f1223e8) */
#include <darwintest.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdlib.h>
#include <string.h>

T_GLOBAL_META(T_META_ALL_VALID_ARCHS(true));

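/*
 * Memory-entry flag combinations exercised by the test.  The
 * mach_vm_map_unaligned test walks this zero-terminated table and tries
 * each variant both with and without VM_FLAGS_RETURN_DATA_ADDR.
 */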
static struct mo_spec {
	int         flags;
	const char *s;
} mo_specs[] = {
#define E(f)    { f, #f }
	E(0),
	E(MAP_MEM_VM_COPY),
	E(MAP_MEM_VM_SHARE),
	E(MAP_MEM_USE_DATA_ADDR),
	E(MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR),
	E(MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR),
	{ },
#undef E
};

struct range_spec {
	mach_vm_offset_t start;
	mach_vm_offset_t end;
};

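/*
 * The memory-entry window [mo_offset, mo_offset + mo_size) covers the middle
 * 16K of the 48K scratch buffer plus 128 bytes on either side, so neither its
 * start nor its end is page aligned.
 */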
static const mach_vm_offset_t  sz16k     = 16 << 10;
static mach_vm_address_t       scratch_addr = 0;
static const mach_vm_size_t    scratch_size = 3 * sz16k;
static const mach_vm_offset_t  mo_offset = sz16k - 128;
static const mach_vm_size_t    mo_size   = sz16k + 256;

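/*
 * Build a heap-allocated, zero-terminated list of [start, end) ranges to map
 * from the memory entry.  Most of the ranges are not page aligned, and the
 * second half spans more than sz16k bytes.  The callers never free the
 * result, which is fine for a short-lived test process.
 */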
static struct range_spec *
range_specs(void)
{
	struct range_spec array[] = {
		{ 0, 64, },
		{ 0, 128, },
		{ 0, 256, },
		{ 0, PAGE_SIZE, },
		{ 0, PAGE_SIZE + 64, },
		{ 0, 2 * PAGE_SIZE - 64 },
		{ 0, 2 * PAGE_SIZE },
		{ 64, PAGE_SIZE, },
		{ 64, PAGE_SIZE + 64, },
		{ 64, 2 * PAGE_SIZE - 64 },
		{ 64, 2 * PAGE_SIZE },
		{ PAGE_SIZE - 64, PAGE_SIZE, },
		{ PAGE_SIZE - 64, PAGE_SIZE + 64, },
		{ PAGE_SIZE - 64, 2 * PAGE_SIZE - 64 },
		{ PAGE_SIZE - 64, 2 * PAGE_SIZE },
		{ PAGE_SIZE, PAGE_SIZE + 64, },
		{ PAGE_SIZE, 2 * PAGE_SIZE - 64 },
		{ PAGE_SIZE, 2 * PAGE_SIZE },
		{ PAGE_SIZE + 64, 2 * PAGE_SIZE - 64 },
		{ PAGE_SIZE + 64, 2 * PAGE_SIZE },
		{ 0, sz16k + 64, },
		{ 0, sz16k + 128, },
		{ 0, sz16k + 256, },
		{ 0, sz16k + PAGE_SIZE, },
		{ 0, sz16k + PAGE_SIZE + 64, },
		{ 0, sz16k + 2 * PAGE_SIZE - 64 },
		{ 0, sz16k + 2 * PAGE_SIZE },
		{ 64, sz16k + PAGE_SIZE, },
		{ 64, sz16k + PAGE_SIZE + 64, },
		{ 64, sz16k + 2 * PAGE_SIZE - 64 },
		{ 64, sz16k + 2 * PAGE_SIZE },
		{ PAGE_SIZE - 64, sz16k + PAGE_SIZE, },
		{ PAGE_SIZE - 64, sz16k + PAGE_SIZE + 64, },
		{ PAGE_SIZE - 64, sz16k + 2 * PAGE_SIZE - 64 },
		{ PAGE_SIZE - 64, sz16k + 2 * PAGE_SIZE },
		{ PAGE_SIZE, sz16k + PAGE_SIZE + 64, },
		{ PAGE_SIZE, sz16k + 2 * PAGE_SIZE - 64 },
		{ PAGE_SIZE, sz16k + 2 * PAGE_SIZE },
		{ PAGE_SIZE + 64, sz16k + 2 * PAGE_SIZE - 64 },
		{ PAGE_SIZE + 64, sz16k + 2 * PAGE_SIZE },
		{ },
	};
	struct range_spec *ret;

	ret = malloc(sizeof(array));
	memcpy(ret, array, sizeof(array));
	return ret;
}

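/*
 * Make a memory entry for [mo_offset, mo_offset + mo_size) of the scratch
 * buffer with the given mo_flags, then try to map [map_offset, map_offset +
 * map_size) of that entry with the given vmflags.  Predict whether
 * mach_vm_map() should fail and, when it succeeds, validate the resulting
 * region's base address, size, and contents.
 */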
static void
mach_vm_map_unaligned_test(
	int               mo_flags,
	int               vmflags,
	mach_vm_offset_t  map_offset,
	mach_vm_size_t    map_size)
{
	const int         tag = VM_MAKE_TAG(VM_MEMORY_SCENEKIT);
	mach_vm_address_t map_addr;
	kern_return_t     kr;
	mach_vm_size_t    size;
	mach_port_t       mo_port;
	bool              should_fail = false;
	mach_vm_offset_t  used_offset_for_size;
	mach_vm_offset_t  mo_start;
	mach_vm_offset_t  mo_end;

	size = mo_size;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &size, scratch_addr + mo_offset, VM_PROT_DEFAULT | mo_flags,
	    &mo_port, MACH_PORT_NULL);

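	/*
	 * used_offset_for_size is the sub-page offset the test expects the
	 * kernel to preserve in the returned address when
	 * VM_FLAGS_RETURN_DATA_ADDR is set.  With MAP_MEM_USE_DATA_ADDR the
	 * entry itself starts at the unaligned data address, so mo_offset
	 * contributes as well.
	 */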
	if (vmflags & VM_FLAGS_RETURN_DATA_ADDR) {
		used_offset_for_size = map_offset;
		if (mo_flags & MAP_MEM_USE_DATA_ADDR) {
			used_offset_for_size += mo_offset;
		}
	} else {
		used_offset_for_size = 0;
	}

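	/*
	 * mo_start/mo_end describe the entry's extent in object offsets; they
	 * are only used to make the log messages below easier to read.
	 */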
	if (mo_flags & MAP_MEM_USE_DATA_ADDR) {
		mo_start = mo_offset;
		mo_end   = mo_offset + mo_size;
	} else {
		mo_start = trunc_page(mo_offset);
		mo_end   = round_page(mo_offset + mo_size);
	}

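	/*
	 * The mapping is expected to fail if the requested range would extend
	 * past the end of the memory entry, or if map_offset is not page
	 * aligned and neither VM_FLAGS_RETURN_DATA_ADDR nor a
	 * MAP_MEM_VM_COPY/MAP_MEM_VM_SHARE entry is used.
	 */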
	if (round_page(used_offset_for_size + map_size) > round_page(mo_offset + mo_size)) {
		should_fail = true;
	}
	if ((map_offset & PAGE_MASK) &&
	    !(vmflags & VM_FLAGS_RETURN_DATA_ADDR) &&
	    !(mo_flags & (MAP_MEM_VM_COPY | MAP_MEM_VM_SHARE))) {
		should_fail = true;
	}


	T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
	    "made memory entry for [%p + %#llx, %p + %#llx), size = %lld",
	    (void *)scratch_addr, mo_offset,
	    (void *)scratch_addr, mo_offset + mo_size, size);
	if (mo_flags & MAP_MEM_USE_DATA_ADDR) {
		T_QUIET; T_EXPECT_EQ(size, (mo_offset & PAGE_MASK) + mo_size, "check memory entry's size");
	} else {
		T_QUIET; T_EXPECT_EQ(size, round_page((mo_offset & PAGE_MASK) + mo_size), "check memory entry's size");
	}

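	/*
	 * Map map_size bytes of the entry starting at map_offset, letting the
	 * kernel pick the address, then compare the result against the
	 * prediction made above.
	 */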
	map_addr = 0;
	kr = mach_vm_map(mach_task_self(), &map_addr, map_size, 0,
	    tag | vmflags, mo_port, map_offset, true,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	if (should_fail) {
		T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT,
		    "mach_vm_map(mo_port, [%#llx, %#llx) of [%#llx, %#llx) = %p",
		    mo_start + map_offset, mo_start + map_offset + map_size,
		    mo_start, mo_end, (void *)map_addr);
	} else {
		T_EXPECT_MACH_SUCCESS(kr,
		    "mach_vm_map(mo_port, [%#llx, %#llx) of [%#llx, %#llx) = %p",
		    mo_start + map_offset, mo_start + map_offset + map_size,
		    mo_start, mo_end, (void *)map_addr);
	}

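	/*
	 * When the mapping was created, verify that the returned address has
	 * the expected alignment, then look up the region with
	 * mach_vm_region() and check its base, size, and contents.
	 */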
	if (kr == KERN_SUCCESS) {
		vm_region_basic_info_data_64_t info;
		mach_msg_type_number_t         icount = VM_REGION_BASIC_INFO_COUNT_64;
		mach_vm_address_t              want_addr, r_addr;
		mach_vm_size_t                 want_size, r_size;
		uint32_t                       want_data;

		if (vmflags & VM_FLAGS_RETURN_DATA_ADDR) {
			T_QUIET; T_EXPECT_EQ(map_addr & PAGE_MASK,
			    used_offset_for_size & PAGE_MASK,
			    "check returned address maintained offset");
		} else {
			T_QUIET; T_EXPECT_EQ(map_addr & PAGE_MASK, 0ull,
			    "check returned address is aligned");
		}

		r_addr = map_addr;
		kr = mach_vm_region(mach_task_self(), &r_addr, &r_size,
		    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &icount,
		    &(mach_port_t){0});

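		/*
		 * Compute the expected region base and size, and the
		 * scratch-pattern word index that should appear at the region
		 * base.  Copy/share entries always map whole objects, so
		 * their size is derived from the entry itself rather than
		 * from the requested range.
		 */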
		if (!should_fail) {
			want_addr = trunc_page(map_addr);
			if (mo_flags & (MAP_MEM_VM_COPY | MAP_MEM_VM_SHARE)) {
				/* mach_vm_map() only does full objects */
				want_size = round_page(mo_offset + mo_size) - trunc_page(mo_offset);
			} else {
				want_size = round_page(used_offset_for_size + map_size) - trunc_page(used_offset_for_size);
			}
			if (mo_flags & (MAP_MEM_VM_COPY | MAP_MEM_VM_SHARE)) {
				want_data = trunc_page(mo_offset) / sizeof(uint32_t);
			} else if (mo_flags & MAP_MEM_USE_DATA_ADDR) {
				want_data = (uint32_t)trunc_page(map_offset +
				    mo_offset) / sizeof(uint32_t);
			} else {
				want_data = (uint32_t)(trunc_page(map_offset) +
				    trunc_page(mo_offset)) / sizeof(uint32_t);
			}
			T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region(%p)", (void *)map_addr);
			T_QUIET; T_EXPECT_EQ(r_addr, want_addr, "validate region base");
			T_QUIET; T_EXPECT_EQ(r_size, want_size, "validate region size");
			for (uint32_t offs = 4; offs < r_size; offs += PAGE_SIZE) {
				T_QUIET; T_EXPECT_EQ(*(uint32_t *)(r_addr + offs),
				    want_data + (offs / 4),
				    "validate content at offset %d", offs);
			}
		}

		kr = mach_vm_deallocate(mach_task_self(), r_addr, r_size);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate(%p, %lld)", (void *)r_addr, r_size);
	}

	kr = mach_port_deallocate(mach_task_self(), mo_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "m_p_d(mo_port)");
}


T_DECL(mach_vm_map_unaligned,
    "check mach_vm_map() with misaligned offsets and sizes")
{
	kern_return_t kr;

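	/*
	 * Allocate a 48K scratch buffer; the 64K - 1 alignment mask keeps
	 * scratch_addr aligned regardless of whether the page size is 4K or
	 * 16K.
	 */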
	kr = mach_vm_map(mach_task_self(), &scratch_addr, scratch_size,
	    (64 << 10) - 1, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, true,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	T_EXPECT_MACH_SUCCESS(kr, "allocated scratch space: %p", (void *)scratch_addr);

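	/*
	 * Stamp every 32-bit word with its own index so the content checks in
	 * mach_vm_map_unaligned_test() can tell which part of the buffer was
	 * mapped.
	 */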
	for (uint32_t i = 0; i < scratch_size / 4; i++) {
		((uint32_t *)scratch_addr)[i] = i;
	}

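	/*
	 * Try every flag combination against every range, first without and
	 * then with VM_FLAGS_RETURN_DATA_ADDR.
	 */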
	for (struct mo_spec *mo = mo_specs; mo->s; mo++) {
		T_LOG("*** mo type: %s (return_data_addr == 0)", mo->s);
		for (struct range_spec *r = range_specs(); r->end; r++) {
			T_LOG("[%#llx, %#llx)", r->start, r->end);
			mach_vm_map_unaligned_test(mo->flags,
			    VM_FLAGS_ANYWHERE,
			    r->start, r->end - r->start);
		}
		T_LOG("");
	}

	for (struct mo_spec *mo = mo_specs; mo->s; mo++) {
		T_LOG("*** mo type: %s (return_data_addr == 1)", mo->s);
		for (struct range_spec *r = range_specs(); r->end; r++) {
			T_LOG("[%#llx, %#llx)", r->start, r->end);
			mach_vm_map_unaligned_test(mo->flags,
			    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
			    r->start, r->end - r->start);
		}
		T_LOG("");
	}
}

T_DECL(vm_map_enter_mem_object_overflow,
    "Test overflow cases in vm_map_enter_mem_object",
    T_META_RUN_CONCURRENTLY(true), T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;

	mach_vm_address_t alloced_addr;
	mach_vm_size_t size_16kb, entry_size;
	vm_map_offset_t entry_offset;
	mach_port_t entry_handle;
	vm_map_offset_t target_addr, target_offset;
	int vmflags;

	size_16kb = 16 * 1024;
	/*
	 * Create an allocation in the source map, then make a copy memory
	 * entry at a non-page-aligned offset so that its data_offset is
	 * nonzero.
	 */
	kr = mach_vm_allocate(mach_task_self(), &alloced_addr, 2 * size_16kb, VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "set up allocation");

	entry_size = size_16kb;
	entry_offset = alloced_addr + (size_16kb / 2);
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, entry_offset,
	    MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_ALL,
	    &entry_handle, MACH_PORT_NULL);

	T_ASSERT_MACH_SUCCESS(kr, "set up copy memory entry");

	/*
	 * note: currently, the next three cases below are caught early by
	 * vm_map_enter_mem_object_sanitize and thus don't give any extra coverage
	 */

	/*
	 * In vm_map_enter_mem_object_sanitize, attempt to overflow obj_size by
	 * having size round up to 0.
	 */
	vmflags = VM_FLAGS_ANYWHERE;
	kr = mach_vm_map(mach_task_self(), &target_addr, (mach_vm_size_t) -1, 0,
	    vmflags, entry_handle, 0, true, VM_PROT_ALL, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "obj_size overflow case");

	/*
	 * In vm_map_enter_adjust_offset, attempt to overflow obj_offs + quantity
	 * note: quantity = data_offset, which was set to a nonzero value
	 */
	vmflags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR;
	target_offset = (vm_map_offset_t) -1;
	kr = mach_vm_map(mach_task_self(), &target_addr, size_16kb, 0, vmflags,
	    entry_handle, target_offset, true, VM_PROT_ALL, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "obj_offs overflow case");

	/*
	 * Attempt to overflow obj_end + quantity
	 */
	vmflags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR;
	target_offset = (vm_map_offset_t) -(size_16kb + 1);
	kr = mach_vm_map(mach_task_self(), &target_addr, size_16kb, 0, vmflags,
	    entry_handle, target_offset, true, VM_PROT_ALL, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "obj_end overflow case");

	/*
	 * Because target_offset points to the second-to-last page and the
	 * size of the entry is one page (size_16kb), obj_end will point to the
	 * last page.
	 *
	 * In vm_map_enter_adjust_offset, this means obj_end + data_offset gets
	 * rounded up to 0
	 */
	target_offset = (vm_map_offset_t) -(2 * size_16kb);
	kr = mach_vm_map(mach_task_self(), &target_addr, size_16kb, 0, vmflags,
	    entry_handle, target_offset, true, VM_PROT_ALL, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "round-to-0 case should be detected");
}