#include <darwintest.h>
#include <darwintest_utils.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/sysctl.h>
#include <assert.h>

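/* 4KB and 16KB are the two hardware page sizes whose rounding behavior these tests exercise. */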
#define KB4 ((mach_vm_size_t)4*1024)
#define KB16 ((mach_vm_size_t)16*1024)

T_GLOBAL_META(
    T_META_NAMESPACE("xnu.vm"),
    T_META_RADAR_COMPONENT_NAME("xnu"),
    T_META_RADAR_COMPONENT_VERSION("VM"),
    T_META_ALL_VALID_ARCHS(true));

#ifdef __x86_64__
// return true if the process is running under Rosetta translation
// https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment#Determine-Whether-Your-App-Is-Running-as-a-Translated-Binary
static bool
isRosetta(void)
{
	int out_value = 0;
	size_t io_size = sizeof(out_value);
	if (sysctlbyname("sysctl.proc_translated", &out_value, &io_size, NULL, 0) == 0) {
		assert(io_size >= sizeof(out_value));
		return out_value;
	}
	return false;
}
#endif /* __x86_64__ */

T_DECL(vm_memory_entry_parent,
    "Test that we properly align child memory_entries after vm_map",
    T_META_RUN_CONCURRENTLY(true))
{
	mach_vm_address_t src_addr, mapped_addr;
	mach_vm_size_t size, parent_offset;
	mach_port_t named_me_port, child_me_port;
	kern_return_t kr;

	size = KB16 * 2;

	kr = mach_vm_allocate(mach_task_self(), &src_addr, size, VM_FLAGS_ANYWHERE);
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate");

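	/*
	 * Fill each 4KB sub-page with a distinct byte pattern (0x11, 0x22, ...)
	 * so we can later tell which part of the source ended up mapped.
	 */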
	for (size_t i = 0; i < size / KB4; i++) {
		memset((void *)(src_addr + KB4 * i), (i + 1) * 0x11, KB4);
	}

	/*
	 * Create a memory entry offset by KB4 * 2.
	 * On userspaces with a vm_map_page_size of KB16,
	 * this should be rounded back to 0 when used as the offset in the kernel.
	 */
	parent_offset = KB4 * 2;
	mach_vm_size_t parent_entry_size = size;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &parent_entry_size,
	    src_addr + parent_offset,
	    VM_PROT_READ | VM_PROT_WRITE,
	    &named_me_port,
	    MACH_PORT_NULL);
	T_EXPECT_MACH_SUCCESS(kr, "parent mach_make_memory_entry()");

	/*
	 * Create a memory entry offset into its parent by KB4 * 3.
	 * On kernels with a PAGE_SIZE of KB16,
	 * this should be rounded back to 0 when used as the offset in the kernel.
	 */
	mach_vm_offset_t child_offset = KB4 * 3;
	mach_vm_size_t child_entry_size = KB4 * 1;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &child_entry_size,
	    child_offset,
	    VM_PROT_READ | VM_PROT_WRITE | MAP_MEM_USE_DATA_ADDR,
	    &child_me_port,
	    named_me_port);
	T_EXPECT_MACH_SUCCESS(kr, "child mach_make_memory_entry()");

	/*
	 * Map in our child memory entry.
	 */
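	/*
	 * The entry is mapped at offset 0; where its contents land relative to
	 * src_addr depends on how the offsets above were rounded (see below).
	 */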
	kr = mach_vm_map(mach_task_self(),
	    &mapped_addr,
	    child_entry_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    child_me_port,
	    0,
	    false,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_NONE);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");

	/*
	 * On Rosetta, we expect the mapped contents to be offset from src by the parent offset.
	 * On arm64, we expect both offsets to be rounded away, so the mapped contents match src at offset 0.
	 * On Intel, we expect the mapped contents to be offset by the parent and child offsets combined.
	 */

#if __x86_64__
	if (isRosetta()) {
		T_ASSERT_EQ(0, memcmp((void *)mapped_addr, (void *)(src_addr + parent_offset), child_entry_size), "Mapped values equal src values");
	} else {
		T_ASSERT_EQ(0, memcmp((void *)mapped_addr, (void *)(src_addr + (parent_offset + child_offset)), child_entry_size), "Mapped values equal src values");
	}
#else
	T_ASSERT_EQ(0, memcmp((void *)mapped_addr, (void *)src_addr, child_entry_size), "Mapped values equal src values");
#endif
}

T_DECL(vm_memory_entry_named_reuse_parent,
    "Test that we re-use the parent entry when possible with MAP_MEM_NAMED_REUSE",
    T_META_RUN_CONCURRENTLY(true), T_META_TAG_VM_PREFERRED)
{
	/*
	 * Test setup - get a memory entry, then map it into the address space.
	 */
	mach_port_t parent_handle, entry_handle;

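	/* Create a 16KB named entry backed by internal (anonymous) memory to serve as the parent. */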
	kern_return_t kr = mach_memory_object_memory_entry_64(mach_host_self(), 1,
	    KB16, VM_PROT_READ | VM_PROT_WRITE, 0, &parent_handle);
	T_ASSERT_MACH_SUCCESS(kr, "make parent_handle return value");

	mach_vm_address_t alloced_addr;
	kr = mach_vm_map(mach_task_self(), &alloced_addr, KB16, 0, VM_FLAGS_ANYWHERE,
	    parent_handle, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "map parent_handle");

	/*
	 * Attempt to use MAP_MEM_NAMED_REUSE to have the process "share" the memory
	 * entry with itself. We expect to see that the handle returned is identical
	 * to the handle provided, unlike with MAP_MEM_VM_SHARE where a new handle
	 * to the same region would be returned.
	 */
	memory_object_size_t entry_size = KB16;
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, alloced_addr,
	    MAP_MEM_NAMED_REUSE | VM_PROT_DEFAULT, &entry_handle, parent_handle);
	T_EXPECT_MACH_SUCCESS(kr, "make entry_handle return value");
	T_EXPECT_EQ(parent_handle, entry_handle, "NAMED_REUSE should re-use parent_handle");
}


T_DECL(vm_memory_entry_parent_copy,
    "Test that making a memory entry fails if the parent is a copy entry",
    T_META_RUN_CONCURRENTLY(true), T_META_TAG_VM_PREFERRED)
{
	/*
	 * Test setup - allocate a region and get a copy entry to it.
	 */
	mach_vm_address_t alloced_addr;
	kern_return_t kr = mach_vm_allocate(mach_task_self(), &alloced_addr, KB16, VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "mach_vm_allocate");

	memory_object_size_t parent_size = KB16;
	mach_port_t parent_handle;
	kr = mach_make_memory_entry_64(mach_task_self(), &parent_size, alloced_addr,
	    MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &parent_handle, MACH_PORT_NULL);
	T_ASSERT_MACH_SUCCESS(kr, "make parent_handle return value");

	/*
	 * Attempt to make a new entry with the copy entry as parent.
	 */
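	/*
	 * Seed entry_handle with a sentinel so we can verify the out parameters
	 * are left untouched when the call fails.
	 */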
	memory_object_size_t entry_size = KB16;
	mach_port_t invalid_handle = (mach_port_t)0xdeadbeef;
	mach_port_t entry_handle = invalid_handle;
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, alloced_addr,
	    MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_handle, parent_handle);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "make entry_handle return value");
	T_EXPECT_EQ(entry_handle, invalid_handle, "make entry_handle handle unchanged on error");
	T_EXPECT_EQ(entry_size, KB16, "make entry_handle size unchanged on error");
}

T_DECL(vm_memory_entry_from_parent_entry_insufficient_permissions,
    "Test that parent permissions are correctly checked in mach_make_memory_entry_from_parent_entry",
    T_META_RUN_CONCURRENTLY(true), T_META_TAG_VM_PREFERRED)
{
	/*
	 * Test setup - create parent entry with read-only permissions.
	 */
	mach_port_t parent_handle;
	kern_return_t kr = mach_memory_object_memory_entry_64(mach_host_self(), 1,
	    KB16, VM_PROT_READ, 0, &parent_handle);
	T_ASSERT_MACH_SUCCESS(kr, "make parent_handle return value");

	/*
	 * Attempt to create a new entry with read-write permissions.
	 */
	memory_object_size_t entry_size = KB16;
	mach_port_t invalid_handle = (mach_port_t)0xdeadbeef;
	mach_port_t entry_handle = invalid_handle;
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, 0,
	    VM_PROT_READ | VM_PROT_WRITE, &entry_handle, parent_handle);
	T_EXPECT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE, "return value without mask_permissions");
	T_EXPECT_EQ(entry_handle, invalid_handle, "handle unchanged on failure");
	T_EXPECT_EQ(entry_size, KB16, "size unchanged on failure");

	/*
	 * Try again with mask_permissions set, and validate that we only get the
	 * read permissions allowed.
	 */
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, 0,
	    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK, &entry_handle, parent_handle);
	T_EXPECT_MACH_SUCCESS(kr, "return value with mask_permissions");

	// To validate the permissions, attempt to map the new entry into the address space
	mach_vm_address_t alloced_addr;
	kr = mach_vm_map(mach_task_self(), &alloced_addr, KB16, 0, VM_FLAGS_ANYWHERE,
	    entry_handle, 0, false, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_RIGHT, "entry shouldn't have write permissions");
	kr = mach_vm_map(mach_task_self(), &alloced_addr, KB16, 0, VM_FLAGS_ANYWHERE,
	    entry_handle, 0, false, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_DEFAULT);
	T_EXPECT_MACH_SUCCESS(kr, "entry should have read permissions");
}