1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (C) 1988, 1989, NeXT, Inc.
30 *
31 * File: kern/mach_loader.c
32 * Author: Avadis Tevanian, Jr.
33 *
34 * Mach object file loader (kernel version, for now).
35 *
36 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
37 * Started.
38 */
39
40 #include <sys/param.h>
41 #include <sys/vnode_internal.h>
42 #include <sys/uio.h>
43 #include <sys/namei.h>
44 #include <sys/proc_internal.h>
45 #include <sys/kauth.h>
46 #include <sys/stat.h>
47 #include <sys/malloc.h>
48 #include <sys/mount_internal.h>
49 #include <sys/fcntl.h>
50 #include <sys/file_internal.h>
51 #include <sys/ubc_internal.h>
52 #include <sys/imgact.h>
53 #include <sys/codesign.h>
54 #include <sys/proc_uuid_policy.h>
55 #include <sys/reason.h>
56 #include <sys/kdebug.h>
57 #include <sys/spawn_internal.h>
58
59 #include <mach/mach_types.h>
60 #include <mach/vm_map.h> /* vm_allocate() */
61 #include <mach/mach_vm.h> /* mach_vm_allocate() */
62 #include <mach/vm_statistics.h>
63 #include <mach/task.h>
64 #include <mach/thread_act.h>
65
66 #include <machine/vmparam.h>
67 #include <machine/exec.h>
68 #include <machine/pal_routines.h>
69
70 #include <kern/ast.h>
71 #include <kern/kern_types.h>
72 #include <kern/cpu_number.h>
73 #include <kern/mach_loader.h>
74 #include <kern/mach_fat.h>
75 #include <kern/kalloc.h>
76 #include <kern/task.h>
77 #include <kern/thread.h>
78 #include <kern/page_decrypt.h>
79
80 #include <mach-o/fat.h>
81 #include <mach-o/loader.h>
82
83 #include <vm/pmap.h>
84 #include <vm/vm_map_xnu.h>
85 #include <vm/vm_kern_xnu.h>
86 #include <vm/vm_pager_xnu.h>
87 #include <vm/vnode_pager.h>
88 #include <vm/vm_protos.h>
89 #include <vm/vm_shared_region.h>
90 #include <IOKit/IOReturn.h> /* for kIOReturnNotPrivileged */
91 #include <IOKit/IOBSD.h> /* for IOVnodeHasEntitlement */
92
93 #include <os/log.h>
94 #include <os/overflow.h>
95
96 #include "kern_exec_internal.h"
97
98
99
100 /* An empty load_result_t */
101 static const load_result_t load_result_null = {
102 .mach_header = MACH_VM_MIN_ADDRESS,
103 .entry_point = MACH_VM_MIN_ADDRESS,
104 .user_stack = MACH_VM_MIN_ADDRESS,
105 .user_stack_size = 0,
106 .user_stack_alloc = MACH_VM_MIN_ADDRESS,
107 .user_stack_alloc_size = 0,
108 .all_image_info_addr = MACH_VM_MIN_ADDRESS,
109 .all_image_info_size = 0,
110 .thread_count = 0,
111 .unixproc = 0,
112 .dynlinker = 0,
113 .needs_dynlinker = 0,
114 .validentry = 0,
115 .using_lcmain = 0,
116 .is_64bit_addr = 0,
117 .is_64bit_data = 0,
118 .custom_stack = 0,
119 .csflags = 0,
120 .has_pagezero = 0,
121 .uuid = { 0 },
122 .min_vm_addr = MACH_VM_MAX_ADDRESS,
123 .max_vm_addr = MACH_VM_MIN_ADDRESS,
124 .ro_vm_start = MACH_VM_MIN_ADDRESS,
125 .ro_vm_end = MACH_VM_MIN_ADDRESS,
126 .cs_end_offset = 0,
127 .threadstate = NULL,
128 .threadstate_sz = 0,
129 .is_rosetta = 0,
130 .dynlinker_ro_vm_start = 0,
131 .dynlinker_ro_vm_end = 0,
132 .dynlinker_mach_header = MACH_VM_MIN_ADDRESS,
133 .dynlinker_fd = -1,
134 };
135
136 /*
137 * Prototypes of static functions.
138 */
139 static load_return_t
140 parse_machfile(
141 struct vnode *vp,
142 vm_map_t map,
143 thread_t thread,
144 struct mach_header *header,
145 off_t file_offset,
146 off_t macho_size,
147 int depth,
148 int64_t slide,
149 int64_t dyld_slide,
150 load_result_t *result,
151 load_result_t *binresult,
152 struct image_params *imgp
153 );
154
155 static load_return_t
156 load_segment(
157 struct load_command *lcp,
158 uint32_t filetype,
159 void *control,
160 off_t pager_offset,
161 off_t macho_size,
162 struct vnode *vp,
163 vm_map_t map,
164 int64_t slide,
165 load_result_t *result,
166 struct image_params *imgp
167 );
168
169 static load_return_t
170 load_uuid(
171 struct uuid_command *uulp,
172 char *command_end,
173 load_result_t *result
174 );
175
176 static load_return_t
177 load_version(
178 struct version_min_command *vmc,
179 boolean_t *found_version_cmd,
180 struct image_params *imgp,
181 load_result_t *result
182 );
183
184 static load_return_t
185 load_code_signature(
186 struct linkedit_data_command *lcp,
187 struct vnode *vp,
188 off_t macho_offset,
189 off_t macho_size,
190 cpu_type_t cputype,
191 cpu_subtype_t cpusubtype,
192 load_result_t *result,
193 struct image_params *imgp);
194
195 #if CONFIG_CODE_DECRYPTION
196 static load_return_t
197 set_code_unprotect(
198 struct encryption_info_command *lcp,
199 caddr_t addr,
200 vm_map_t map,
201 int64_t slide,
202 struct vnode *vp,
203 off_t macho_offset,
204 cpu_type_t cputype,
205 cpu_subtype_t cpusubtype);
206 #endif
207
208 static
209 load_return_t
210 load_main(
211 struct entry_point_command *epc,
212 thread_t thread,
213 int64_t slide,
214 load_result_t *result
215 );
216
217 static
218 load_return_t
219 setup_driver_main(
220 thread_t thread,
221 int64_t slide,
222 load_result_t *result
223 );
224
225 static load_return_t
226 load_unixthread(
227 struct thread_command *tcp,
228 thread_t thread,
229 int64_t slide,
230 boolean_t is_x86_64_compat_binary,
231 load_result_t *result
232 );
233
234 static load_return_t
235 load_threadstate(
236 thread_t thread,
237 uint32_t *ts,
238 uint32_t total_size,
239 load_result_t *result
240 );
241
242 static load_return_t
243 load_threadstack(
244 thread_t thread,
245 uint32_t *ts,
246 uint32_t total_size,
247 mach_vm_offset_t *user_stack,
248 int *customstack,
249 boolean_t is_x86_64_compat_binary,
250 load_result_t *result
251 );
252
253 static load_return_t
254 load_threadentry(
255 thread_t thread,
256 uint32_t *ts,
257 uint32_t total_size,
258 mach_vm_offset_t *entry_point
259 );
260
261 static load_return_t
262 load_dylinker(
263 struct dylinker_command *lcp,
264 integer_t archbits,
265 vm_map_t map,
266 thread_t thread,
267 int depth,
268 int64_t slide,
269 load_result_t *result,
270 struct image_params *imgp
271 );
272
273
274 #if CONFIG_ROSETTA
275 static load_return_t
276 load_rosetta(
277 vm_map_t map,
278 thread_t thread,
279 load_result_t *result,
280 struct image_params *imgp
281 );
282 #endif
283
284 #if __x86_64__
285 extern int bootarg_no32exec;
286 static boolean_t
287 check_if_simulator_binary(
288 struct image_params *imgp,
289 off_t file_offset,
290 off_t macho_size);
291 #endif
292
293 struct macho_data;
294
295 static load_return_t
296 get_macho_vnode(
297 const char *path,
298 integer_t archbits,
299 struct mach_header *mach_header,
300 off_t *file_offset,
301 off_t *macho_size,
302 struct macho_data *macho_data,
303 struct vnode **vpp,
304 struct image_params *imgp
305 );
306
307 static inline void
308 widen_segment_command(const struct segment_command *scp32,
309 struct segment_command_64 *scp)
310 {
311 scp->cmd = scp32->cmd;
312 scp->cmdsize = scp32->cmdsize;
313 bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
314 scp->vmaddr = scp32->vmaddr;
315 scp->vmsize = scp32->vmsize;
316 scp->fileoff = scp32->fileoff;
317 scp->filesize = scp32->filesize;
318 scp->maxprot = scp32->maxprot;
319 scp->initprot = scp32->initprot;
320 scp->nsects = scp32->nsects;
321 scp->flags = scp32->flags;
322 }
323
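/*
 * Record the (slid) address and size of dyld's "__all_image_info" section,
 * found in the __DATA or __DATA_DIRTY segment, into the load result.
 */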
324 static void
325 note_all_image_info_section(const struct segment_command_64 *scp,
326 boolean_t is64, size_t section_size, const void *sections,
327 int64_t slide, load_result_t *result)
328 {
329 const union {
330 struct section s32;
331 struct section_64 s64;
332 } *sectionp;
333 unsigned int i;
334
335
336 if (strncmp(scp->segname, "__DATA_DIRTY", sizeof(scp->segname)) != 0 &&
337 strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) {
338 return;
339 }
340 for (i = 0; i < scp->nsects; ++i) {
341 sectionp = (const void *)
342 ((const char *)sections + section_size * i);
343 if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
344 sizeof(sectionp->s64.sectname))) {
345 result->all_image_info_addr =
346 is64 ? sectionp->s64.addr : sectionp->s32.addr;
347 result->all_image_info_addr += slide;
348 result->all_image_info_size =
349 is64 ? sectionp->s64.size : sectionp->s32.size;
350 return;
351 }
352 }
353 }
354
355 #if __arm64__
356 /*
357 * Allow bypassing some security rules (hard pagezero, no write+execute)
358 * in exchange for better binary compatibility for legacy apps built
359 * before 16KB-alignment was enforced.
360 */
361 const int fourk_binary_compatibility_unsafe = TRUE;
362 #endif /* __arm64__ */
363
364 #if XNU_TARGET_OS_OSX
365
366 /* Determines whether this process may host/run third party plugins. */
367 static inline bool
368 process_is_plugin_host(struct image_params *imgp, load_result_t *result)
369 {
370 if (imgp->ip_flags & IMGPF_NOJOP) {
371 return false;
372 }
373
374 if (!result->platform_binary) {
375 return false;
376 }
377
378 struct cs_blob *csblob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset);
379 const char *identity = csblob_get_identity(csblob);
380 if (!identity) {
381 return false;
382 }
383
384 /* Check whether the override-plugin-host entitlement is present and the posix_spawn attribute to disable A keys was passed */
385 if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, OVERRIDE_PLUGIN_HOST_ENTITLEMENT)) {
386 bool ret = imgp->ip_flags & IMGPF_PLUGIN_HOST_DISABLE_A_KEYS;
387 if (ret) {
388 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
389 set_proc_name(imgp, p);
390 os_log(OS_LOG_DEFAULT, "%s: running binary \"%s\" in keys-off mode due to posix_spawnattr_disable_ptr_auth_a_keys_np", __func__, p->p_name);
391 }
392 return ret;
393 }
394
395 /* Disabling library validation is a good signal that this process plans to host plugins */
396 const char *const disable_lv_entitlements[] = {
397 "com.apple.security.cs.disable-library-validation",
398 "com.apple.private.cs.automator-plugins",
399 CLEAR_LV_ENTITLEMENT,
400 };
401 for (size_t i = 0; i < ARRAY_COUNT(disable_lv_entitlements); i++) {
402 const char *entitlement = disable_lv_entitlements[i];
403 if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, entitlement)) {
404 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
405 set_proc_name(imgp, p);
406 os_log(OS_LOG_DEFAULT, "%s: running binary \"%s\" in keys-off mode due to entitlement: %s", __func__, p->p_name, entitlement);
407 return true;
408 }
409 }
410
411 /* From /System/Library/Security/HardeningExceptions.plist */
412 const char *const hardening_exceptions[] = {
413 "com.apple.perl5", /* Scripting engines may load third party code and JIT */
414 "com.apple.perl", /* Scripting engines may load third party code and JIT */
415 "org.python.python", /* Scripting engines may load third party code and JIT */
416 "com.apple.expect", /* Scripting engines may load third party code and JIT */
417 "com.tcltk.wish", /* Scripting engines may load third party code and JIT */
418 "com.tcltk.tclsh", /* Scripting engines may load third party code and JIT */
419 "com.apple.ruby", /* Scripting engines may load third party code and JIT */
420 "com.apple.bash", /* Required for the 'enable' command */
421 "com.apple.zsh", /* Required for the 'zmodload' command */
422 "com.apple.ksh", /* Required for 'builtin' command */
423 };
424 for (size_t i = 0; i < ARRAY_COUNT(hardening_exceptions); i++) {
425 if (strncmp(hardening_exceptions[i], identity, strlen(hardening_exceptions[i])) == 0) {
426 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
427 set_proc_name(imgp, p);
428 os_log(OS_LOG_DEFAULT, "%s: running binary \"%s\" in keys-off mode due to identity: %s", __func__, p->p_name, identity);
429 return true;
430 }
431 }
432
433 return false;
434 }
435 #endif /* XNU_TARGET_OS_OSX */
436
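/*
 * load_machfile: top-level entry point for mapping a Mach-O image during
 * exec/spawn. It creates a fresh pmap and vm_map for the new image, picks
 * the ASLR and dyld slides, runs parse_machfile() over the load commands,
 * enforces page-zero requirements, and hands the completed map back to the
 * caller through *mapp.
 */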
437 load_return_t
438 load_machfile(
439 struct image_params *imgp,
440 struct mach_header *header,
441 thread_t thread,
442 vm_map_t *mapp,
443 load_result_t *result
444 )
445 {
446 struct vnode *vp = imgp->ip_vp;
447 off_t file_offset = imgp->ip_arch_offset;
448 off_t macho_size = imgp->ip_arch_size;
449 off_t total_size = 0;
450 off_t file_size = imgp->ip_vattr->va_data_size;
451 pmap_t pmap = 0; /* protected by create_map */
452 vm_map_t map;
453 load_result_t myresult;
454 load_return_t lret;
455 boolean_t enforce_hard_pagezero = TRUE;
456 int in_exec = (imgp->ip_flags & IMGPF_EXEC);
457 task_t task = current_task();
458 int64_t aslr_page_offset = 0;
459 int64_t dyld_aslr_page_offset = 0;
460 int64_t aslr_section_size = 0;
461 int64_t aslr_section_offset = 0;
462 kern_return_t kret;
463 unsigned int pmap_flags = 0;
464
465 if (os_add_overflow(file_offset, macho_size, &total_size) ||
466 total_size > file_size) {
467 return LOAD_BADMACHO;
468 }
469
470 result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
471 result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
472 #if defined(HAS_APPLE_PAC)
473 pmap_flags |= (imgp->ip_flags & IMGPF_NOJOP) ? PMAP_CREATE_DISABLE_JOP : 0;
474 #endif /* defined(HAS_APPLE_PAC) */
475 #if CONFIG_ROSETTA
476 pmap_flags |= (imgp->ip_flags & IMGPF_ROSETTA) ? PMAP_CREATE_ROSETTA : 0;
477 #endif
478 pmap_flags |= result->is_64bit_addr ? PMAP_CREATE_64BIT : 0;
479
480 task_t ledger_task;
481 if (imgp->ip_new_thread) {
482 ledger_task = get_threadtask(imgp->ip_new_thread);
483 } else {
484 ledger_task = task;
485 }
486
487 #if XNU_TARGET_OS_OSX && _POSIX_SPAWN_FORCE_4K_PAGES && PMAP_CREATE_FORCE_4K_PAGES
488 if (imgp->ip_px_sa != NULL) {
489 struct _posix_spawnattr* psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
490 if (psa->psa_flags & _POSIX_SPAWN_FORCE_4K_PAGES) {
491 pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES;
492 }
493 }
494 #endif /* XNU_TARGET_OS_OSX && _POSIX_SPAWN_FORCE_4K_PAGES && PMAP_CREATE_FORCE_4K_PAGES */
495
496 pmap = pmap_create_options(get_task_ledger(ledger_task),
497 (vm_map_size_t) 0,
498 pmap_flags);
499 if (pmap == NULL) {
500 return LOAD_RESOURCE;
501 }
502 map = vm_map_create_options(pmap, 0,
503 vm_compute_max_offset(result->is_64bit_addr),
504 VM_MAP_CREATE_PAGEABLE);
505
506 #if defined(__arm64__)
507 if (result->is_64bit_addr) {
508 /* enforce 16KB alignment of VM map entries */
509 vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
510 } else {
511 vm_map_set_page_shift(map, page_shift_user32);
512 }
513 #endif /* __arm64__ */
514
515 #if PMAP_CREATE_FORCE_4K_PAGES
516 if (pmap_flags & PMAP_CREATE_FORCE_4K_PAGES) {
517 DEBUG4K_LIFE("***** launching '%s' as 4k *****\n", vp->v_name);
518 vm_map_set_page_shift(map, FOURK_PAGE_SHIFT);
519 }
520 #endif /* PMAP_CREATE_FORCE_4K_PAGES */
521
522 #ifndef CONFIG_ENFORCE_SIGNED_CODE
523 /* This turns off faulting for executable pages, which makes it
524 * possible to circumvent Code Signing Enforcement. The per-process
525 * flag (CS_ENFORCEMENT) is not set yet, but we can use the
526 * global flag.
527 */
528 if (!cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION)) {
529 vm_map_disable_NX(map);
530 // TODO: Message Trace or log that this is happening
531 }
532 #endif
533
534 /* Forcibly disallow execution from data pages even if the arch
535 * normally permits it. */
536 if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) {
537 vm_map_disallow_data_exec(map);
538 }
539
540 /*
541 * Compute a random offset for ASLR, and an independent random offset for dyld.
542 */
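/*
 * Note: the "% (max_slide_pages - 1)) + 1" arithmetic below guarantees a
 * non-zero slide of at least one page while staying below the per-map
 * maximum; the random section offset is then added on top for the main binary.
 */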
543 if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
544 vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
545 aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;
546
547 aslr_page_offset = random();
548 aslr_page_offset = (aslr_page_offset % (vm_map_get_max_aslr_slide_pages(map) - 1)) + 1;
549 aslr_page_offset <<= vm_map_page_shift(map);
550
551 dyld_aslr_page_offset = random();
552 dyld_aslr_page_offset = (dyld_aslr_page_offset %
553 (vm_map_get_max_loader_aslr_slide_pages(map) - 1)) + 1;
554 dyld_aslr_page_offset <<= vm_map_page_shift(map);
555
556 aslr_page_offset += aslr_section_offset;
557 }
558 if (vm_map_page_shift(map) < (int)PAGE_SHIFT) {
559 DEBUG4K_LOAD("slide=0x%llx dyld_slide=0x%llx\n", aslr_page_offset, dyld_aslr_page_offset);
560 }
561
562 if (!result) {
563 result = &myresult;
564 }
565
566 *result = load_result_null;
567
568 /*
569 * re-set the bitness on the load result since we cleared the load result above.
570 */
571 result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
572 result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
573
574 lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
575 0, aslr_page_offset, dyld_aslr_page_offset, result,
576 NULL, imgp);
577
578 if (lret != LOAD_SUCCESS) {
579 imgp->ip_free_map = map;
580 return lret;
581 }
582
583 #if __x86_64__
584 /*
585 * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
586 */
587 if (!result->is_64bit_addr) {
588 enforce_hard_pagezero = FALSE;
589 }
590
591 /*
592 * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
593 * to the start address for "anywhere" memory allocations.
594 */
595 #define VM_MAP_HIGH_START_BITS_COUNT 8
596 #define VM_MAP_HIGH_START_BITS_SHIFT 27
597 if (result->is_64bit_addr &&
598 (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
599 int random_bits;
600 vm_map_offset_t high_start;
601
602 random_bits = random();
603 random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT) - 1;
604 high_start = (((vm_map_offset_t)random_bits)
605 << VM_MAP_HIGH_START_BITS_SHIFT);
606 vm_map_set_high_start(map, high_start);
607 }
608 #endif /* __x86_64__ */
609
610 /*
611 * Check to see if the page zero is enforced by the map->min_offset.
612 */
613 if (enforce_hard_pagezero &&
614 (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
615 #if __arm64__
616 if (
617 !result->is_64bit_addr && /* not 64-bit address space */
618 !(header->flags & MH_PIE) && /* not PIE */
619 (vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
620 PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
621 result->has_pagezero && /* has a "soft" page zero */
622 fourk_binary_compatibility_unsafe) {
623 /*
624 * For backwards compatibility of "4K" apps on
625 * a 16K system, do not enforce a hard page zero...
626 */
627 } else
628 #endif /* __arm64__ */
629 {
630 imgp->ip_free_map = map;
631 return LOAD_BADMACHO;
632 }
633 }
634
635 #if __arm64__
636 if (enforce_hard_pagezero && result->is_64bit_addr && (header->cputype == CPU_TYPE_ARM64)) {
637 /* 64 bit ARM binary must have "hard page zero" of 4GB to cover the lower 32 bit address space */
638 if (vm_map_has_hard_pagezero(map, 0x100000000) == FALSE) {
639 imgp->ip_free_map = map;
640 return LOAD_BADMACHO;
641 }
642 }
643 #endif
644
645 vm_commit_pagezero_status(map);
646
647 /*
648 * If this is an exec, then we are going to destroy the old
649 * task, and it's correct to halt it; if it's spawn, the
650 * task is not yet running, and it makes no sense.
651 */
652 if (in_exec) {
653 proc_t p = current_proc();
654 /*
655 * Mark the task as halting and start the other
656 * threads towards terminating themselves. Then
657 * make sure any threads waiting for a process
658 * transition get informed that we are committed to
659 * this transition, and then finally complete the
660 * task halting (wait for threads and then cleanup
661 * task resources).
662 *
663 * NOTE: task_start_halt() makes sure that no new
664 * threads are created in the task during the transition.
665 * We need to mark the workqueue as exiting before we
666 * wait for threads to terminate (at the end of which
667 * we no longer have a prohibition on thread creation).
668 *
669 * Finally, clean up any lingering workqueue data structures
670 * that may have been left behind by the workqueue threads
671 * as they exited (and then clean up the work queue itself).
672 */
673 kret = task_start_halt(task);
674 if (kret != KERN_SUCCESS) {
675 imgp->ip_free_map = map;
676 return LOAD_FAILURE;
677 }
678 proc_transcommit(p, 0);
679 workq_mark_exiting(p);
680 task_complete_halt(task);
681 workq_exit(p);
682
683 /*
684 * Roll up accounting info to new task. The roll up is done after
685 * task_complete_halt to make sure the thread accounting info is
686 * rolled up to current_task.
687 */
688 task_rollup_accounting_info(get_threadtask(thread), task);
689 }
690 *mapp = map;
691
692 #if XNU_TARGET_OS_OSX
693 if (process_is_plugin_host(imgp, result)) {
694 /*
695 * We need to disable security policies for processes
696 * that run third party plugins.
697 */
698 imgp->ip_flags |= IMGPF_3P_PLUGINS;
699 }
700
701 #if __has_feature(ptrauth_calls)
702 /*
703 * arm64e plugin hosts currently run with JOP keys disabled, since they
704 * may need to run arm64 plugins.
705 */
706 if (imgp->ip_flags & IMGPF_3P_PLUGINS) {
707 imgp->ip_flags |= IMGPF_NOJOP;
708 pmap_disable_user_jop(pmap);
709 }
710
711 #if CONFIG_ROSETTA
712 /* Disable JOP keys if the Rosetta runtime being used isn't arm64e */
713 if (result->is_rosetta && (imgp->ip_flags & IMGPF_NOJOP)) {
714 pmap_disable_user_jop(pmap);
715 }
716 #endif /* CONFIG_ROSETTA */
717 #endif /* __has_feature(ptrauth_calls)*/
718 #endif /* XNU_TARGET_OS_OSX */
719
720
721 return LOAD_SUCCESS;
722 }
723
724 int macho_printf = 0;
725 #define MACHO_PRINTF(args) \
726 do { \
727 if (macho_printf) { \
728 printf args; \
729 } \
730 } while (0)
731
732
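/*
 * Whether a dynamically-linked executable must be built PIE for this CPU
 * type/subtype: always for arm64 and armv7k, never for x86_64.
 */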
733 static boolean_t
734 pie_required(
735 cpu_type_t exectype,
736 cpu_subtype_t execsubtype)
737 {
738 switch (exectype) {
739 case CPU_TYPE_X86_64:
740 return FALSE;
741 case CPU_TYPE_ARM64:
742 return TRUE;
743 case CPU_TYPE_ARM:
744 switch (execsubtype) {
745 case CPU_SUBTYPE_ARM_V7K:
746 return TRUE;
747 }
748 break;
749 }
750 return FALSE;
751 }
752
753 /*
754 * The file size of a mach-o file is limited to 32 bits; this is because
755 * this is the limit on the kalloc() of enough bytes for a mach_header and
756 * the contents of its sizeofcmds, which is currently constrained to 32
757 * bits in the file format itself. We read into the kernel buffer the
758 * commands section, and then parse it in order to parse the mach-o file
759 * format load_command segment(s). We are only interested in a subset of
760 * the total set of possible commands. If "map"==VM_MAP_NULL or
761 * "thread"==THREAD_NULL, do not make permanent VM modifications,
762 * just preflight the parse.
763 */
764 static
765 load_return_t
766 parse_machfile(
767 struct vnode *vp,
768 vm_map_t map,
769 thread_t thread,
770 struct mach_header *header,
771 off_t file_offset,
772 off_t macho_size,
773 int depth,
774 int64_t aslr_offset,
775 int64_t dyld_aslr_offset,
776 load_result_t *result,
777 load_result_t *binresult,
778 struct image_params *imgp
779 )
780 {
781 uint32_t ncmds;
782 struct load_command *lcp;
783 struct dylinker_command *dlp = 0;
784 void * control;
785 load_return_t ret = LOAD_SUCCESS;
786 void * addr;
787 vm_size_t alloc_size, cmds_size;
788 size_t offset;
789 size_t oldoffset; /* for overflow check */
790 int pass;
791 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
792 int error;
793 int resid = 0;
794 int spawn = (imgp->ip_flags & IMGPF_SPAWN);
795 size_t mach_header_sz = sizeof(struct mach_header);
796 boolean_t abi64;
797 boolean_t got_code_signatures = FALSE;
798 boolean_t found_header_segment = FALSE;
799 boolean_t found_xhdr = FALSE;
800 boolean_t found_version_cmd = FALSE;
801 int64_t slide = 0;
802 boolean_t dyld_no_load_addr = FALSE;
803 boolean_t is_dyld = FALSE;
804 vm_map_offset_t effective_page_mask = PAGE_MASK;
805 #if __arm64__
806 uint64_t pagezero_end = 0;
807 uint64_t executable_end = 0;
808 uint64_t writable_start = 0;
809 vm_map_size_t effective_page_size;
810
811 effective_page_mask = vm_map_page_mask(map);
812 effective_page_size = vm_map_page_size(map);
813 #endif /* __arm64__ */
814
815 if (header->magic == MH_MAGIC_64 ||
816 header->magic == MH_CIGAM_64) {
817 mach_header_sz = sizeof(struct mach_header_64);
818 }
819
820 /*
821 * Break infinite recursion
822 */
823 if (depth > 2) {
824 return LOAD_FAILURE;
825 }
826
827 depth++;
828
829 /*
830 * Set CS_NO_UNTRUSTED_HELPERS by default; load_dylinker and load_rosetta
831 * will unset it if necessary.
832 */
833 if (depth == 1) {
834 result->csflags |= CS_NO_UNTRUSTED_HELPERS;
835 }
836
837 /*
838 * Check to see if this is the right machine type.
839 */
840 if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
841 ) {
842 return LOAD_BADARCH;
843 }
844
845 if (!grade_binary(header->cputype,
846 header->cpusubtype & ~CPU_SUBTYPE_MASK,
847 header->cpusubtype & CPU_SUBTYPE_MASK, TRUE)) {
848 return LOAD_BADARCH;
849 }
850
851 abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
852
853 switch (header->filetype) {
854 case MH_EXECUTE:
855 if (depth != 1 && depth != 3) {
856 return LOAD_FAILURE;
857 }
858 if (header->flags & MH_DYLDLINK) {
859 /* Check properties of dynamic executables */
860 if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
861 return LOAD_FAILURE;
862 }
863 result->needs_dynlinker = TRUE;
864 } else if (header->cputype == CPU_TYPE_X86_64) {
865 /* x86_64 static binaries allowed */
866 #if CONFIG_ROSETTA
867 } else if (imgp->ip_flags & IMGPF_ROSETTA) {
868 /* Rosetta runtime allowed */
869 #endif /* CONFIG_ROSETTA */
870 } else {
871 /* Check properties of static executables (disallowed except for development) */
872 #if !(DEVELOPMENT || DEBUG)
873 return LOAD_FAILURE;
874 #endif
875 }
876 break;
877 case MH_DYLINKER:
878 if (depth != 2) {
879 return LOAD_FAILURE;
880 }
881 is_dyld = TRUE;
882 break;
883
884 default:
885 return LOAD_FAILURE;
886 }
887
888 /*
889 * For PIE and dyld, slide everything by the ASLR offset.
890 */
891 if ((header->flags & MH_PIE) || is_dyld) {
892 slide = aslr_offset;
893 }
894
895 /*
896 * Get the pager for the file.
897 */
898 control = ubc_getobject(vp, UBC_FLAGS_NONE);
899
900 /* ensure header + sizeofcmds falls within the file */
901 if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
902 (off_t)cmds_size > macho_size ||
903 round_page_overflow(cmds_size, &alloc_size) ||
904 alloc_size > INT_MAX) {
905 return LOAD_BADMACHO;
906 }
907
908 /*
909 * Map the load commands into kernel memory.
910 */
911 addr = kalloc_data(alloc_size, Z_WAITOK);
912 if (addr == NULL) {
913 return LOAD_NOSPACE;
914 }
915
916 error = vn_rdwr(UIO_READ, vp, addr, (int)alloc_size, file_offset,
917 UIO_SYSSPACE, 0, vfs_context_ucred(imgp->ip_vfs_context), &resid, p);
918 if (error) {
919 kfree_data(addr, alloc_size);
920 return LOAD_IOERROR;
921 }
922
923 if (resid) {
924 {
925 /* We must be able to read in as much as the mach_header indicated */
926 kfree_data(addr, alloc_size);
927 return LOAD_BADMACHO;
928 }
929 }
930
931 /*
932 * Scan through the commands, processing each one as necessary.
933 * We parse in four passes through the headers:
934 * 0: determine if TEXT and DATA boundary can be page-aligned, load platform version
935 * 1: thread state, uuid, code signature
936 * 2: segments
937 * 3: dyld, encryption, check entry point
938 */
939
940 boolean_t slide_realign = FALSE;
941 #if __arm64__
942 if (!abi64) {
943 slide_realign = TRUE;
944 }
945 #endif
946
947 for (pass = 0; pass <= 3; pass++) {
948 if (pass == 1) {
949 #if __arm64__
950 boolean_t is_pie;
951 int64_t adjust;
952
953 is_pie = ((header->flags & MH_PIE) != 0);
954 if (pagezero_end != 0 &&
955 pagezero_end < effective_page_size) {
956 /* need at least 1 page for PAGEZERO */
957 adjust = effective_page_size;
958 MACHO_PRINTF(("pagezero boundary at "
959 "0x%llx; adjust slide from "
960 "0x%llx to 0x%llx%s\n",
961 (uint64_t) pagezero_end,
962 slide,
963 slide + adjust,
964 (is_pie
965 ? ""
966 : " BUT NO PIE ****** :-(")));
967 if (is_pie) {
968 slide += adjust;
969 pagezero_end += adjust;
970 executable_end += adjust;
971 writable_start += adjust;
972 }
973 }
974 if (pagezero_end != 0) {
975 result->has_pagezero = TRUE;
976 }
977 if (executable_end == writable_start &&
978 (executable_end & effective_page_mask) != 0 &&
979 (executable_end & FOURK_PAGE_MASK) == 0) {
980 /*
981 * The TEXT/DATA boundary is 4K-aligned but
982 * not page-aligned. Adjust the slide to make
983 * it page-aligned and avoid having a page
984 * with both write and execute permissions.
985 */
986 adjust =
987 (effective_page_size -
988 (executable_end & effective_page_mask));
989 MACHO_PRINTF(("page-unaligned X-W boundary at "
990 "0x%llx; adjust slide from "
991 "0x%llx to 0x%llx%s\n",
992 (uint64_t) executable_end,
993 slide,
994 slide + adjust,
995 (is_pie
996 ? ""
997 : " BUT NO PIE ****** :-(")));
998 if (is_pie) {
999 slide += adjust;
1000 }
1001 }
1002 #endif /* __arm64__ */
1003
1004 if (dyld_no_load_addr && binresult) {
1005 /*
1006 * The dyld Mach-O does not specify a load address. Try to locate
1007 * it right after the main binary. If binresult == NULL, load
1008 * directly to the given slide.
1009 */
1010 mach_vm_address_t max_vm_addr = binresult->max_vm_addr;
1011 slide = vm_map_round_page(slide + max_vm_addr, effective_page_mask);
1012 }
1013 }
1014
1015 /*
1016 * Check that the entry point is contained in an executable segment
1017 */
1018 if ((pass == 3) && (thread != THREAD_NULL)) {
1019 if (depth == 1 && imgp && (imgp->ip_flags & IMGPF_DRIVER)) {
1020 /* Driver binaries must have driverkit platform */
1021 if (result->ip_platform == PLATFORM_DRIVERKIT) {
1022 /* Driver binaries have no entry point */
1023 ret = setup_driver_main(thread, slide, result);
1024 } else {
1025 ret = LOAD_FAILURE;
1026 }
1027 } else if (!result->using_lcmain && result->validentry == 0) {
1028 ret = LOAD_FAILURE;
1029 }
1030 if (ret != KERN_SUCCESS) {
1031 thread_state_initialize(thread);
1032 break;
1033 }
1034 }
1035
1036 /*
1037 * Check that some segment maps the start of the mach-o file, which is
1038 * needed by the dynamic loader to read the mach headers, etc.
1039 */
1040 if ((pass == 3) && (found_header_segment == FALSE)) {
1041 ret = LOAD_BADMACHO;
1042 break;
1043 }
1044
1045 /*
1046 * Loop through each of the load_commands indicated by the
1047 * Mach-O header; if an absurd value is provided, we just
1048 * run off the end of the reserved section by incrementing
1049 * the offset too far, so we are implicitly fail-safe.
1050 */
1051 offset = mach_header_sz;
1052 ncmds = header->ncmds;
1053
1054 while (ncmds--) {
1055 /* ensure enough space for a minimal load command */
1056 if (offset + sizeof(struct load_command) > cmds_size) {
1057 ret = LOAD_BADMACHO;
1058 break;
1059 }
1060
1061 /*
1062 * Get a pointer to the command.
1063 */
1064 lcp = (struct load_command *)((uintptr_t)addr + offset);
1065 oldoffset = offset;
1066
1067 /*
1068 * Perform prevalidation of the struct load_command
1069 * before we attempt to use its contents. Invalid
1070 * values are ones which result in an overflow, or
1071 * which can not possibly be valid commands, or which
1072 * straddle or exist past the reserved section at the
1073 * start of the image.
1074 */
1075 if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
1076 lcp->cmdsize < sizeof(struct load_command) ||
1077 offset > cmds_size) {
1078 ret = LOAD_BADMACHO;
1079 break;
1080 }
1081
1082 /*
1083 * Act on struct load_command's for which kernel
1084 * intervention is required.
1085 * Note that each load command implementation is expected to validate
1086 * that lcp->cmdsize is large enough to fit its specific struct type
1087 * before dereferencing fields not covered by struct load_command.
1088 */
1089 switch (lcp->cmd) {
1090 case LC_SEGMENT: {
1091 struct segment_command *scp = (struct segment_command *) lcp;
1092 if (scp->cmdsize < sizeof(*scp)) {
1093 ret = LOAD_BADMACHO;
1094 break;
1095 }
1096 if (pass == 0) {
1097 if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
1098 dyld_no_load_addr = TRUE;
1099 if (!slide_realign) {
1100 /* got what we need, bail early on pass 0 */
1101 continue;
1102 }
1103 }
1104
1105 #if __arm64__
1106 assert(!abi64);
1107
1108 if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
1109 /* PAGEZERO */
1110 if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end) || pagezero_end > UINT32_MAX) {
1111 ret = LOAD_BADMACHO;
1112 break;
1113 }
1114 }
1115 if (scp->initprot & VM_PROT_EXECUTE) {
1116 /* TEXT */
1117 if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end) || executable_end > UINT32_MAX) {
1118 ret = LOAD_BADMACHO;
1119 break;
1120 }
1121 }
1122 if (scp->initprot & VM_PROT_WRITE) {
1123 /* DATA */
1124 if (os_add_overflow(scp->vmaddr, slide, &writable_start) || writable_start > UINT32_MAX) {
1125 ret = LOAD_BADMACHO;
1126 break;
1127 }
1128 }
1129 #endif /* __arm64__ */
1130 break;
1131 }
1132
1133 if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
1134 found_xhdr = TRUE;
1135 }
1136
1137 if (pass != 2) {
1138 break;
1139 }
1140
1141 if (abi64) {
1142 /*
1143 * Having an LC_SEGMENT command for the
1144 * wrong ABI is invalid <rdar://problem/11021230>
1145 */
1146 ret = LOAD_BADMACHO;
1147 break;
1148 }
1149
1150 ret = load_segment(lcp,
1151 header->filetype,
1152 control,
1153 file_offset,
1154 macho_size,
1155 vp,
1156 map,
1157 slide,
1158 result,
1159 imgp);
1160 if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
1161 /* Enforce that only a single segment maps file offset zero, and
1162 * that it has at least R+X protection. */
1163 if (found_header_segment ||
1164 ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
1165 ret = LOAD_BADMACHO;
1166 break;
1167 }
1168 found_header_segment = TRUE;
1169 }
1170
1171 break;
1172 }
1173 case LC_SEGMENT_64: {
1174 struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;
1175 if (scp64->cmdsize < sizeof(*scp64)) {
1176 ret = LOAD_BADMACHO;
1177 break;
1178 }
1179 if (pass == 0) {
1180 if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
1181 dyld_no_load_addr = TRUE;
1182 }
1183 /* got what we need, bail early on pass 0 */
1184 continue;
1185 }
1186
1187 if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
1188 found_xhdr = TRUE;
1189 }
1190
1191 if (pass != 2) {
1192 break;
1193 }
1194
1195 if (!abi64) {
1196 /*
1197 * Having an LC_SEGMENT_64 command for the
1198 * wrong ABI is invalid <rdar://problem/11021230>
1199 */
1200 ret = LOAD_BADMACHO;
1201 break;
1202 }
1203
1204 ret = load_segment(lcp,
1205 header->filetype,
1206 control,
1207 file_offset,
1208 macho_size,
1209 vp,
1210 map,
1211 slide,
1212 result,
1213 imgp);
1214
1215 if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
1216 /* Enforce that only a single segment maps file offset zero, and
1217 * that it has at least R+X protection. */
1218 if (found_header_segment ||
1219 ((scp64->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
1220 ret = LOAD_BADMACHO;
1221 break;
1222 }
1223 found_header_segment = TRUE;
1224 }
1225
1226 break;
1227 }
1228 case LC_UNIXTHREAD: {
1229 boolean_t is_x86_64_compat_binary = FALSE;
1230 if (pass != 1) {
1231 break;
1232 }
1233 #if CONFIG_ROSETTA
1234 if (depth == 2 && (imgp->ip_flags & IMGPF_ROSETTA)) {
1235 // Ignore dyld, Rosetta will parse its load commands to get the
1236 // entry point.
1237 result->validentry = 1;
1238 break;
1239 }
1240 #endif
1241 ret = load_unixthread(
1242 (struct thread_command *) lcp,
1243 thread,
1244 slide,
1245 is_x86_64_compat_binary,
1246 result);
1247 break;
1248 }
1249 case LC_MAIN:
1250 if (pass != 1) {
1251 break;
1252 }
1253 if (depth != 1) {
1254 break;
1255 }
1256 ret = load_main(
1257 (struct entry_point_command *) lcp,
1258 thread,
1259 slide,
1260 result);
1261 break;
1262 case LC_LOAD_DYLINKER:
1263 if (pass != 3) {
1264 break;
1265 }
1266 if ((depth == 1) && (dlp == 0)) {
1267 dlp = (struct dylinker_command *)lcp;
1268 } else {
1269 ret = LOAD_FAILURE;
1270 }
1271 break;
1272 case LC_UUID:
1273 if (pass == 1 && depth == 1) {
1274 ret = load_uuid((struct uuid_command *) lcp,
1275 (char *)addr + cmds_size,
1276 result);
1277 }
1278 break;
1279 case LC_CODE_SIGNATURE:
1280 /* CODE SIGNING */
1281 if (pass != 1) {
1282 break;
1283 }
1284
1285 /* pager -> uip ->
1286 * load signatures & store in uip
1287 * set VM object "signed_pages"
1288 */
1289 ret = load_code_signature(
1290 (struct linkedit_data_command *) lcp,
1291 vp,
1292 file_offset,
1293 macho_size,
1294 header->cputype,
1295 header->cpusubtype,
1296 result,
1297 imgp);
1298 if (ret != LOAD_SUCCESS) {
1299 printf("proc %d: load code signature error %d "
1300 "for file \"%s\"\n",
1301 proc_getpid(p), ret, vp->v_name);
1302 /*
1303 * Allow injections to be ignored on devices w/o enforcement enabled
1304 */
1305 if (!cs_process_global_enforcement()) {
1306 ret = LOAD_SUCCESS; /* ignore error */
1307 }
1308 } else {
1309 got_code_signatures = TRUE;
1310 }
1311
1312 if (got_code_signatures) {
1313 unsigned tainted = CS_VALIDATE_TAINTED;
1314 boolean_t valid = FALSE;
1315 vm_size_t off = 0;
1316
1317
1318 if (cs_debug > 10) {
1319 printf("validating initial pages of %s\n", vp->v_name);
1320 }
1321
1322 while (off < alloc_size && ret == LOAD_SUCCESS) {
1323 tainted = CS_VALIDATE_TAINTED;
1324
1325 valid = cs_validate_range(vp,
1326 NULL,
1327 file_offset + off,
1328 (const void *)((uintptr_t)addr + off),
1329 MIN(PAGE_SIZE, cmds_size),
1330 &tainted);
1331 if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
1332 if (cs_debug) {
1333 printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
1334 vp->v_name, proc_getpid(p), (long long)(file_offset + off), valid, tainted, result->csflags);
1335 }
1336 if (cs_process_global_enforcement() ||
1337 (result->csflags & (CS_HARD | CS_KILL | CS_ENFORCEMENT))) {
1338 ret = LOAD_FAILURE;
1339 }
1340 result->csflags &= ~CS_VALID;
1341 }
1342 off += PAGE_SIZE;
1343 }
1344 }
1345
1346 break;
1347 #if CONFIG_CODE_DECRYPTION
1348 case LC_ENCRYPTION_INFO:
1349 case LC_ENCRYPTION_INFO_64:
1350 if (pass != 3) {
1351 break;
1352 }
1353 ret = set_code_unprotect(
1354 (struct encryption_info_command *) lcp,
1355 addr, map, slide, vp, file_offset,
1356 header->cputype, header->cpusubtype);
1357 if (ret != LOAD_SUCCESS) {
1358 os_reason_t load_failure_reason = OS_REASON_NULL;
1359 printf("proc %d: set_code_unprotect() error %d "
1360 "for file \"%s\"\n",
1361 proc_getpid(p), ret, vp->v_name);
1362 /*
1363 * Don't let the app run if it's
1364 * encrypted but we failed to set up the
1365 * decrypter. If the keys are missing it will
1366 * return LOAD_DECRYPTFAIL.
1367 */
1368 if (ret == LOAD_DECRYPTFAIL) {
1369 /* failed to load due to missing FP keys */
1370 proc_lock(p);
1371 p->p_lflag |= P_LTERM_DECRYPTFAIL;
1372 proc_unlock(p);
1373
1374 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1375 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
1376 load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
1377 } else {
1378 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1379 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
1380 load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
1381 }
1382
1383 /*
1384 * Don't signal the process if it was forked and in a partially constructed
1385 * state as part of a spawn -- it will just be torn down when the exec fails.
1386 */
1387 if (!spawn) {
1388 assert(load_failure_reason != OS_REASON_NULL);
1389 {
1390 psignal_with_reason(current_proc(), SIGKILL, load_failure_reason);
1391 load_failure_reason = OS_REASON_NULL;
1392 }
1393 } else {
1394 os_reason_free(load_failure_reason);
1395 load_failure_reason = OS_REASON_NULL;
1396 }
1397 }
1398 break;
1399 #endif
1400 case LC_VERSION_MIN_IPHONEOS:
1401 case LC_VERSION_MIN_MACOSX:
1402 case LC_VERSION_MIN_WATCHOS:
1403 case LC_VERSION_MIN_TVOS: {
1404 struct version_min_command *vmc;
1405
1406 if (depth != 1 || pass != 0) {
1407 break;
1408 }
1409 vmc = (struct version_min_command *) lcp;
1410 ret = load_version(vmc, &found_version_cmd, imgp, result);
1411 #if XNU_TARGET_OS_OSX
1412 if (ret == LOAD_SUCCESS) {
1413 if (result->ip_platform == PLATFORM_IOS) {
1414 vm_map_mark_alien(map);
1415 } else {
1416 assert(!vm_map_is_alien(map));
1417 }
1418 }
1419 #endif /* XNU_TARGET_OS_OSX */
1420 break;
1421 }
1422 case LC_BUILD_VERSION: {
1423 if (depth != 1 || pass != 0) {
1424 break;
1425 }
1426 struct build_version_command* bvc = (struct build_version_command*)lcp;
1427 if (bvc->cmdsize < sizeof(*bvc)) {
1428 ret = LOAD_BADMACHO;
1429 break;
1430 }
1431 if (found_version_cmd == TRUE) {
1432 ret = LOAD_BADMACHO;
1433 break;
1434 }
1435 result->ip_platform = bvc->platform;
1436 result->lr_sdk = bvc->sdk;
1437 result->lr_min_sdk = bvc->minos;
1438 found_version_cmd = TRUE;
1439 #if XNU_TARGET_OS_OSX
1440 if (result->ip_platform == PLATFORM_IOS) {
1441 vm_map_mark_alien(map);
1442 } else {
1443 assert(!vm_map_is_alien(map));
1444 }
1445 #endif /* XNU_TARGET_OS_OSX */
1446 break;
1447 }
1448 default:
1449 /* Other commands are ignored by the kernel */
1450 ret = LOAD_SUCCESS;
1451 break;
1452 }
1453 if (ret != LOAD_SUCCESS) {
1454 break;
1455 }
1456 }
1457 if (ret != LOAD_SUCCESS) {
1458 break;
1459 }
1460 }
1461
1462 if (ret == LOAD_SUCCESS) {
1463 if (!got_code_signatures && cs_process_global_enforcement()) {
1464 ret = LOAD_FAILURE;
1465 }
1466
1467 /* Make sure if we need dyld, we got it */
1468 if (result->needs_dynlinker && !dlp) {
1469 ret = LOAD_FAILURE;
1470 }
1471
1472 if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
1473 /*
1474 * load the dylinker, and slide it by the independent DYLD ASLR
1475 * offset regardless of the PIE-ness of the main binary.
1476 */
1477 ret = load_dylinker(dlp, header->cputype, map, thread, depth,
1478 dyld_aslr_offset, result, imgp);
1479 }
1480
1481 #if CONFIG_ROSETTA
1482 if ((ret == LOAD_SUCCESS) && (depth == 1) && (imgp->ip_flags & IMGPF_ROSETTA)) {
1483 ret = load_rosetta(map, thread, result, imgp);
1484 if (ret == LOAD_SUCCESS) {
1485 if (result->user_stack_alloc_size != 0) {
1486 // If a stack allocation is required then add a 4gb gap after the main
1487 // binary/dyld for the worst case static translation size.
1488 mach_vm_size_t reserved_aot_size = 0x100000000;
1489 vm_map_offset_t mask = vm_map_page_mask(map);
1490
1491 mach_vm_address_t vm_end;
1492 if (dlp != 0) {
1493 vm_end = vm_map_round_page(result->dynlinker_max_vm_addr, mask);
1494 } else {
1495 vm_end = vm_map_round_page(result->max_vm_addr, mask);
1496 }
1497
1498 mach_vm_size_t user_stack_size = vm_map_round_page(result->user_stack_alloc_size, mask);
1499 result->user_stack = vm_map_round_page(vm_end + user_stack_size + reserved_aot_size + slide, mask);
1500 }
1501 }
1502 }
1503 #endif
1504
1505 if ((ret == LOAD_SUCCESS) && (depth == 1)) {
1506 if (result->thread_count == 0) {
1507 ret = LOAD_FAILURE;
1508 }
1509 #if CONFIG_ENFORCE_SIGNED_CODE
1510 if (!(result->csflags & CS_NO_UNTRUSTED_HELPERS)) {
1511 ret = LOAD_FAILURE;
1512 }
1513 #endif
1514 }
1515 }
1516
1517 if (ret == LOAD_BADMACHO && found_xhdr) {
1518 ret = LOAD_BADMACHO_UPX;
1519 }
1520
1521 kfree_data(addr, alloc_size);
1522
1523 return ret;
1524 }
1525
1526 load_return_t
1527 validate_potential_simulator_binary(
1528 cpu_type_t exectype __unused,
1529 struct image_params *imgp __unused,
1530 off_t file_offset __unused,
1531 off_t macho_size __unused)
1532 {
1533 #if __x86_64__
1534 /* Allow 32 bit exec only for simulator binaries */
1535 if (bootarg_no32exec && imgp != NULL && exectype == CPU_TYPE_X86) {
1536 if (imgp->ip_simulator_binary == IMGPF_SB_DEFAULT) {
1537 boolean_t simulator_binary = check_if_simulator_binary(imgp, file_offset, macho_size);
1538 imgp->ip_simulator_binary = simulator_binary ? IMGPF_SB_TRUE : IMGPF_SB_FALSE;
1539 }
1540
1541 if (imgp->ip_simulator_binary != IMGPF_SB_TRUE) {
1542 return LOAD_BADARCH;
1543 }
1544 }
1545 #endif
1546 return LOAD_SUCCESS;
1547 }
1548
1549 #if __x86_64__
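/*
 * Scan the image's load commands for platform/version information to decide
 * whether a 32-bit x86 binary is an iOS or watchOS simulator binary; with
 * bootarg_no32exec set, such simulator binaries are the only 32-bit
 * executables still permitted.
 */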
1550 static boolean_t
1551 check_if_simulator_binary(
1552 struct image_params *imgp,
1553 off_t file_offset,
1554 off_t macho_size)
1555 {
1556 struct mach_header *header;
1557 char *ip_vdata = NULL;
1558 kauth_cred_t cred = NULL;
1559 uint32_t ncmds;
1560 struct load_command *lcp;
1561 boolean_t simulator_binary = FALSE;
1562 void * addr = NULL;
1563 vm_size_t alloc_size, cmds_size;
1564 size_t offset;
1565 proc_t p = current_proc(); /* XXXX */
1566 int error;
1567 int resid = 0;
1568 size_t mach_header_sz = sizeof(struct mach_header);
1569
1570
1571 cred = kauth_cred_proc_ref(p);
1572
1573 /* Allocate page to copyin mach header */
1574 ip_vdata = kalloc_data(PAGE_SIZE, Z_WAITOK | Z_ZERO);
1575 if (ip_vdata == NULL) {
1576 goto bad;
1577 }
1578
1579 /* Read the Mach-O header */
1580 error = vn_rdwr(UIO_READ, imgp->ip_vp, ip_vdata,
1581 PAGE_SIZE, file_offset,
1582 UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
1583 cred, &resid, p);
1584 if (error) {
1585 goto bad;
1586 }
1587
1588 header = (struct mach_header *)ip_vdata;
1589
1590 if (header->magic == MH_MAGIC_64 ||
1591 header->magic == MH_CIGAM_64) {
1592 mach_header_sz = sizeof(struct mach_header_64);
1593 }
1594
1595 /* ensure header + sizeofcmds falls within the file */
1596 if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
1597 (off_t)cmds_size > macho_size ||
1598 round_page_overflow(cmds_size, &alloc_size) ||
1599 alloc_size > INT_MAX) {
1600 goto bad;
1601 }
1602
1603 /*
1604 * Map the load commands into kernel memory.
1605 */
1606 addr = kalloc_data(alloc_size, Z_WAITOK);
1607 if (addr == NULL) {
1608 goto bad;
1609 }
1610
1611 error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, (int)alloc_size, file_offset,
1612 UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
1613 if (error) {
1614 goto bad;
1615 }
1616
1617 if (resid) {
1618 /* We must be able to read in as much as the mach_header indicated */
1619 goto bad;
1620 }
1621
1622 /*
1623 * Loop through each of the load_commands indicated by the
1624 * Mach-O header; if an absurd value is provided, we just
1625 * run off the end of the reserved section by incrementing
1626 * the offset too far, so we are implicitly fail-safe.
1627 */
1628 offset = mach_header_sz;
1629 ncmds = header->ncmds;
1630
1631 while (ncmds--) {
1632 /* ensure enough space for a minimal load command */
1633 if (offset + sizeof(struct load_command) > cmds_size) {
1634 break;
1635 }
1636
1637 /*
1638 * Get a pointer to the command.
1639 */
1640 lcp = (struct load_command *)((uintptr_t)addr + offset);
1641
1642 /*
1643 * Perform prevalidation of the struct load_command
1644 * before we attempt to use its contents. Invalid
1645 * values are ones which result in an overflow, or
1646 * which can not possibly be valid commands, or which
1647 * straddle or exist past the reserved section at the
1648 * start of the image.
1649 */
1650 if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
1651 lcp->cmdsize < sizeof(struct load_command) ||
1652 offset > cmds_size) {
1653 break;
1654 }
1655
1656 /* Check if it's a simulator binary. */
1657 switch (lcp->cmd) {
1658 case LC_VERSION_MIN_WATCHOS:
1659 simulator_binary = TRUE;
1660 break;
1661
1662 case LC_BUILD_VERSION: {
1663 struct build_version_command *bvc;
1664
1665 bvc = (struct build_version_command *) lcp;
1666 if (bvc->cmdsize < sizeof(*bvc)) {
1667 /* unsafe to use this command struct if cmdsize
1668 * validated above is too small for it to fit */
1669 break;
1670 }
1671 if (bvc->platform == PLATFORM_IOSSIMULATOR ||
1672 bvc->platform == PLATFORM_WATCHOSSIMULATOR) {
1673 simulator_binary = TRUE;
1674 }
1675
1676 break;
1677 }
1678
1679 case LC_VERSION_MIN_IPHONEOS: {
1680 simulator_binary = TRUE;
1681 break;
1682 }
1683
1684 default:
1685 /* ignore other load commands */
1686 break;
1687 }
1688
1689 if (simulator_binary == TRUE) {
1690 break;
1691 }
1692 }
1693
1694 bad:
1695 if (ip_vdata) {
1696 kfree_data(ip_vdata, PAGE_SIZE);
1697 }
1698
1699 if (cred) {
1700 kauth_cred_unref(&cred);
1701 }
1702
1703 if (addr) {
1704 kfree_data(addr, alloc_size);
1705 }
1706
1707 return simulator_binary;
1708 }
1709 #endif /* __x86_64__ */
1710
1711 #if CONFIG_CODE_DECRYPTION
1712
1713 #define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096)
1714
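/*
 * Set up on-fault decryption (via vm_map_apple_protected()) for the
 * protected portion of a DSMOS-encrypted segment; the first
 * APPLE_UNPROTECTED_HEADER_SIZE bytes of the slice are left untouched.
 */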
1715 static load_return_t
1716 unprotect_dsmos_segment(
1717 uint64_t file_off,
1718 uint64_t file_size,
1719 struct vnode *vp,
1720 off_t macho_offset,
1721 vm_map_t map,
1722 vm_map_offset_t map_addr,
1723 vm_map_size_t map_size)
1724 {
1725 kern_return_t kr;
1726 uint64_t slice_off;
1727
1728 /*
1729 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
1730 * this part of a Universal binary) are not protected...
1731 * The rest needs to be "transformed".
1732 */
1733 slice_off = file_off - macho_offset;
1734 if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
1735 slice_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
1736 /* it's all unprotected, nothing to do... */
1737 kr = KERN_SUCCESS;
1738 } else {
1739 if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
1740 /*
1741 * We start mapping in the unprotected area.
1742 * Skip the unprotected part...
1743 */
1744 uint64_t delta_file;
1745 vm_map_offset_t delta_map;
1746
1747 delta_file = (uint64_t)APPLE_UNPROTECTED_HEADER_SIZE;
1748 delta_file -= slice_off;
1749 if (os_convert_overflow(delta_file, &delta_map)) {
1750 return LOAD_BADMACHO;
1751 }
1752 if (os_add_overflow(map_addr, delta_map, &map_addr)) {
1753 return LOAD_BADMACHO;
1754 }
1755 if (os_sub_overflow(map_size, delta_map, &map_size)) {
1756 return LOAD_BADMACHO;
1757 }
1758 }
1759 /* ... transform the rest of the mapping. */
1760 struct pager_crypt_info crypt_info;
1761 crypt_info.page_decrypt = dsmos_page_transform;
1762 crypt_info.crypt_ops = NULL;
1763 crypt_info.crypt_end = NULL;
1764 #pragma unused(vp, macho_offset)
1765 crypt_info.crypt_ops = (void *)0x2e69cf40;
1766 vm_map_offset_t crypto_backing_offset;
1767 crypto_backing_offset = -1; /* i.e. use map entry's offset */
1768 #if VM_MAP_DEBUG_APPLE_PROTECT
1769 if (vm_map_debug_apple_protect) {
1770 struct proc *p;
1771 p = current_proc();
1772 printf("APPLE_PROTECT: %d[%s] map %p "
1773 "[0x%llx:0x%llx] %s(%s)\n",
1774 proc_getpid(p), p->p_comm, map,
1775 (uint64_t) map_addr,
1776 (uint64_t) (map_addr + map_size),
1777 __FUNCTION__, vp->v_name);
1778 }
1779 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
1780
1781 /* The DSMOS pager can only be used by apple signed code */
1782 struct cs_blob * blob = csvnode_get_blob(vp, file_off);
1783 if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) {
1784 return LOAD_FAILURE;
1785 }
1786
1787 kr = vm_map_apple_protected(map,
1788 map_addr,
1789 map_addr + map_size,
1790 crypto_backing_offset,
1791 &crypt_info,
1792 CRYPTID_APP_ENCRYPTION);
1793 }
1794
1795 if (kr != KERN_SUCCESS) {
1796 return LOAD_FAILURE;
1797 }
1798 return LOAD_SUCCESS;
1799 }
1800 #else /* CONFIG_CODE_DECRYPTION */
1801 static load_return_t
1802 unprotect_dsmos_segment(
1803 __unused uint64_t file_off,
1804 __unused uint64_t file_size,
1805 __unused struct vnode *vp,
1806 __unused off_t macho_offset,
1807 __unused vm_map_t map,
1808 __unused vm_map_offset_t map_addr,
1809 __unused vm_map_size_t map_size)
1810 {
1811 return LOAD_SUCCESS;
1812 }
1813 #endif /* CONFIG_CODE_DECRYPTION */
1814
1815
1816 /*
1817 * map_segment:
1818 * Maps a Mach-O segment.
1819 */
1820 static kern_return_t
1821 map_segment(
1822 vm_map_t map,
1823 vm_map_offset_t vm_start,
1824 vm_map_offset_t vm_end,
1825 vm_map_kernel_flags_t vmk_flags,
1826 memory_object_control_t control,
1827 vm_map_offset_t file_start,
1828 vm_map_offset_t file_end,
1829 vm_prot_t initprot,
1830 vm_prot_t maxprot,
1831 load_result_t *result)
1832 {
1833 kern_return_t ret;
1834 vm_map_offset_t effective_page_mask;
1835
1836 if (vm_end < vm_start ||
1837 file_end < file_start) {
1838 return LOAD_BADMACHO;
1839 }
1840 if (vm_end == vm_start ||
1841 file_end == file_start) {
1842 /* nothing to map... */
1843 return LOAD_SUCCESS;
1844 }
1845
1846 effective_page_mask = vm_map_page_mask(map);
1847
1848 if (vm_map_page_aligned(vm_start, effective_page_mask) &&
1849 vm_map_page_aligned(vm_end, effective_page_mask) &&
1850 vm_map_page_aligned(file_start, effective_page_mask) &&
1851 vm_map_page_aligned(file_end, effective_page_mask)) {
1852 /* all page-aligned and map-aligned: proceed */
1853 } else {
1854 /*
1855 * There's no more fourk_pager to handle mis-alignments;
1856 * all binaries should be page-aligned and map-aligned
1857 */
1858 return LOAD_BADMACHO;
1859 }
1860
1861 #if !defined(XNU_TARGET_OS_OSX)
1862 (void) result;
1863 #else /* !defined(XNU_TARGET_OS_OSX) */
1864 /*
1865 * This process doesn't have its new csflags (from
1866 * the image being loaded) yet, so tell VM to override the
1867 * current process's CS_ENFORCEMENT for this mapping.
1868 */
1869 if (result->csflags & CS_ENFORCEMENT) {
1870 vmk_flags.vmkf_cs_enforcement = TRUE;
1871 } else {
1872 vmk_flags.vmkf_cs_enforcement = FALSE;
1873 }
1874 vmk_flags.vmkf_cs_enforcement_override = TRUE;
1875 #endif /* !defined(XNU_TARGET_OS_OSX) */
1876
1877 if (result->is_rosetta && (initprot & VM_PROT_EXECUTE) == VM_PROT_EXECUTE) {
1878 vmk_flags.vmkf_translated_allow_execute = TRUE;
1879 }
1880
1881 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1882 /* no copy-on-read for mapped binaries */
1883 vmk_flags.vmkf_no_copy_on_read = 1;
1884 ret = vm_map_enter_mem_object_control(
1885 map,
1886 &vm_start,
1887 file_end - file_start,
1888 (mach_vm_offset_t)0,
1889 vmk_flags,
1890 control,
1891 file_start,
1892 TRUE, /* copy */
1893 initprot, maxprot,
1894 VM_INHERIT_DEFAULT);
1895 } else {
1896 ret = mach_vm_map_kernel(
1897 map,
1898 &vm_start,
1899 file_end - file_start,
1900 (mach_vm_offset_t)0,
1901 vmk_flags,
1902 IPC_PORT_NULL,
1903 0, /* offset */
1904 TRUE, /* copy */
1905 initprot, maxprot,
1906 VM_INHERIT_DEFAULT);
1907 }
1908 if (ret != KERN_SUCCESS) {
1909 return LOAD_NOSPACE;
1910 }
1911 return LOAD_SUCCESS;
1912 }
1913
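/*
 * load_segment: validate an LC_SEGMENT/LC_SEGMENT_64 command and map the
 * file range it describes into the target map at the slid address, honoring
 * the segment's initial and maximum protections.
 */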
1914 static
1915 load_return_t
1916 load_segment(
1917 struct load_command *lcp,
1918 uint32_t filetype,
1919 void * control,
1920 off_t pager_offset,
1921 off_t macho_size,
1922 struct vnode *vp,
1923 vm_map_t map,
1924 int64_t slide,
1925 load_result_t *result,
1926 struct image_params *imgp)
1927 {
1928 struct segment_command_64 segment_command, *scp;
1929 kern_return_t ret;
1930 vm_map_size_t delta_size;
1931 vm_prot_t initprot;
1932 vm_prot_t maxprot;
1933 size_t segment_command_size, total_section_size,
1934 single_section_size;
1935 uint64_t file_offset, file_size;
1936 vm_map_offset_t vm_offset;
1937 size_t vm_size;
1938 vm_map_offset_t vm_start, vm_end, vm_end_aligned;
1939 vm_map_offset_t file_start, file_end;
1940 vm_map_kernel_flags_t vmk_flags;
1941 kern_return_t kr;
1942 boolean_t verbose;
1943 vm_map_size_t effective_page_size;
1944 vm_map_offset_t effective_page_mask;
1945 #if __arm64__
1946 boolean_t fourk_align;
1947 #endif /* __arm64__ */
1948
1949 (void)imgp;
1950
1951 effective_page_size = vm_map_page_size(map);
1952 effective_page_mask = vm_map_page_mask(map);
1953 vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
1954
1955 verbose = FALSE;
1956 if (LC_SEGMENT_64 == lcp->cmd) {
1957 segment_command_size = sizeof(struct segment_command_64);
1958 single_section_size = sizeof(struct section_64);
1959 #if __arm64__
1960 /* 64-bit binary: should already be 16K-aligned */
1961 fourk_align = FALSE;
1962
1963 if (vm_map_page_shift(map) == FOURK_PAGE_SHIFT &&
1964 PAGE_SHIFT != FOURK_PAGE_SHIFT) {
1965 fourk_align = TRUE;
1966 verbose = TRUE;
1967 }
1968 #endif /* __arm64__ */
1969 } else {
1970 segment_command_size = sizeof(struct segment_command);
1971 single_section_size = sizeof(struct section);
1972 #if __arm64__
1973 /* 32-bit binary or arm64_32 binary: should already be page-aligned */
1974 fourk_align = FALSE;
1975 #endif /* __arm64__ */
1976 }
1977 if (lcp->cmdsize < segment_command_size) {
1978 DEBUG4K_ERROR("LOAD_BADMACHO cmdsize %d < %zu\n", lcp->cmdsize, segment_command_size);
1979 return LOAD_BADMACHO;
1980 }
1981 total_section_size = lcp->cmdsize - segment_command_size;
1982
1983 if (LC_SEGMENT_64 == lcp->cmd) {
1984 scp = (struct segment_command_64 *)lcp;
1985 } else {
1986 scp = &segment_command;
1987 widen_segment_command((struct segment_command *)lcp, scp);
1988 }
1989
1990 if (verbose) {
1991 MACHO_PRINTF(("+++ load_segment %s "
1992 "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
1993 "prot %d/%d flags 0x%x\n",
1994 scp->segname,
1995 (uint64_t)(slide + scp->vmaddr),
1996 (uint64_t)(slide + scp->vmaddr + scp->vmsize),
1997 pager_offset + scp->fileoff,
1998 pager_offset + scp->fileoff + scp->filesize,
1999 scp->initprot,
2000 scp->maxprot,
2001 scp->flags));
2002 }
2003
2004 /*
2005 * Make sure what we get from the file is really ours (as specified
2006 * by macho_size).
2007 */
2008 if (scp->fileoff + scp->filesize < scp->fileoff ||
2009 scp->fileoff + scp->filesize > (uint64_t)macho_size) {
2010 DEBUG4K_ERROR("LOAD_BADMACHO fileoff 0x%llx filesize 0x%llx macho_size 0x%llx\n", scp->fileoff, scp->filesize, (uint64_t)macho_size);
2011 return LOAD_BADMACHO;
2012 }
2013 /*
2014 * Ensure that the number of sections specified would fit
2015 * within the load command size.
2016 */
2017 if (total_section_size / single_section_size < scp->nsects) {
2018 DEBUG4K_ERROR("LOAD_BADMACHO 0x%zx 0x%zx %d\n", total_section_size, single_section_size, scp->nsects);
2019 return LOAD_BADMACHO;
2020 }
2021 /*
2022 * Make sure the segment is page-aligned in the file.
2023 */
2024 if (os_add_overflow(pager_offset, scp->fileoff, &file_offset)) {
2025 DEBUG4K_ERROR("LOAD_BADMACHO file_offset: 0x%llx + 0x%llx\n", pager_offset, scp->fileoff);
2026 return LOAD_BADMACHO;
2027 }
2028 file_size = scp->filesize;
2029 #if __arm64__
2030 if (fourk_align) {
2031 if ((file_offset & FOURK_PAGE_MASK) != 0) {
2032 /*
2033 * we can't mmap() it if it's not at least 4KB-aligned
2034 * in the file
2035 */
2036 DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset);
2037 return LOAD_BADMACHO;
2038 }
2039 } else
2040 #endif /* __arm64__ */
2041 if ((file_offset & PAGE_MASK_64) != 0 ||
2042 /* we can't mmap() it if it's not page-aligned in the file */
2043 (file_offset & vm_map_page_mask(map)) != 0) {
2044 /*
2045 * The 1st test would have failed if the system's page size
2046 * was what this process believes is the page size, so let's
2047 * fail here too for the sake of consistency.
2048 */
2049 DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset);
2050 return LOAD_BADMACHO;
2051 }
2052
2053 /*
2054 * If we have a code signature attached for this slice
2055 * require that the segments are within the signed part
2056 * of the file.
2057 */
2058 if (result->cs_end_offset &&
2059 result->cs_end_offset < (off_t)scp->fileoff &&
2060 result->cs_end_offset - scp->fileoff < scp->filesize) {
2061 if (cs_debug) {
2062 printf("section outside code signature\n");
2063 }
2064 DEBUG4K_ERROR("LOAD_BADMACHO end_offset 0x%llx fileoff 0x%llx filesize 0x%llx\n", result->cs_end_offset, scp->fileoff, scp->filesize);
2065 return LOAD_BADMACHO;
2066 }
2067
2068 if (os_add_overflow(scp->vmaddr, slide, &vm_offset)) {
2069 if (cs_debug) {
2070 printf("vmaddr too large\n");
2071 }
2072 DEBUG4K_ERROR("LOAD_BADMACHO vmaddr 0x%llx slide 0x%llx vm_offset 0x%llx\n", scp->vmaddr, slide, (uint64_t)vm_offset);
2073 return LOAD_BADMACHO;
2074 }
2075
2076 if (scp->vmsize > SIZE_MAX) {
2077 DEBUG4K_ERROR("LOAD_BADMACHO vmsize 0x%llx\n", scp->vmsize);
2078 return LOAD_BADMACHO;
2079 }
2080
2081 vm_size = (size_t)scp->vmsize;
2082
2083 if (vm_size == 0) {
2084 return LOAD_SUCCESS;
2085 }
2086 if (scp->vmaddr == 0 &&
2087 file_size == 0 &&
2088 vm_size != 0 &&
2089 (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
2090 (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
2091 if (map == VM_MAP_NULL) {
2092 return LOAD_SUCCESS;
2093 }
2094
2095 /*
2096 * For PIE, extend page zero rather than moving it. Extending
2097 * page zero keeps early allocations from falling predictably
2098 * between the end of page zero and the beginning of the first
2099 * slid segment.
2100 */
2101 /*
2102 * This is a "page zero" segment: it starts at address 0,
2103 * is not mapped from the binary file and is not accessible.
2104 * User-space should never be able to access that memory, so
2105 * make it completely off limits by raising the VM map's
2106 * minimum offset.
2107 */
2108 vm_end = (vm_map_offset_t)(vm_offset + vm_size);
2109 if (vm_end < vm_offset) {
2110 DEBUG4K_ERROR("LOAD_BADMACHO vm_end 0x%llx vm_offset 0x%llx vm_size 0x%llx\n", (uint64_t)vm_end, (uint64_t)vm_offset, (uint64_t)vm_size);
2111 return LOAD_BADMACHO;
2112 }
2113
2114 if (verbose) {
2115 MACHO_PRINTF(("++++++ load_segment: "
2116 "page_zero up to 0x%llx\n",
2117 (uint64_t) vm_end));
2118 }
2119 #if __arm64__
2120 if (fourk_align) {
2121 /* raise min_offset as much as page-alignment allows */
2122 vm_end_aligned = vm_map_trunc_page(vm_end,
2123 effective_page_mask);
2124 } else
2125 #endif /* __arm64__ */
2126 {
2127 vm_end = vm_map_round_page(vm_end,
2128 PAGE_MASK_64);
2129 vm_end_aligned = vm_end;
2130 }
2131 ret = vm_map_raise_min_offset(map,
2132 vm_end_aligned);
2133 if (ret != KERN_SUCCESS) {
2134 DEBUG4K_ERROR("LOAD_FAILURE ret 0x%x\n", ret);
2135 return LOAD_FAILURE;
2136 }
2137 return LOAD_SUCCESS;
2138 } else {
2139 #if !defined(XNU_TARGET_OS_OSX)
2140 /* not PAGEZERO: should not be mapped at address 0 */
2141 if (filetype != MH_DYLINKER && (imgp->ip_flags & IMGPF_ROSETTA) == 0 && scp->vmaddr == 0) {
2142 DEBUG4K_ERROR("LOAD_BADMACHO filetype %d vmaddr 0x%llx\n", filetype, scp->vmaddr);
2143 return LOAD_BADMACHO;
2144 }
2145 #endif /* !defined(XNU_TARGET_OS_OSX) */
2146 }
2147
2148 #if __arm64__
2149 if (fourk_align) {
2150 /* 4K-align */
2151 file_start = vm_map_trunc_page(file_offset,
2152 FOURK_PAGE_MASK);
2153 file_end = vm_map_round_page(file_offset + file_size,
2154 FOURK_PAGE_MASK);
2155 vm_start = vm_map_trunc_page(vm_offset,
2156 FOURK_PAGE_MASK);
2157 vm_end = vm_map_round_page(vm_offset + vm_size,
2158 FOURK_PAGE_MASK);
2159
2160 if (file_offset - file_start > FOURK_PAGE_MASK ||
2161 file_end - file_offset - file_size > FOURK_PAGE_MASK) {
2162 DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap "
2163 "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n",
2164 file_offset,
2165 file_offset + file_size,
2166 (uint64_t) file_start,
2167 (uint64_t) file_end);
2168 return LOAD_BADMACHO;
2169 }
2170
2171 if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
2172 page_aligned(file_start) &&
2173 vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
2174 page_aligned(vm_start) &&
2175 vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
2176 /* XXX last segment: ignore mis-aligned tail */
2177 file_end = vm_map_round_page(file_end,
2178 effective_page_mask);
2179 vm_end = vm_map_round_page(vm_end,
2180 effective_page_mask);
2181 }
2182 } else
2183 #endif /* __arm64__ */
2184 {
2185 file_start = vm_map_trunc_page(file_offset,
2186 effective_page_mask);
2187 file_end = vm_map_round_page(file_offset + file_size,
2188 effective_page_mask);
2189 vm_start = vm_map_trunc_page(vm_offset,
2190 effective_page_mask);
2191 vm_end = vm_map_round_page(vm_offset + vm_size,
2192 effective_page_mask);
2193
2194 if (file_offset - file_start > effective_page_mask ||
2195 file_end - file_offset - file_size > effective_page_mask) {
2196 DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap "
2197 "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n",
2198 file_offset,
2199 file_offset + file_size,
2200 (uint64_t) file_start,
2201 (uint64_t) file_end);
2202 return LOAD_BADMACHO;
2203 }
2204 }
2205
2206 if (vm_start < result->min_vm_addr) {
2207 result->min_vm_addr = vm_start;
2208 }
2209 if (vm_end > result->max_vm_addr) {
2210 result->max_vm_addr = vm_end;
2211 }
2212
2213 if (map == VM_MAP_NULL) {
2214 return LOAD_SUCCESS;
2215 }
2216
2217 if (scp->flags & SG_READ_ONLY) {
2218 /*
2219 * Record the VM start/end of a segment which should
2220 * be RO after fixups. Only __DATA_CONST should
2221 * have this flag.
2222 */
2223 if (result->ro_vm_start != MACH_VM_MIN_ADDRESS ||
2224 result->ro_vm_end != MACH_VM_MIN_ADDRESS) {
2225 DEBUG4K_ERROR("LOAD_BADMACHO segment flags [%x] "
2226 "multiple segments with SG_READ_ONLY flag\n",
2227 scp->flags);
2228 return LOAD_BADMACHO;
2229 }
2230
2231 result->ro_vm_start = vm_start;
2232 result->ro_vm_end = vm_end;
2233 }
2234
2235 if (vm_size > 0) {
2236 #if !__x86_64__
2237 if (!strncmp(scp->segname, "__LINKEDIT", 11)) {
2238 vmk_flags.vmf_permanent = true;
2239 }
2240 #endif /* !__x86_64__ */
2241 initprot = (scp->initprot) & VM_PROT_ALL;
2242 maxprot = (scp->maxprot) & VM_PROT_ALL;
2243 /*
2244 * Map a copy of the file into the address space.
2245 */
2246 if (verbose) {
2247 MACHO_PRINTF(("++++++ load_segment: "
2248 "mapping at vm [0x%llx:0x%llx] of "
2249 "file [0x%llx:0x%llx]\n",
2250 (uint64_t) vm_start,
2251 (uint64_t) vm_end,
2252 (uint64_t) file_start,
2253 (uint64_t) file_end));
2254 }
2255 ret = map_segment(map,
2256 vm_start,
2257 vm_end,
2258 vmk_flags,
2259 control,
2260 file_start,
2261 file_end,
2262 initprot,
2263 maxprot,
2264 result);
2265 if (ret) {
2266 DEBUG4K_ERROR("LOAD_NOSPACE start 0x%llx end 0x%llx ret 0x%x\n", (uint64_t)vm_start, (uint64_t)vm_end, ret);
2267 return LOAD_NOSPACE;
2268 }
2269
2270 #if FIXME
2271 /*
2272 * If the file didn't end on a page boundary,
2273 * we need to zero the leftover.
2274 */
2275 delta_size = map_size - scp->filesize;
2276 if (delta_size > 0) {
2277 void *tmp = kalloc_data(delta_size, Z_WAITOK | Z_ZERO);
2278 int rc;
2279
2280 if (tmp == NULL) {
2281 DEBUG4K_ERROR("LOAD_RESOURCE delta_size 0x%llx ret 0x%x\n", delta_size, ret);
2282 return LOAD_RESOURCE;
2283 }
2284
2285 rc = copyout(tmp, map_addr + scp->filesize, delta_size);
2286 kfree_data(tmp, delta_size);
2287
2288 if (rc) {
2289 DEBUG4K_ERROR("LOAD_FAILURE copyout 0x%llx 0x%llx\n", map_addr + scp->filesize, delta_size);
2290 return LOAD_FAILURE;
2291 }
2292 }
2293 #endif /* FIXME */
2294 }
2295
2296 /*
2297 * If the virtual size of the segment is greater
2298 * than the size from the file, we need to allocate
2299 * zero fill memory for the rest.
2300 */
2301 if ((vm_end - vm_start) > (file_end - file_start)) {
2302 delta_size = (vm_end - vm_start) - (file_end - file_start);
2303 } else {
2304 delta_size = 0;
2305 }
2306 if (delta_size > 0) {
2307 vm_map_offset_t tmp_start;
2308 vm_map_offset_t tmp_end;
2309
2310 if (os_add_overflow(vm_start, file_end - file_start, &tmp_start)) {
2311 DEBUG4K_ERROR("LOAD_NOSPACE tmp_start: 0x%llx + 0x%llx\n", (uint64_t)vm_start, (uint64_t)(file_end - file_start));
2312 return LOAD_NOSPACE;
2313 }
2314
2315 if (os_add_overflow(tmp_start, delta_size, &tmp_end)) {
2316 DEBUG4K_ERROR("LOAD_NOSPACE tmp_end: 0x%llx + 0x%llx\n", (uint64_t)tmp_start, (uint64_t)delta_size);
2317 return LOAD_NOSPACE;
2318 }
2319
2320 if (verbose) {
2321 MACHO_PRINTF(("++++++ load_segment: "
2322 "delta mapping vm [0x%llx:0x%llx]\n",
2323 (uint64_t) tmp_start,
2324 (uint64_t) tmp_end));
2325 }
2326 kr = map_segment(map,
2327 tmp_start,
2328 tmp_end,
2329 vmk_flags,
2330 MEMORY_OBJECT_CONTROL_NULL,
2331 0,
2332 delta_size,
2333 scp->initprot,
2334 scp->maxprot,
2335 result);
2336 if (kr != KERN_SUCCESS) {
2337 DEBUG4K_ERROR("LOAD_NOSPACE 0x%llx 0x%llx kr 0x%x\n", (unsigned long long)tmp_start, (uint64_t)delta_size, kr);
2338 return LOAD_NOSPACE;
2339 }
2340 }
2341
2342 if ((scp->fileoff == 0) && (scp->filesize != 0)) {
2343 result->mach_header = vm_offset;
2344 }
2345
2346 if (scp->flags & SG_PROTECTED_VERSION_1) {
2347 ret = unprotect_dsmos_segment(file_start,
2348 file_end - file_start,
2349 vp,
2350 pager_offset,
2351 map,
2352 vm_start,
2353 vm_end - vm_start);
2354 if (ret != LOAD_SUCCESS) {
2355 DEBUG4K_ERROR("unprotect 0x%llx 0x%llx ret %d \n", (uint64_t)vm_start, (uint64_t)vm_end, ret);
2356 return ret;
2357 }
2358 } else {
2359 ret = LOAD_SUCCESS;
2360 }
2361
2362 if (LOAD_SUCCESS == ret &&
2363 filetype == MH_DYLINKER &&
2364 result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
2365 note_all_image_info_section(scp,
2366 LC_SEGMENT_64 == lcp->cmd,
2367 single_section_size,
2368 ((const char *)lcp +
2369 segment_command_size),
2370 slide,
2371 result);
2372 }
2373
2374 if (result->entry_point != MACH_VM_MIN_ADDRESS) {
2375 if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
2376 if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) {
2377 result->validentry = 1;
2378 } else {
2379 /* right range but wrong protections, unset if previously validated */
2380 result->validentry = 0;
2381 }
2382 }
2383 }
2384
2385 if (ret != LOAD_SUCCESS && verbose) {
2386 DEBUG4K_ERROR("ret %d\n", ret);
2387 }
2388 return ret;
2389 }
2390
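/*
 * load_uuid:
 * Validates an LC_UUID command and copies the UUID into the load result.
 */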
2391 static
2392 load_return_t
2393 load_uuid(
2394 struct uuid_command *uulp,
2395 char *command_end,
2396 load_result_t *result
2397 )
2398 {
2399 /*
2400 * We need to check the following for this command:
2401 * - The command size should be at least the size of struct uuid_command
2402 * - The UUID part of the command should be completely within the mach-o header
2403 */
2404
2405 if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
2406 (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
2407 return LOAD_BADMACHO;
2408 }
2409
2410 memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
2411 return LOAD_SUCCESS;
2412 }
2413
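/*
 * load_version:
 * Handles the legacy LC_VERSION_MIN_* commands: records the platform,
 * SDK and minimum OS versions in the load result. Only one version
 * command may appear per image.
 */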
2414 static
2415 load_return_t
2416 load_version(
2417 struct version_min_command *vmc,
2418 boolean_t *found_version_cmd,
2419 struct image_params *imgp __unused,
2420 load_result_t *result
2421 )
2422 {
2423 uint32_t platform = 0;
2424 uint32_t sdk;
2425 uint32_t min_sdk;
2426
2427 if (vmc->cmdsize < sizeof(*vmc)) {
2428 return LOAD_BADMACHO;
2429 }
2430 if (*found_version_cmd == TRUE) {
2431 return LOAD_BADMACHO;
2432 }
2433 *found_version_cmd = TRUE;
2434 sdk = vmc->sdk;
2435 min_sdk = vmc->version;
2436 switch (vmc->cmd) {
2437 case LC_VERSION_MIN_MACOSX:
2438 platform = PLATFORM_MACOS;
2439 break;
2440 #if __x86_64__ /* __x86_64__ */
2441 case LC_VERSION_MIN_IPHONEOS:
2442 platform = PLATFORM_IOSSIMULATOR;
2443 break;
2444 case LC_VERSION_MIN_WATCHOS:
2445 platform = PLATFORM_WATCHOSSIMULATOR;
2446 break;
2447 case LC_VERSION_MIN_TVOS:
2448 platform = PLATFORM_TVOSSIMULATOR;
2449 break;
2450 #else
2451 case LC_VERSION_MIN_IPHONEOS: {
2452 #if __arm64__
2453 if (vmc->sdk < (12 << 16)) {
2454 /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
2455 result->legacy_footprint = TRUE;
2456 }
2457 #endif /* __arm64__ */
2458 platform = PLATFORM_IOS;
2459 break;
2460 }
2461 case LC_VERSION_MIN_WATCHOS:
2462 platform = PLATFORM_WATCHOS;
2463 break;
2464 case LC_VERSION_MIN_TVOS:
2465 platform = PLATFORM_TVOS;
2466 break;
2467 #endif /* __x86_64__ */
2468 /* All LC_VERSION_MIN_* load commands are legacy and we will not be adding any more */
2469 default:
2470 sdk = (uint32_t)-1;
2471 min_sdk = (uint32_t)-1;
2472 __builtin_unreachable();
2473 }
2474 result->ip_platform = platform;
2475 result->lr_min_sdk = min_sdk;
2476 result->lr_sdk = sdk;
2477 return LOAD_SUCCESS;
2478 }
2479
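/*
 * load_main:
 * Handles LC_MAIN: records the requested stack size (or the MAXSSIZ
 * default), picks the default stack location, and marks the image as
 * needing dyld; the kernel leaves entryoff for dyld to consume.
 */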
2480 static
2481 load_return_t
2482 load_main(
2483 struct entry_point_command *epc,
2484 thread_t thread,
2485 int64_t slide,
2486 load_result_t *result
2487 )
2488 {
2489 mach_vm_offset_t addr;
2490 kern_return_t ret;
2491
2492 if (epc->cmdsize < sizeof(*epc)) {
2493 return LOAD_BADMACHO;
2494 }
2495 if (result->thread_count != 0) {
2496 return LOAD_FAILURE;
2497 }
2498
2499 if (thread == THREAD_NULL) {
2500 return LOAD_SUCCESS;
2501 }
2502
2503 /*
2504 * LC_MAIN specifies stack size but not location.
2505 * Add guard page to allocation size (MAXSSIZ includes guard page).
2506 */
2507 if (epc->stacksize) {
2508 if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) {
2509 /*
2510 * We are going to immediately throw away this result, but we want
2511 * to make sure we aren't loading a value dangerously close to
2512 * overflowing, since this will have a guard page added to it
2513 * and be rounded to page boundaries
2514 */
2515 return LOAD_BADMACHO;
2516 }
2517 result->user_stack_size = epc->stacksize;
2518 if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
2519 return LOAD_BADMACHO;
2520 }
2521 result->custom_stack = TRUE;
2522 } else {
2523 result->user_stack_alloc_size = MAXSSIZ;
2524 }
2525
2526 /* use default location for stack */
2527 ret = thread_userstackdefault(&addr, result->is_64bit_addr);
2528 if (ret != KERN_SUCCESS) {
2529 return LOAD_FAILURE;
2530 }
2531
2532 /* The stack slides down from the default location */
2533 result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide);
2534
2535 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2536 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2537 return LOAD_FAILURE;
2538 }
2539
2540 /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */
2541 result->needs_dynlinker = TRUE;
2542 result->using_lcmain = TRUE;
2543
2544 ret = thread_state_initialize( thread );
2545 if (ret != KERN_SUCCESS) {
2546 return LOAD_FAILURE;
2547 }
2548
2549 result->unixproc = TRUE;
2550 result->thread_count++;
2551
2552 return LOAD_SUCCESS;
2553 }
2554
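/*
 * setup_driver_main:
 * Sets up default stack and thread state for driver binaries, which
 * carry no LC_MAIN.
 */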
2555 static
2556 load_return_t
2557 setup_driver_main(
2558 thread_t thread,
2559 int64_t slide,
2560 load_result_t *result
2561 )
2562 {
2563 mach_vm_offset_t addr;
2564 kern_return_t ret;
2565
2566 /* Driver binaries have no LC_MAIN, use defaults */
2567
2568 if (thread == THREAD_NULL) {
2569 return LOAD_SUCCESS;
2570 }
2571
2572 result->user_stack_alloc_size = MAXSSIZ;
2573
2574 /* use default location for stack */
2575 ret = thread_userstackdefault(&addr, result->is_64bit_addr);
2576 if (ret != KERN_SUCCESS) {
2577 return LOAD_FAILURE;
2578 }
2579
2580 /* The stack slides down from the default location */
2581 result->user_stack = (user_addr_t)addr;
2582 result->user_stack -= slide;
2583
2584 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2585 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2586 return LOAD_FAILURE;
2587 }
2588
2589 result->needs_dynlinker = TRUE;
2590
2591 ret = thread_state_initialize( thread );
2592 if (ret != KERN_SUCCESS) {
2593 return LOAD_FAILURE;
2594 }
2595
2596 result->unixproc = TRUE;
2597 result->thread_count++;
2598
2599 return LOAD_SUCCESS;
2600 }
2601
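/*
 * load_unixthread:
 * Handles LC_UNIXTHREAD: derives the initial user stack, entry point
 * and register state from the embedded thread state.
 */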
2602 static
2603 load_return_t
2604 load_unixthread(
2605 struct thread_command *tcp,
2606 thread_t thread,
2607 int64_t slide,
2608 boolean_t is_x86_64_compat_binary,
2609 load_result_t *result
2610 )
2611 {
2612 load_return_t ret;
2613 int customstack = 0;
2614 mach_vm_offset_t addr;
2615 if (tcp->cmdsize < sizeof(*tcp)) {
2616 return LOAD_BADMACHO;
2617 }
2618 if (result->thread_count != 0) {
2619 return LOAD_FAILURE;
2620 }
2621
2622 if (thread == THREAD_NULL) {
2623 return LOAD_SUCCESS;
2624 }
2625
2626 ret = load_threadstack(thread,
2627 (uint32_t *)(((vm_offset_t)tcp) +
2628 sizeof(struct thread_command)),
2629 tcp->cmdsize - sizeof(struct thread_command),
2630 &addr, &customstack, is_x86_64_compat_binary, result);
2631 if (ret != LOAD_SUCCESS) {
2632 return ret;
2633 }
2634
2635 /* LC_UNIXTHREAD optionally specifies stack size and location */
2636
2637 if (customstack) {
2638 result->custom_stack = TRUE;
2639 } else {
2640 result->user_stack_alloc_size = MAXSSIZ;
2641 }
2642
2643 /* The stack slides down from the default location */
2644 result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide);
2645
2646 {
2647 ret = load_threadentry(thread,
2648 (uint32_t *)(((vm_offset_t)tcp) +
2649 sizeof(struct thread_command)),
2650 tcp->cmdsize - sizeof(struct thread_command),
2651 &addr);
2652 if (ret != LOAD_SUCCESS) {
2653 return ret;
2654 }
2655
2656 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2657 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2658 return LOAD_FAILURE;
2659 }
2660
2661 result->entry_point = (user_addr_t)addr;
2662 result->entry_point += slide;
2663
2664 ret = load_threadstate(thread,
2665 (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
2666 tcp->cmdsize - sizeof(struct thread_command),
2667 result);
2668 if (ret != LOAD_SUCCESS) {
2669 return ret;
2670 }
2671 }
2672
2673 result->unixproc = TRUE;
2674 result->thread_count++;
2675
2676 return LOAD_SUCCESS;
2677 }
2678
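/*
 * load_threadstate:
 * Validates the flavor/count pairs of a thread state blob and keeps a
 * copy in the load result for activation at exec time.
 */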
2679 static
2680 load_return_t
2681 load_threadstate(
2682 thread_t thread,
2683 uint32_t *ts,
2684 uint32_t total_size,
2685 load_result_t *result
2686 )
2687 {
2688 uint32_t size;
2689 int flavor;
2690 uint32_t thread_size;
2691 uint32_t *local_ts = NULL;
2692 uint32_t local_ts_size = 0;
2693 int ret;
2694
2695 (void)thread;
2696
2697 if (total_size > 0) {
2698 local_ts_size = total_size;
2699 local_ts = (uint32_t *)kalloc_data(local_ts_size, Z_WAITOK);
2700 if (local_ts == NULL) {
2701 return LOAD_FAILURE;
2702 }
2703 memcpy(local_ts, ts, local_ts_size);
2704 ts = local_ts;
2705 }
2706
2707 /*
2708 * Validate the new thread state; iterate through the state flavors in
2709 * the Mach-O file.
2710 * XXX: we should validate the machine state here, to avoid failing at
2711 * activation time where we can't bail out cleanly.
2712 */
2713 while (total_size > 0) {
2714 if (total_size < 2 * sizeof(uint32_t)) {
2715 ret = LOAD_BADMACHO;
goto bad; /* free the local copy instead of leaking it */
2716 }
2717
2718 flavor = *ts++;
2719 size = *ts++;
2720
2721 if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) ||
2722 os_sub_overflow(total_size, thread_size, &total_size)) {
2723 ret = LOAD_BADMACHO;
2724 goto bad;
2725 }
2726
2727 ts += size; /* ts is a (uint32_t *) */
2728 }
2729
2730 result->threadstate = local_ts;
2731 result->threadstate_sz = local_ts_size;
2732 return LOAD_SUCCESS;
2733
2734 bad:
2735 if (local_ts) {
2736 kfree_data(local_ts, local_ts_size);
2737 }
2738 return ret;
2739 }
2740
2741
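/*
 * load_threadstack:
 * Walks the thread state flavors and lets thread_userstack() derive the
 * initial user stack pointer, noting whether the binary supplied a
 * custom stack.
 */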
2742 static
2743 load_return_t
2744 load_threadstack(
2745 thread_t thread,
2746 uint32_t *ts,
2747 uint32_t total_size,
2748 mach_vm_offset_t *user_stack,
2749 int *customstack,
2750 __unused boolean_t is_x86_64_compat_binary,
2751 load_result_t *result
2752 )
2753 {
2754 kern_return_t ret;
2755 uint32_t size;
2756 int flavor;
2757 uint32_t stack_size;
2758
2759 if (total_size == 0) {
2760 return LOAD_BADMACHO;
2761 }
2762
2763 while (total_size > 0) {
2764 if (total_size < 2 * sizeof(uint32_t)) {
2765 return LOAD_BADMACHO;
2766 }
2767
2768 flavor = *ts++;
2769 size = *ts++;
2770 if (UINT32_MAX - 2 < size ||
2771 UINT32_MAX / sizeof(uint32_t) < size + 2) {
2772 return LOAD_BADMACHO;
2773 }
2774 stack_size = (size + 2) * sizeof(uint32_t);
2775 if (stack_size > total_size) {
2776 return LOAD_BADMACHO;
2777 }
2778 total_size -= stack_size;
2779
2780 /*
2781 * Third argument is a kernel space pointer; it gets cast
2782 * to the appropriate type in thread_userstack() based on
2783 * the value of flavor.
2784 */
2785 {
2786 ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data);
2787 if (ret != KERN_SUCCESS) {
2788 return LOAD_FAILURE;
2789 }
2790 }
2791
2792 ts += size; /* ts is a (uint32_t *) */
2793 }
2794 return LOAD_SUCCESS;
2795 }
2796
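/*
 * load_threadentry:
 * Walks the thread state flavors and lets thread_entrypoint() derive
 * the initial program counter.
 */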
2797 static
2798 load_return_t
2799 load_threadentry(
2800 thread_t thread,
2801 uint32_t *ts,
2802 uint32_t total_size,
2803 mach_vm_offset_t *entry_point
2804 )
2805 {
2806 kern_return_t ret;
2807 uint32_t size;
2808 int flavor;
2809 uint32_t entry_size;
2810
2811 /*
2812 * Pick out the entry point from the thread state.
2813 */
2814 *entry_point = MACH_VM_MIN_ADDRESS;
2815 while (total_size > 0) {
2816 if (total_size < 2 * sizeof(uint32_t)) {
2817 return LOAD_BADMACHO;
2818 }
2819
2820 flavor = *ts++;
2821 size = *ts++;
2822 if (UINT32_MAX - 2 < size ||
2823 UINT32_MAX / sizeof(uint32_t) < size + 2) {
2824 return LOAD_BADMACHO;
2825 }
2826 entry_size = (size + 2) * sizeof(uint32_t);
2827 if (entry_size > total_size) {
2828 return LOAD_BADMACHO;
2829 }
2830 total_size -= entry_size;
2831 /*
2832 * Third argument is a kernel space pointer; it gets cast
2833 * to the appropriate type in thread_entrypoint() based on
2834 * the value of flavor.
2835 */
2836 ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
2837 if (ret != KERN_SUCCESS) {
2838 return LOAD_FAILURE;
2839 }
2840 ts += size; /* ts is a (uint32_t *) */
2841 }
2842 return LOAD_SUCCESS;
2843 }
2844
2845 struct macho_data {
2846 struct nameidata __nid;
2847 union macho_vnode_header {
2848 struct mach_header mach_header;
2849 struct fat_header fat_header;
2850 char __pad[512];
2851 } __header;
2852 };
2853
2854 #define DEFAULT_DYLD_PATH "/usr/lib/dyld"
2855
2856 #if (DEVELOPMENT || DEBUG)
2857 extern char dyld_alt_path[];
2858 extern int use_alt_dyld;
2859
2860 extern char dyld_suffix[];
2861 extern int use_dyld_suffix;
2862
2863 typedef struct _dyld_suffix_map_entry {
2864 const char *suffix;
2865 const char *path;
2866 } dyld_suffix_map_entry_t;
2867
2868 static const dyld_suffix_map_entry_t _dyld_suffix_map[] = {
2869 [0] = {
2870 .suffix = "",
2871 .path = DEFAULT_DYLD_PATH,
2872 }, {
2873 .suffix = "release",
2874 .path = DEFAULT_DYLD_PATH,
2875 }, {
2876 .suffix = "bringup",
2877 .path = "/usr/appleinternal/lib/dyld.bringup",
2878 },
2879 };
2880 #endif
2881
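/*
 * load_dylinker:
 * Handles LC_LOAD_DYLINKER: locates the requested dynamic linker
 * (normally /usr/lib/dyld), parses it at the given slide, and adopts
 * its entry point, thread state and image-info addresses in place of
 * the main binary's.
 */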
2882 static load_return_t
2883 load_dylinker(
2884 struct dylinker_command *lcp,
2885 cpu_type_t cputype,
2886 vm_map_t map,
2887 thread_t thread,
2888 int depth,
2889 int64_t slide,
2890 load_result_t *result,
2891 struct image_params *imgp
2892 )
2893 {
2894 const char *name;
2895 struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
2896 struct mach_header *header;
2897 off_t file_offset = 0; /* set by get_macho_vnode() */
2898 off_t macho_size = 0; /* set by get_macho_vnode() */
2899 load_result_t *myresult;
2900 kern_return_t ret;
2901 struct macho_data *macho_data;
2902 struct {
2903 struct mach_header __header;
2904 load_result_t __myresult;
2905 struct macho_data __macho_data;
2906 } *dyld_data;
2907
2908 if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) {
2909 return LOAD_BADMACHO;
2910 }
2911
2912 name = (const char *)lcp + lcp->name.offset;
2913
2914 /* Check for a proper null terminated string. */
2915 size_t maxsz = lcp->cmdsize - lcp->name.offset;
2916 size_t namelen = strnlen(name, maxsz);
2917 if (namelen >= maxsz) {
2918 return LOAD_BADMACHO;
2919 }
2920
2921 #if (DEVELOPMENT || DEBUG)
2922
2923 /*
2924 * rdar://23680808
2925 * If an alternate dyld has been specified via boot args, check
2926 * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this
2927 * executable and redirect the kernel to load that linker.
2928 */
2929
2930 if (use_alt_dyld) {
2931 int policy_error;
2932 uint32_t policy_flags = 0;
2933 int32_t policy_gencount = 0;
2934
2935 policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
2936 if (policy_error == 0) {
2937 if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) {
2938 name = dyld_alt_path;
2939 }
2940 }
2941 } else if (use_dyld_suffix) {
2942 size_t i = 0;
2943
2944 #define countof(x) (sizeof(x) / sizeof(x[0]))
2945 for (i = 0; i < countof(_dyld_suffix_map); i++) {
2946 const dyld_suffix_map_entry_t *entry = &_dyld_suffix_map[i];
2947
2948 if (strcmp(entry->suffix, dyld_suffix) == 0) {
2949 name = entry->path;
2950 break;
2951 }
2952 }
2953 }
2954 #endif
2955
2956 #if !(DEVELOPMENT || DEBUG)
2957 if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
2958 return LOAD_BADMACHO;
2959 }
2960 #endif
2961
2962 /* Allocate wad-of-data from heap to reduce excessively deep stacks */
2963
2964 dyld_data = kalloc_type(typeof(*dyld_data), Z_WAITOK);
2965 header = &dyld_data->__header;
2966 myresult = &dyld_data->__myresult;
2967 macho_data = &dyld_data->__macho_data;
2968
2969 {
2970 cputype = (cputype & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK);
2971 }
2972
2973 ret = get_macho_vnode(name, cputype, header,
2974 &file_offset, &macho_size, macho_data, &vp, imgp);
2975 if (ret) {
2976 goto novp_out;
2977 }
2978
2979 *myresult = load_result_null;
2980 myresult->is_64bit_addr = result->is_64bit_addr;
2981 myresult->is_64bit_data = result->is_64bit_data;
2982
2983 ret = parse_machfile(vp, map, thread, header, file_offset,
2984 macho_size, depth, slide, 0, myresult, result, imgp);
2985
2986 if (ret == LOAD_SUCCESS) {
2987 if (result->threadstate) {
2988 /* don't use the app's threadstate if we have a dyld */
2989 kfree_data(result->threadstate, result->threadstate_sz);
2990 }
2991 result->threadstate = myresult->threadstate;
2992 result->threadstate_sz = myresult->threadstate_sz;
2993
2994 result->dynlinker = TRUE;
2995 result->entry_point = myresult->entry_point;
2996 result->validentry = myresult->validentry;
2997 result->all_image_info_addr = myresult->all_image_info_addr;
2998 result->all_image_info_size = myresult->all_image_info_size;
2999 if (!myresult->platform_binary) {
3000 result->csflags &= ~CS_NO_UNTRUSTED_HELPERS;
3001 }
3002
3003 #if CONFIG_ROSETTA
3004 if (imgp->ip_flags & IMGPF_ROSETTA) {
3005 extern const struct fileops vnops;
3006 // Save the file descriptor and mach header address for dyld. These will
3007 // be passed on the stack for the Rosetta runtime's use.
3008 struct fileproc *fp;
3009 int dyld_fd;
3010 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
3011 int error = falloc_exec(p, imgp->ip_vfs_context, &fp, &dyld_fd);
3012 if (error == 0) {
3013 error = VNOP_OPEN(vp, FREAD, imgp->ip_vfs_context);
3014 if (error == 0) {
3015 fp->fp_glob->fg_flag = FREAD;
3016 fp->fp_glob->fg_ops = &vnops;
3017 fp_set_data(fp, vp);
3018
3019 proc_fdlock(p);
3020 procfdtbl_releasefd(p, dyld_fd, NULL);
3021 fp_drop(p, dyld_fd, fp, 1);
3022 proc_fdunlock(p);
3023
3024 vnode_ref(vp);
3025
3026 result->dynlinker_fd = dyld_fd;
3027 result->dynlinker_fp = fp;
3028 result->dynlinker_mach_header = myresult->mach_header;
3029 result->dynlinker_max_vm_addr = myresult->max_vm_addr;
3030 result->dynlinker_ro_vm_start = myresult->ro_vm_start;
3031 result->dynlinker_ro_vm_end = myresult->ro_vm_end;
3032 } else {
3033 fp_free(p, dyld_fd, fp);
3034 ret = LOAD_IOERROR;
3035 }
3036 } else {
3037 ret = LOAD_IOERROR;
3038 }
3039 }
3040 #endif
3041 }
3042
3043 struct vnode_attr *va;
3044 va = kalloc_type(struct vnode_attr, Z_WAITOK | Z_ZERO);
3045 VATTR_INIT(va);
3046 VATTR_WANTED(va, va_fsid64);
3047 VATTR_WANTED(va, va_fsid);
3048 VATTR_WANTED(va, va_fileid);
3049 int error = vnode_getattr(vp, va, imgp->ip_vfs_context);
3050 if (error == 0) {
3051 imgp->ip_dyld_fsid = vnode_get_va_fsid(va);
3052 imgp->ip_dyld_fsobjid = va->va_fileid;
3053 }
3054
3055 vnode_put(vp);
3056 kfree_type(struct vnode_attr, va);
3057 novp_out:
3058 kfree_type(typeof(*dyld_data), dyld_data);
3059 return ret;
3060 }
3061
3062 #if CONFIG_ROSETTA
3063 static const char* rosetta_runtime_path = "/usr/libexec/rosetta/runtime";
3064
3065 #if (DEVELOPMENT || DEBUG)
3066 static const char* rosetta_runtime_path_alt_x86 = "/usr/local/libexec/rosetta/runtime_internal";
3067 static const char* rosetta_runtime_path_alt_arm = "/usr/local/libexec/rosetta/runtime_arm_internal";
3068 #endif
3069
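/*
 * load_rosetta:
 * Maps the Rosetta runtime below the shared region at its own random
 * slide and adopts its entry point and thread state for the translated
 * process.
 */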
3070 static load_return_t
3071 load_rosetta(
3072 vm_map_t map,
3073 thread_t thread,
3074 load_result_t *result,
3075 struct image_params *imgp)
3076 {
3077 struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
3078 struct mach_header *header;
3079 off_t file_offset = 0; /* set by get_macho_vnode() */
3080 off_t macho_size = 0; /* set by get_macho_vnode() */
3081 load_result_t *myresult;
3082 kern_return_t ret;
3083 struct macho_data *macho_data;
3084 const char *rosetta_file_path;
3085 struct {
3086 struct mach_header __header;
3087 load_result_t __myresult;
3088 struct macho_data __macho_data;
3089 } *rosetta_data;
3090 mach_vm_address_t rosetta_load_addr;
3091 mach_vm_size_t rosetta_size;
3092 mach_vm_address_t shared_cache_base = SHARED_REGION_BASE_ARM64;
3093 int64_t slide = 0;
3094
3095 /* Allocate wad-of-data from heap to reduce excessively deep stacks */
3096 rosetta_data = kalloc_type(typeof(*rosetta_data), Z_WAITOK | Z_NOFAIL);
3097 header = &rosetta_data->__header;
3098 myresult = &rosetta_data->__myresult;
3099 macho_data = &rosetta_data->__macho_data;
3100
3101 rosetta_file_path = rosetta_runtime_path;
3102
3103 #if (DEVELOPMENT || DEBUG)
3104 bool use_alt_rosetta = false;
3105 if (imgp->ip_flags & IMGPF_ALT_ROSETTA) {
3106 use_alt_rosetta = true;
3107 } else {
3108 int policy_error;
3109 uint32_t policy_flags = 0;
3110 int32_t policy_gencount = 0;
3111 policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
3112 if (policy_error == 0 && (policy_flags & PROC_UUID_ALT_ROSETTA_POLICY) != 0) {
3113 use_alt_rosetta = true;
3114 }
3115 }
3116
3117 if (use_alt_rosetta) {
3118 if (imgp->ip_origcputype == CPU_TYPE_X86_64) {
3119 rosetta_file_path = rosetta_runtime_path_alt_x86;
3120 } else if (imgp->ip_origcputype == CPU_TYPE_ARM64) {
3121 rosetta_file_path = rosetta_runtime_path_alt_arm;
3122 } else {
3123 ret = LOAD_BADARCH;
3124 goto novp_out;
3125 }
3126 }
3127 #endif
3128
3129 ret = get_macho_vnode(rosetta_file_path, CPU_TYPE_ARM64, header,
3130 &file_offset, &macho_size, macho_data, &vp, imgp);
3131 if (ret) {
3132 goto novp_out;
3133 }
3134
3135 *myresult = load_result_null;
3136 myresult->is_64bit_addr = TRUE;
3137 myresult->is_64bit_data = TRUE;
3138
3139 ret = parse_machfile(vp, NULL, NULL, header, file_offset, macho_size,
3140 2, 0, 0, myresult, NULL, imgp);
3141 if (ret != LOAD_SUCCESS) {
3142 goto out;
3143 }
3144
3145 if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
3146 slide = random();
3147 slide = (slide % (vm_map_get_max_loader_aslr_slide_pages(map) - 1)) + 1;
3148 slide <<= vm_map_page_shift(map);
3149 }
3150
3151 if (imgp->ip_origcputype == CPU_TYPE_X86_64) {
3152 shared_cache_base = SHARED_REGION_BASE_X86_64;
3153 }
3154
3155 rosetta_size = round_page(myresult->max_vm_addr - myresult->min_vm_addr);
3156 rosetta_load_addr = shared_cache_base - rosetta_size - slide;
3157
3158 *myresult = load_result_null;
3159 myresult->is_64bit_addr = TRUE;
3160 myresult->is_64bit_data = TRUE;
3161 myresult->is_rosetta = TRUE;
3162
3163 ret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
3164 2, rosetta_load_addr, 0, myresult, result, imgp);
3165 if (ret == LOAD_SUCCESS) {
3166 if (result) {
3167 if (result->threadstate) {
3168 /* don't use the app's/dyld's threadstate */
3169 kfree_data(result->threadstate, result->threadstate_sz);
3170 }
3171 assert(myresult->threadstate != NULL);
3172
3173 result->is_rosetta = TRUE;
3174
3175 result->threadstate = myresult->threadstate;
3176 result->threadstate_sz = myresult->threadstate_sz;
3177
3178 result->entry_point = myresult->entry_point;
3179 result->validentry = myresult->validentry;
3180 if (!myresult->platform_binary) {
3181 result->csflags &= ~CS_NO_UNTRUSTED_HELPERS;
3182 }
3183
3184 if ((header->cpusubtype & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_ARM64E) {
3185 imgp->ip_flags |= IMGPF_NOJOP;
3186 }
3187 }
3188 }
3189
3190 out:
3191 vnode_put(vp);
3192 novp_out:
3193 kfree_type(typeof(*rosetta_data), rosetta_data);
3194 return ret;
3195 }
3196 #endif
3197
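/*
 * set_signature_error:
 * Builds an OS_REASON_CODESIGNING exit reason describing a fatal code
 * signature validation failure and attaches it to the image params.
 */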
3198 static void
3199 set_signature_error(
3200 struct vnode* vp,
3201 struct image_params * imgp,
3202 const char* fatal_failure_desc,
3203 const size_t fatal_failure_desc_len)
3204 {
3205 char *vn_path = NULL;
3206 vm_size_t vn_pathlen = MAXPATHLEN;
3207 char const *path = NULL;
3208
3209 vn_path = zalloc(ZV_NAMEI);
3210 if (vn_getpath(vp, vn_path, (int*)&vn_pathlen) == 0) {
3211 path = vn_path;
3212 } else {
3213 path = "(get vnode path failed)";
3214 }
3215 os_reason_t reason = os_reason_create(OS_REASON_CODESIGNING,
3216 CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG);
3217
3218 if (reason == OS_REASON_NULL) {
3219 printf("load_code_signature: %s: failure to allocate exit reason for validation failure: %s\n",
3220 path, fatal_failure_desc);
3221 goto out;
3222 }
3223
3224 imgp->ip_cs_error = reason;
3225 reason->osr_flags = (OS_REASON_FLAG_GENERATE_CRASH_REPORT |
3226 OS_REASON_FLAG_CONSISTENT_FAILURE);
3227
3228 mach_vm_address_t data_addr = 0;
3229
3230 int reason_error = 0;
3231 int kcdata_error = 0;
3232
3233 if ((reason_error = os_reason_alloc_buffer_noblock(reason, kcdata_estimate_required_buffer_size
3234 (1, (uint32_t)fatal_failure_desc_len))) == 0 &&
3235 (kcdata_error = kcdata_get_memory_addr(&reason->osr_kcd_descriptor,
3236 EXIT_REASON_USER_DESC, (uint32_t)fatal_failure_desc_len,
3237 &data_addr)) == KERN_SUCCESS) {
3238 kern_return_t mc_error = kcdata_memcpy(&reason->osr_kcd_descriptor, (mach_vm_address_t)data_addr,
3239 fatal_failure_desc, (uint32_t)fatal_failure_desc_len);
3240
3241 if (mc_error != KERN_SUCCESS) {
3242 printf("load_code_signature: %s: failed to copy reason string "
3243 "(kcdata_memcpy error: %d, length: %ld)\n",
3244 path, mc_error, fatal_failure_desc_len);
3245 }
3246 } else {
3247 printf("load_code_signature: %s: failed to allocate space for reason string "
3248 "(os_reason_alloc_buffer error: %d, kcdata error: %d, length: %ld)\n",
3249 path, reason_error, kcdata_error, fatal_failure_desc_len);
3250 }
3251 out:
3252 if (vn_path) {
3253 zfree(ZV_NAMEI, vn_path);
3254 }
3255 }
3256
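/*
 * load_code_signature:
 * Handles LC_CODE_SIGNATURE: reuses or revalidates a code-signing blob
 * already attached to the vnode when possible, otherwise reads the
 * signature from the file and registers it with UBC.
 */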
3257 static load_return_t
3258 load_code_signature(
3259 struct linkedit_data_command *lcp,
3260 struct vnode *vp,
3261 off_t macho_offset,
3262 off_t macho_size,
3263 cpu_type_t cputype,
3264 cpu_subtype_t cpusubtype,
3265 load_result_t *result,
3266 struct image_params *imgp)
3267 {
3268 int ret;
3269 kern_return_t kr;
3270 vm_offset_t addr;
3271 int resid;
3272 struct cs_blob *blob;
3273 int error;
3274 vm_size_t blob_size;
3275 uint32_t sum;
3276 boolean_t anyCPU;
3277
3278 addr = 0;
3279 blob = NULL;
3280
3281 cpusubtype &= ~CPU_SUBTYPE_MASK;
3282
3283 blob = ubc_cs_blob_get(vp, cputype, cpusubtype, macho_offset);
3284
3285 if (blob != NULL) {
3286 /* we already have a blob for this vnode and cpu(sub)type */
3287 anyCPU = blob->csb_cpu_type == -1;
3288 if ((blob->csb_cpu_type != cputype &&
3289 blob->csb_cpu_subtype != cpusubtype && !anyCPU) ||
3290 (blob->csb_base_offset != macho_offset) ||
3291 ((blob->csb_flags & CS_VALID) == 0)) {
3292 /* the blob has changed for this vnode: fail ! */
3293 ret = LOAD_BADMACHO;
3294 const char* fatal_failure_desc = "embedded signature doesn't match attached signature";
3295 const size_t fatal_failure_desc_len = strlen(fatal_failure_desc) + 1;
3296
3297 printf("load_code_signature: %s\n", fatal_failure_desc);
3298 set_signature_error(vp, imgp, fatal_failure_desc, fatal_failure_desc_len);
3299 goto out;
3300 }
3301
3302 /* It matches the blob we want here, let's verify the version */
3303 if (!anyCPU && ubc_cs_generation_check(vp) == 0) {
3304 /* No need to revalidate, we're good! */
3305 ret = LOAD_SUCCESS;
3306 goto out;
3307 }
3308
3309 /* That blob may be stale, let's revalidate. */
3310 error = ubc_cs_blob_revalidate(vp, blob, imgp, 0, result->ip_platform);
3311 if (error == 0) {
3312 /* Revalidation succeeded, we're good! */
3313 /* If we were revalidating a CS blob marked for any CPU arch, adjust it to the current one */
3314 if (anyCPU) {
3315 vnode_lock_spin(vp);
3316 struct cs_cpu_info cpu_info = {
3317 .csb_cpu_type = cputype,
3318 .csb_cpu_subtype = cpusubtype
3319 };
3320 zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_info, &cpu_info);
3321 vnode_unlock(vp);
3322 }
3323 ret = LOAD_SUCCESS;
3324 goto out;
3325 }
3326
3327 if (error != EAGAIN) {
3328 printf("load_code_signature: revalidation failed: %d\n", error);
3329 ret = LOAD_FAILURE;
3330 goto out;
3331 }
3332
3333 assert(error == EAGAIN);
3334
3335 /*
3336 * Revalidation was not possible for this blob. We just continue as if there was no blob,
3337 * rereading the signature, and ubc_cs_blob_add will do the right thing.
3338 */
3339 blob = NULL;
3340 }
3341
3342 if (lcp->cmdsize != sizeof(struct linkedit_data_command)) {
3343 ret = LOAD_BADMACHO;
3344 goto out;
3345 }
3346
3347 sum = 0;
3348 if (os_add_overflow(lcp->dataoff, lcp->datasize, &sum) || sum > macho_size) {
3349 ret = LOAD_BADMACHO;
3350 goto out;
3351 }
3352
3353 blob_size = lcp->datasize;
3354 kr = ubc_cs_blob_allocate(&addr, &blob_size);
3355 if (kr != KERN_SUCCESS) {
3356 ret = LOAD_NOSPACE;
3357 goto out;
3358 }
3359
3360 resid = 0;
3361 error = vn_rdwr(UIO_READ,
3362 vp,
3363 (caddr_t) addr,
3364 lcp->datasize,
3365 macho_offset + lcp->dataoff,
3366 UIO_SYSSPACE,
3367 0,
3368 kauth_cred_get(),
3369 &resid,
3370 current_proc());
3371 if (error || resid != 0) {
3372 ret = LOAD_IOERROR;
3373 goto out;
3374 }
3375
3376 if (ubc_cs_blob_add(vp,
3377 result->ip_platform,
3378 cputype,
3379 cpusubtype,
3380 macho_offset,
3381 &addr,
3382 lcp->datasize,
3383 imgp,
3384 0,
3385 &blob,
3386 CS_BLOB_ADD_ALLOW_MAIN_BINARY)) {
3387 if (addr) {
3388 ubc_cs_blob_deallocate(addr, blob_size);
3389 addr = 0;
3390 }
3391 ret = LOAD_FAILURE;
3392 goto out;
3393 } else {
3394 /* ubc_cs_blob_add() has consumed "addr" */
3395 addr = 0;
3396 }
3397
3398 #if CHECK_CS_VALIDATION_BITMAP
3399 ubc_cs_validation_bitmap_allocate( vp );
3400 #endif
3401
3402 ret = LOAD_SUCCESS;
3403 out:
3404 if (ret == LOAD_SUCCESS) {
3405 if (blob == NULL) {
3406 panic("success, but no blob!");
3407 }
3408
3409 result->csflags |= blob->csb_flags;
3410 result->platform_binary = blob->csb_platform_binary;
3411 result->cs_end_offset = blob->csb_end_offset;
3412 }
3413 if (addr != 0) {
3414 ubc_cs_blob_deallocate(addr, blob_size);
3415 addr = 0;
3416 }
3417
3418 return ret;
3419 }
3420
3421
3422 #if CONFIG_CODE_DECRYPTION
3423
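/*
 * set_code_unprotect:
 * Handles LC_ENCRYPTION_INFO: sets up a text decrypter for the
 * encrypted range and remaps it via vm_map_apple_protected().
 */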
3424 static load_return_t
3425 set_code_unprotect(
3426 struct encryption_info_command *eip,
3427 caddr_t addr,
3428 vm_map_t map,
3429 int64_t slide,
3430 struct vnode *vp,
3431 off_t macho_offset,
3432 cpu_type_t cputype,
3433 cpu_subtype_t cpusubtype)
3434 {
3435 int error, len;
3436 pager_crypt_info_t crypt_info;
3437 const char * cryptname = 0;
3438 char *vpath;
3439
3440 size_t offset;
3441 struct segment_command_64 *seg64;
3442 struct segment_command *seg32;
3443 vm_map_offset_t map_offset, map_size;
3444 vm_object_offset_t crypto_backing_offset;
3445 kern_return_t kr;
3446
3447 if (eip->cmdsize < sizeof(*eip)) {
3448 return LOAD_BADMACHO;
3449 }
3450
3451 switch (eip->cryptid) {
3452 case 0:
3453 /* not encrypted, just an empty load command */
3454 return LOAD_SUCCESS;
3455 case 1:
3456 cryptname = "com.apple.unfree";
3457 break;
3458 case 0x10:
3459 /* some random cryptid that you could manually put into
3460 * your binary if you want NULL */
3461 cryptname = "com.apple.null";
3462 break;
3463 default:
3464 return LOAD_BADMACHO;
3465 }
3466
3467 if (map == VM_MAP_NULL) {
3468 return LOAD_SUCCESS;
3469 }
3470 if (NULL == text_crypter_create) {
3471 return LOAD_FAILURE;
3472 }
3473
3474 vpath = zalloc(ZV_NAMEI);
3475
3476 len = MAXPATHLEN;
3477 error = vn_getpath(vp, vpath, &len);
3478 if (error) {
3479 zfree(ZV_NAMEI, vpath);
3480 return LOAD_FAILURE;
3481 }
3482
3483 if (eip->cryptsize == 0) {
3484 printf("%s:%d '%s': cryptoff 0x%llx cryptsize 0x%llx cryptid 0x%x ignored\n", __FUNCTION__, __LINE__, vpath, (uint64_t)eip->cryptoff, (uint64_t)eip->cryptsize, eip->cryptid);
3485 zfree(ZV_NAMEI, vpath);
3486 return LOAD_SUCCESS;
3487 }
3488
3489 /* set up decrypter first */
3490 crypt_file_data_t crypt_data = {
3491 .filename = vpath,
3492 .cputype = cputype,
3493 .cpusubtype = cpusubtype,
3494 .origin = CRYPT_ORIGIN_APP_LAUNCH,
3495 };
3496 kr = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
3497 #if VM_MAP_DEBUG_APPLE_PROTECT
3498 if (vm_map_debug_apple_protect) {
3499 struct proc *p;
3500 p = current_proc();
3501 printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
3502 proc_getpid(p), p->p_comm, map, __FUNCTION__, vpath, kr);
3503 }
3504 #endif /* VM_MAP_DEBUG_APPLE_PROTECT */
3505 zfree(ZV_NAMEI, vpath);
3506
3507 if (kr) {
3508 printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
3509 cryptname, kr);
3510 if (kr == kIOReturnNotPrivileged) {
3511 /* text encryption returned decryption failure */
3512 return LOAD_DECRYPTFAIL;
3513 } else {
3514 return LOAD_RESOURCE;
3515 }
3516 }
3517
3518 /* this is terrible, but we have to rescan the load commands to find the
3519 * virtual address of this encrypted stuff. This code is gonna look like
3520 * the dyld source one day... */
3521 struct mach_header *header = (struct mach_header *)addr;
3522 size_t mach_header_sz = sizeof(struct mach_header);
3523 if (header->magic == MH_MAGIC_64 ||
3524 header->magic == MH_CIGAM_64) {
3525 mach_header_sz = sizeof(struct mach_header_64);
3526 }
3527 offset = mach_header_sz;
3528 uint32_t ncmds = header->ncmds;
3529 while (ncmds--) {
3530 /*
3531 * Get a pointer to the command.
3532 */
3533 struct load_command *lcp = (struct load_command *)(addr + offset);
3534 offset += lcp->cmdsize;
3535
3536 switch (lcp->cmd) {
3537 case LC_SEGMENT_64:
3538 seg64 = (struct segment_command_64 *)lcp;
3539 if ((seg64->fileoff <= eip->cryptoff) &&
3540 (seg64->fileoff + seg64->filesize >=
3541 eip->cryptoff + eip->cryptsize)) {
3542 map_offset = (vm_map_offset_t)(seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide);
3543 map_size = eip->cryptsize;
3544 crypto_backing_offset = macho_offset + eip->cryptoff;
3545 goto remap_now;
3546 }
3547 break;
3548 case LC_SEGMENT:
3549 seg32 = (struct segment_command *)lcp;
3550 if ((seg32->fileoff <= eip->cryptoff) &&
3551 (seg32->fileoff + seg32->filesize >=
3552 eip->cryptoff + eip->cryptsize)) {
3553 map_offset = (vm_map_offset_t)(seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide);
3554 map_size = eip->cryptsize;
3555 crypto_backing_offset = macho_offset + eip->cryptoff;
3556 goto remap_now;
3557 }
3558 break;
3559 }
3560 }
3561
3562 /* if we get here, we did not find a matching segment */
3563 return LOAD_BADMACHO;
3564
3565 remap_now:
3566 /* now remap using the decrypter */
3567 MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
3568 (uint64_t) map_offset,
3569 (uint64_t) (map_offset + map_size)));
3570 kr = vm_map_apple_protected(map,
3571 map_offset,
3572 map_offset + map_size,
3573 crypto_backing_offset,
3574 &crypt_info,
3575 CRYPTID_APP_ENCRYPTION);
3576 if (kr) {
3577 printf("set_code_unprotect(): mapping failed with %x\n", kr);
3578 return LOAD_PROTECT;
3579 }
3580
3581 return LOAD_SUCCESS;
3582 }
3583
3584 #endif
3585
3586 /*
3587 * This routine exists to support load_dylinker().
3588 *
3589 * This routine has its own, separate, understanding of the FAT file format,
3590 * which is terrifically unfortunate.
3591 */
3592 static
3593 load_return_t
3594 get_macho_vnode(
3595 const char *path,
3596 cpu_type_t cputype,
3597 struct mach_header *mach_header,
3598 off_t *file_offset,
3599 off_t *macho_size,
3600 struct macho_data *data,
3601 struct vnode **vpp,
3602 struct image_params *imgp
3603 )
3604 {
3605 struct vnode *vp;
3606 vfs_context_t ctx = vfs_context_current();
3607 proc_t p = vfs_context_proc(ctx);
3608 kauth_cred_t kerncred;
3609 struct nameidata *ndp = &data->__nid;
3610 boolean_t is_fat;
3611 struct fat_arch fat_arch;
3612 int error;
3613 int resid;
3614 union macho_vnode_header *header = &data->__header;
3615 off_t fsize = (off_t)0;
3616
3617 /*
3618 * Capture the kernel credential for use in the actual read of the
3619 * file, since the user doing the execution may have execute rights
3620 * but not read rights, but to exec something, we have to either map
3621 * or read it into the new process address space, which requires
3622 * read rights. This is to deal with lack of common credential
3623 * serialization code which would treat NOCRED as "serialize 'root'".
3624 */
3625 kerncred = vfs_context_ucred(vfs_context_kernel());
3626
3627 /* init the namei data to point at the user's program file name */
3628 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
3629
3630 if ((error = namei(ndp)) != 0) {
3631 if (error == ENOENT) {
3632 error = LOAD_ENOENT;
3633 } else {
3634 error = LOAD_FAILURE;
3635 }
3636 return error;
3637 }
3638 nameidone(ndp);
3639 vp = ndp->ni_vp;
3640
3641 /* check for regular file */
3642 if (vp->v_type != VREG) {
3643 error = LOAD_PROTECT;
3644 goto bad1;
3645 }
3646
3647 /* get size */
3648 if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
3649 error = LOAD_FAILURE;
3650 goto bad1;
3651 }
3652
3653 /* Check mount point */
3654 if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
3655 error = LOAD_PROTECT;
3656 goto bad1;
3657 }
3658
3659 /* check access */
3660 if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
3661 error = LOAD_PROTECT;
3662 goto bad1;
3663 }
3664
3665 /* try to open it */
3666 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
3667 error = LOAD_PROTECT;
3668 goto bad1;
3669 }
3670
3671 if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof(*header), 0,
3672 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
3673 error = LOAD_IOERROR;
3674 goto bad2;
3675 }
3676
3677 if (resid) {
3678 error = LOAD_BADMACHO;
3679 goto bad2;
3680 }
3681
3682 if (header->mach_header.magic == MH_MAGIC ||
3683 header->mach_header.magic == MH_MAGIC_64) {
3684 is_fat = FALSE;
3685 } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
3686 is_fat = TRUE;
3687 } else {
3688 error = LOAD_BADMACHO;
3689 goto bad2;
3690 }
3691
3692 if (is_fat) {
3693 error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
3694 sizeof(*header), fsize);
3695 if (error != LOAD_SUCCESS) {
3696 goto bad2;
3697 }
3698
3699 /* Look up our architecture in the fat file. */
3700 error = fatfile_getbestarch_for_cputype(cputype, CPU_SUBTYPE_ANY,
3701 (vm_offset_t)(&header->fat_header), sizeof(*header), imgp, &fat_arch);
3702 if (error != LOAD_SUCCESS) {
3703 goto bad2;
3704 }
3705
3706 /* Read the Mach-O header out of it */
3707 error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
3708 sizeof(header->mach_header), fat_arch.offset,
3709 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
3710 if (error) {
3711 error = LOAD_IOERROR;
3712 goto bad2;
3713 }
3714
3715 if (resid) {
3716 error = LOAD_BADMACHO;
3717 goto bad2;
3718 }
3719
3720 /* Is this really a Mach-O? */
3721 if (header->mach_header.magic != MH_MAGIC &&
3722 header->mach_header.magic != MH_MAGIC_64) {
3723 error = LOAD_BADMACHO;
3724 goto bad2;
3725 }
3726
3727 *file_offset = fat_arch.offset;
3728 *macho_size = fat_arch.size;
3729 } else {
3730 /*
3731 * Force get_macho_vnode() to fail if the architecture bits
3732 * do not match the expected architecture bits. This in
3733 * turn causes load_dylinker() to fail for the same reason,
3734 * so it ensures the dynamic linker and the binary are in
3735 * lock-step. This is potentially bad, if we ever add to
3736 * the CPU_ARCH_* bits any bits that are desirable but not
3737 * required, since the dynamic linker might work, but we will
3738 * refuse to load it because of this check.
3739 */
3740 if ((cpu_type_t)header->mach_header.cputype != cputype) {
3741 error = LOAD_BADARCH;
3742 goto bad2;
3743 }
3744
3745 *file_offset = 0;
3746 *macho_size = fsize;
3747 }
3748
3749 *mach_header = header->mach_header;
3750 *vpp = vp;
3751
3752 ubc_setsize(vp, fsize);
3753 return error;
3754
3755 bad2:
3756 (void) VNOP_CLOSE(vp, FREAD, ctx);
3757 bad1:
3758 vnode_put(vp);
3759 return error;
3760 }
3761