/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)kern_exec.c        8.1 (Berkeley) 6/10/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <machine/reg.h>
#include <machine/cpu_capabilities.h>

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/socketvar.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/uio_internal.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/kdebug.h>
#include <sys/signal.h>
#include <sys/aio_kern.h>
#include <sys/sysproto.h>
#include <sys/sysctl.h>
#include <sys/persona.h>
#include <sys/reason.h>
#if SYSV_SHM
#include <sys/shm_internal.h> /* shmexec() */
#endif
#include <sys/ubc_internal.h> /* ubc_map() */
#include <sys/spawn.h>
#include <sys/spawn_internal.h>
#include <sys/process_policy.h>
#include <sys/codesign.h>
#include <sys/random.h>
#include <crypto/sha1.h>

#include <libkern/libkern.h>
#include <libkern/crypto/sha2.h>
#include <security/audit/audit.h>

#include <ipc/ipc_types.h>

#include <mach/mach_param.h>
#include <mach/mach_types.h>
#include <mach/port.h>
#include <mach/task.h>
#include <mach/task_access.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <mach_debug/mach_debug_types.h>

#include <kern/sched_prim.h> /* thread_wakeup() */
#include <kern/affinity.h>
#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/policy_internal.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h> /* zone_userspace_reboot_checks() */

#include <os/log.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_AUDIT
#include <bsm/audit_kevents.h>
#endif

#if CONFIG_ARCADE
#include <kern/arcade.h>
#endif

#include <vm/vm_map_xnu.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/pmap.h>
#include <vm/vm_reclaim_xnu.h>

#include <kdp/kdp_dyld.h>

#include <machine/machine_routines.h>
#include <machine/pal_routines.h>

#include <pexpert/pexpert.h>

#if CONFIG_MEMORYSTATUS
#include <sys/kern_memorystatus.h>
#endif

#include <IOKit/IOBSD.h>

#include "kern_exec_internal.h"

#include <CodeSignature/Entitlements.h>

#include <mach/exclaves.h>


extern boolean_t vm_darkwake_mode;

/* enable crash reports on various exec failures */
static TUNABLE(bool, bootarg_execfailurereports, "execfailurecrashes", false);

#if XNU_TARGET_OS_OSX
#if __has_feature(ptrauth_calls)
static TUNABLE(bool, bootarg_arm64e_preview_abi, "-arm64e_preview_abi", false);
#endif /* __has_feature(ptrauth_calls) */

#if DEBUG || DEVELOPMENT
static TUNABLE(bool, unentitled_ios_sim_launch, "unentitled_ios_sim_launch", false);
#endif /* DEBUG || DEVELOPMENT */
#endif /* XNU_TARGET_OS_OSX */

#if CONFIG_DTRACE
/* Do not include dtrace.h, it redefines kmem_[alloc/free] */
extern void dtrace_proc_exec(proc_t);
extern void (*dtrace_proc_waitfor_exec_ptr)(proc_t);

/*
 * Since dtrace_proc_waitfor_exec_ptr can be added/removed in dtrace_subr.c,
 * we will store its value before actually calling it.
 */
static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL;

#include <sys/dtrace_ptss.h>
#endif

#if __has_feature(ptrauth_calls)
static TUNABLE_DEV_WRITEABLE(int, vm_shared_region_per_team_id,
    "vm_shared_region_per_team_id", 1);
static TUNABLE_DEV_WRITEABLE(int, vm_shared_region_by_entitlement,
    "vm_shared_region_by_entitlement", 1);

/* Upon userland request, reslide the shared cache. */
static TUNABLE_DEV_WRITEABLE(int, vm_shared_region_reslide_aslr,
    "vm_shared_region_reslide_aslr",
#if CONFIG_RESLIDE_SHARED_CACHE
    1
#else
    0
#endif /* CONFIG_RESLIDE_SHARED_CACHE */
    );

/*
 * Flag to control what processes should get shared cache randomize resliding
 * after a fault in the shared cache region:
 *
 * 0 - all processes get a new randomized slide
 * 1 - only platform processes get a new randomized slide
 */
TUNABLE_DEV_WRITEABLE(int, vm_shared_region_reslide_restrict,
    "vm_shared_region_reslide_restrict", 1);

#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_per_team_id,
    CTLFLAG_RW, &vm_shared_region_per_team_id, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_by_entitlement,
    CTLFLAG_RW, &vm_shared_region_by_entitlement, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_reslide_restrict,
    CTLFLAG_RW, &vm_shared_region_reslide_restrict, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_reslide_aslr,
    CTLFLAG_RW, &vm_shared_region_reslide_aslr, 0, "");
#endif
#endif /* __has_feature(ptrauth_calls) */

#if DEVELOPMENT || DEBUG
static TUNABLE(bool, enable_dext_coredumps_on_panic, "dext_panic_coredump", true);
#else
static TUNABLE(bool, enable_dext_coredumps_on_panic, "dext_panic_coredump", false);
#endif
extern kern_return_t kern_register_userspace_coredump(task_t task, const char *name);
#define USERSPACE_COREDUMP_PANIC_ENTITLEMENT "com.apple.private.enable-coredump-on-panic"
#define USERSPACE_COREDUMP_PANIC_SEED_ENTITLEMENT \
    "com.apple.private.enable-coredump-on-panic-seed-privacy-approved"

extern void proc_apply_task_networkbg_internal(proc_t, thread_t);
extern void task_set_did_exec_flag(task_t task);
extern void task_clear_exec_copy_flag(task_t task);
proc_t proc_exec_switch_task(proc_t old_proc, proc_t new_proc, task_t old_task,
    task_t new_task, struct image_params *imgp, void **inherit);
boolean_t task_is_active(task_t);
boolean_t thread_is_active(thread_t thread);
void thread_copy_resource_info(thread_t dst_thread, thread_t src_thread);
void *ipc_importance_exec_switch_task(task_t old_task, task_t new_task);
extern void ipc_importance_release(void *elem);
extern boolean_t task_has_watchports(task_t task);
extern void task_set_no_smt(task_t task);
#if defined(HAS_APPLE_PAC)
char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid);
#endif
task_t convert_port_to_task(ipc_port_t port);

#if CONFIG_EXCLAVES
int task_add_conclave(task_t task, void *vnode, int64_t off, const char *task_conclave_id);
kern_return_t task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off);
#endif /* CONFIG_EXCLAVES */


/*
 * Mach things for which prototypes are unavailable from Mach headers
 */
#define IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND 0x1
void ipc_task_enable(
    task_t task);
void ipc_task_reset(
    task_t task);
void ipc_thread_reset(
    thread_t thread);
kern_return_t ipc_object_copyin(
    ipc_space_t space,
    mach_port_name_t name,
    mach_msg_type_name_t msgt_name,
    ipc_object_t *objectp,
    mach_port_context_t context,
    mach_msg_guard_flags_t *guard_flags,
    uint32_t kmsg_flags);
void ipc_port_release_send(ipc_port_t);

#if DEVELOPMENT || DEBUG
void task_importance_update_owner_info(task_t);
#endif

extern struct savearea *get_user_regs(thread_t);

__attribute__((noinline)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid);

#include <kern/thread.h>
#include <kern/task.h>
#include <kern/ast.h>
#include <kern/mach_loader.h>
#include <kern/mach_fat.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <machine/vmparam.h>
#include <sys/imgact.h>

#include <sys/sdt.h>


/*
 * EAI_ITERLIMIT   The maximum number of times to iterate an image
 *                 activator in exec_activate_image() before treating
 *                 it as malformed/corrupt.
 */
#define EAI_ITERLIMIT 3

/*
 * For #! interpreter parsing
 */
#define IS_WHITESPACE(ch) ((ch == ' ') || (ch == '\t'))
#define IS_EOL(ch) ((ch == '#') || (ch == '\n'))
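
/*
 * Illustrative example (editor's sketch, not used by the code): for a
 * header line "#! /bin/sh -x\n", exec_shell_imgact() below scans past
 * the "#!" and any leading whitespace, takes "/bin/sh" as the
 * interpreter, keeps "-x" as a leading argument, and stops at the '\n'
 * (or at a '#' comment character), per IS_WHITESPACE()/IS_EOL().
 */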

extern vm_map_t bsd_pageable_map;
extern const struct fileops vnops;
extern int nextpidversion;


#define USER_ADDR_ALIGN(addr, val) \
    ( ( (user_addr_t)(addr) + (val) - 1) \
    & ~((val) - 1) )
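
/*
 * Example (illustrative only): USER_ADDR_ALIGN(0x1003, 16) == 0x1010.
 * Note that 'val' must be a power of two for the ~((val) - 1) mask to
 * round the address up correctly.
 */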

/*
 * For subsystem root support
 */
#define SPAWN_SUBSYSTEM_ROOT_ENTITLEMENT "com.apple.private.spawn-subsystem-root"

/*
 * Allow setting p_crash_behavior to trigger panic on crash
 */
#define SPAWN_SET_PANIC_CRASH_BEHAVIOR "com.apple.private.spawn-panic-crash-behavior"

/* Platform Code Exec Logging */
static int platform_exec_logging = 0;

SYSCTL_DECL(_security_mac);

SYSCTL_INT(_security_mac, OID_AUTO, platform_exec_logging, CTLFLAG_RW, &platform_exec_logging, 0,
    "log cdhashes for all platform binary executions");

static os_log_t peLog = OS_LOG_DEFAULT;

struct exception_port_action_t {
    ipc_port_t port;
    _ps_port_action_t *port_action;
};

struct exec_port_actions {
    uint32_t exception_port_count;
    uint32_t portwatch_count;
    uint32_t registered_count;
    struct exception_port_action_t *excport_array;
    ipc_port_t *portwatch_array;
    ipc_port_t registered_array[TASK_PORT_REGISTER_MAX];
};

struct image_params; /* Forward */
static int exec_activate_image(struct image_params *imgp);
static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp);
static int load_return_to_errno(load_return_t lrtn);
static int execargs_alloc(struct image_params *imgp);
static int execargs_free(struct image_params *imgp);
static int exec_check_permissions(struct image_params *imgp);
static int exec_extract_strings(struct image_params *imgp);
static int exec_add_apple_strings(struct image_params *imgp, const load_result_t *load_result);
static int exec_handle_sugid(struct image_params *imgp);
static int sugid_scripts = 0;
SYSCTL_INT(_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW | CTLFLAG_LOCKED, &sugid_scripts, 0, "");
static kern_return_t create_unix_stack(vm_map_t map, load_result_t *load_result, proc_t p);
static int copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size);
static void exec_resettextvp(proc_t, struct image_params *);
static int process_signature(proc_t, struct image_params *);
static void exec_prefault_data(proc_t, struct image_params *, load_result_t *);
static errno_t exec_handle_port_actions(struct image_params *imgp,
    struct exec_port_actions *port_actions);
static errno_t exec_handle_exception_port_actions(const struct image_params *imgp,
    const struct exec_port_actions *port_actions);
static errno_t exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_t psa_qos_clamp,
    task_role_t psa_darwin_role, struct exec_port_actions *port_actions);
static void exec_port_actions_destroy(struct exec_port_actions *port_actions);

/*
 * exec_add_user_string
 *
 * Add the requested string to the string space area.
 *
 * Parameters:  struct image_params *   image parameter block
 *              user_addr_t             string to add to strings area
 *              int                     segment from which string comes
 *              boolean_t               TRUE if string contributes to NCARGS
 *
 * Returns:     0                       Success
 *              !0                      Failure errno from copyinstr()
 *
 * Implicit returns:
 *              (imgp->ip_strendp)      updated location of next add, if any
 *              (imgp->ip_strspace)     updated byte count of space remaining
 *              (imgp->ip_argspace)     updated byte count of space in NCARGS
 */
__attribute__((noinline))
static int
exec_add_user_string(struct image_params *imgp, user_addr_t str, int seg, boolean_t is_ncargs)
{
    int error = 0;

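    /*
     * copyinstr()/copystr() return ENAMETOOLONG when the string fills
     * the remaining space; the loop below then continues from the
     * updated cursor, so an over-long argument eventually fails with
     * E2BIG once the space is exhausted.
     */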
    do {
        size_t len = 0;
        int space;

        if (is_ncargs) {
            space = imgp->ip_argspace; /* by definition smaller than ip_strspace */
        } else {
            space = imgp->ip_strspace;
        }

        if (space <= 0) {
            error = E2BIG;
            break;
        }

        if (!UIO_SEG_IS_USER_SPACE(seg)) {
            char *kstr = CAST_DOWN(char *, str); /* SAFE */
            error = copystr(kstr, imgp->ip_strendp, space, &len);
        } else {
            error = copyinstr(str, imgp->ip_strendp, space, &len);
        }

        imgp->ip_strendp += len;
        imgp->ip_strspace -= len;
        if (is_ncargs) {
            imgp->ip_argspace -= len;
        }
    } while (error == ENAMETOOLONG);

    return error;
}

/*
 * dyld is now passed the executable path as a getenv-like variable
 * in the same fashion as the stack_guard and malloc_entropy keys.
 */
#define EXECUTABLE_KEY "executable_path="
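
/*
 * Illustrative sketch (editor's note): after exec_save_path() below runs
 * for execve("/bin/ls", ...), the start of the strings area holds
 * "executable_path=/bin/ls", which is later handed to dyld in the apple
 * vector (applev[0]) and parsed like an environment variable.
 */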

/*
 * exec_save_path
 *
 * To support new app package launching for Mac OS X, the dyld needs the
 * first argument to execve() stored on the user stack.
 *
 * Save the executable path name at the bottom of the strings area and set
 * the argument vector pointer to the location following that to indicate
 * the start of the argument and environment tuples, setting the remaining
 * string space count to the size of the string area minus the path length.
 *
 * Parameters:  struct image_params *   image parameter block
 *              char *                  path used to invoke program
 *              int                     segment from which path comes
 *
 * Returns:     int                     0 Success
 *              EFAULT                  Bad address
 *      copy[in]str:EFAULT              Bad address
 *      copy[in]str:ENAMETOOLONG        Filename too long
 *
 * Implicit returns:
 *              (imgp->ip_strings)      saved path
 *              (imgp->ip_strspace)     space remaining in ip_strings
 *              (imgp->ip_strendp)      start of remaining copy area
 *              (imgp->ip_argspace)     space remaining of NCARGS
 *              (imgp->ip_applec)       Initial applev[0]
 *
 * Note:        We have to do this before the initial namei() since if the
 *              path contains symbolic links, namei() will overwrite the
 *              original path buffer contents.  If the last symbolic link
 *              resolved was a relative pathname, we would lose the original
 *              "path", which could be an absolute pathname.  This might be
 *              unacceptable for dyld.
 */
static int
exec_save_path(struct image_params *imgp, user_addr_t path, int seg, const char **excpath)
{
    int error;
    size_t len;
    char *kpath;

    // imgp->ip_strings can come out of a cache, so we need to obliterate the
    // old path.
    memset(imgp->ip_strings, '\0', strlen(EXECUTABLE_KEY) + MAXPATHLEN);

    len = MIN(MAXPATHLEN, imgp->ip_strspace);

    switch (seg) {
    case UIO_USERSPACE32:
    case UIO_USERSPACE64: /* Same for copyin()... */
        error = copyinstr(path, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len);
        break;
    case UIO_SYSSPACE:
        kpath = CAST_DOWN(char *, path); /* SAFE */
        error = copystr(kpath, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len);
        break;
    default:
        error = EFAULT;
        break;
    }

    if (!error) {
        bcopy(EXECUTABLE_KEY, imgp->ip_strings, strlen(EXECUTABLE_KEY));
        len += strlen(EXECUTABLE_KEY);

        imgp->ip_strendp += len;
        imgp->ip_strspace -= len;

        if (excpath) {
            *excpath = imgp->ip_strings + strlen(EXECUTABLE_KEY);
        }
    }

    return error;
}

/*
 * exec_reset_save_path
 *
 * If we detect a shell script, we need to reset the string area
 * state so that the interpreter can be saved onto the stack.
 *
 * Parameters:  struct image_params *   image parameter block
 *
 * Returns:     int                     0 Success
 *
 * Implicit returns:
 *              (imgp->ip_strings)      saved path
 *              (imgp->ip_strspace)     space remaining in ip_strings
 *              (imgp->ip_strendp)      start of remaining copy area
 *              (imgp->ip_argspace)     space remaining of NCARGS
 *
 */
static int
exec_reset_save_path(struct image_params *imgp)
{
    imgp->ip_strendp = imgp->ip_strings;
    imgp->ip_argspace = NCARGS;
    imgp->ip_strspace = (NCARGS + PAGE_SIZE);

    return 0;
}

/*
 * exec_shell_imgact
 *
 * Image activator for interpreter scripts.  If the image begins with
 * the characters "#!", then it is an interpreter script.  Verify the
 * length of the script line indicating the interpreter is not in
 * excess of the maximum allowed size.  If this is the case, then
 * break out the arguments, if any, which are separated by white
 * space, and copy them into the argument save area as if they were
 * provided on the command line before all other arguments.  The line
 * ends when we encounter a comment character ('#') or newline.
 *
 * Parameters:  struct image_params *   image parameter block
 *
 * Returns:     -1                      not an interpreter (keep looking)
 *              -3                      Success: interpreter: relookup
 *              >0                      Failure: interpreter: error number
 *
 * A return value other than -1 indicates subsequent image activators should
 * not be given the opportunity to attempt to activate the image.
 */
static int
exec_shell_imgact(struct image_params *imgp)
{
    char *vdata = imgp->ip_vdata;
    char *ihp;
    char *line_startp, *line_endp;
    char *interp;

    /*
     * Make sure it's a shell script.  If we've already redirected
     * from an interpreted file once, don't do it again.
     */
    if (vdata[0] != '#' ||
        vdata[1] != '!' ||
        (imgp->ip_flags & IMGPF_INTERPRET) != 0) {
        return -1;
    }

    if (imgp->ip_origcputype != 0) {
        /* Fat header previously matched, don't allow shell script inside */
        return -1;
    }

    imgp->ip_flags |= IMGPF_INTERPRET;
    imgp->ip_interp_sugid_fd = -1;
    imgp->ip_interp_buffer[0] = '\0';

    /* Check to see if SUGID scripts are permitted.  If they aren't then
     * clear the SUGID bits.
     * imgp->ip_vattr is known to be valid.
     */
    if (sugid_scripts == 0) {
        imgp->ip_origvattr->va_mode &= ~(VSUID | VSGID);
    }

    /* Try to find the first non-whitespace character */
    for (ihp = &vdata[2]; ihp < &vdata[IMG_SHSIZE]; ihp++) {
        if (IS_EOL(*ihp)) {
            /* Did not find interpreter, "#!\n" */
            return ENOEXEC;
        } else if (IS_WHITESPACE(*ihp)) {
            /* Whitespace, like "#! /bin/sh\n", keep going. */
        } else {
            /* Found start of interpreter */
            break;
        }
    }

    if (ihp == &vdata[IMG_SHSIZE]) {
        /* All whitespace, like "#! " */
        return ENOEXEC;
    }

    line_startp = ihp;

    /* Try to find the end of the interpreter+args string */
    for (; ihp < &vdata[IMG_SHSIZE]; ihp++) {
        if (IS_EOL(*ihp)) {
            /* Got it */
            break;
        } else {
            /* Still part of interpreter or args */
        }
    }

    if (ihp == &vdata[IMG_SHSIZE]) {
        /* A long line, like "#! blah blah blah" without end */
        return ENOEXEC;
    }

    /* Backtrack until we find the last non-whitespace */
    while (IS_EOL(*ihp) || IS_WHITESPACE(*ihp)) {
        ihp--;
    }

    /* The character after the last non-whitespace is our logical end of line */
    line_endp = ihp + 1;

    /*
     * Now we have pointers to the usable part of:
     *
     * "#!  /usr/bin/int first    second   third    \n"
     *      ^ line_startp                       ^ line_endp
     */

    /* copy the interpreter name */
    interp = imgp->ip_interp_buffer;
    for (ihp = line_startp; (ihp < line_endp) && !IS_WHITESPACE(*ihp); ihp++) {
        *interp++ = *ihp;
    }
    *interp = '\0';

    exec_reset_save_path(imgp);
    exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_buffer),
        UIO_SYSSPACE, NULL);

    /* Copy the entire interpreter + args for later processing into argv[] */
    interp = imgp->ip_interp_buffer;
    for (ihp = line_startp; (ihp < line_endp); ihp++) {
        *interp++ = *ihp;
    }
    *interp = '\0';

#if CONFIG_SETUID
    /*
     * If we have an SUID or SGID script, create a file descriptor
     * from the vnode and pass /dev/fd/%d instead of the actual
     * path name so that the script does not get opened twice
     */
    if (imgp->ip_origvattr->va_mode & (VSUID | VSGID)) {
        proc_t p;
        struct fileproc *fp;
        int fd;
        int error;

        p = vfs_context_proc(imgp->ip_vfs_context);
        error = falloc_exec(p, imgp->ip_vfs_context, &fp, &fd);
        if (error) {
            return error;
        }

        fp->fp_glob->fg_flag = FREAD;
        fp->fp_glob->fg_ops = &vnops;
        fp_set_data(fp, imgp->ip_vp);

        proc_fdlock(p);
        procfdtbl_releasefd(p, fd, NULL);
        fp_drop(p, fd, fp, 1);
        proc_fdunlock(p);
        vnode_ref(imgp->ip_vp);

        imgp->ip_interp_sugid_fd = fd;
    }
#endif /* CONFIG_SETUID */

    return -3;
}


/*
 * exec_fat_imgact
 *
 * Image activator for fat 1.0 binaries.  If the binary is fat, then we
 * need to select an image from it internally, and make that the image
 * we are going to attempt to execute.  At present, this consists of
 * reloading the first page for the image with a first page from the
 * offset location indicated by the fat header.
 *
 * Parameters:  struct image_params *   image parameter block
 *
 * Returns:     -1                      not a fat binary (keep looking)
 *              -2                      Success: encapsulated binary: reread
 *              >0                      Failure: error number
 *
 * Important:   This image activator is byte order neutral.
 *
 * Note:        A return value other than -1 indicates subsequent image
 *              activators should not be given the opportunity to attempt
 *              to activate the image.
 *
 *              If we find an encapsulated binary, we make no assertions
 *              about its validity; instead, we leave that up to a rescan
 *              for an activator to claim it, and, if it is claimed by one,
 *              that activator is responsible for determining validity.
 */
static int
exec_fat_imgact(struct image_params *imgp)
{
    proc_t p = vfs_context_proc(imgp->ip_vfs_context);
    kauth_cred_t cred = kauth_cred_proc_ref(p);
    struct fat_header *fat_header = (struct fat_header *)imgp->ip_vdata;
    struct _posix_spawnattr *psa = NULL;
    struct fat_arch fat_arch;
    int resid, error;
    load_return_t lret;

    if (imgp->ip_origcputype != 0) {
        /* Fat header previously matched, don't allow another fat file inside */
        error = -1; /* not claimed */
        goto bad;
    }

    /* Make sure it's a fat binary */
    if (OSSwapBigToHostInt32(fat_header->magic) != FAT_MAGIC) {
        error = -1; /* not claimed */
        goto bad;
    }

    /* imgp->ip_vdata has PAGE_SIZE, zerofilled if the file is smaller */
    lret = fatfile_validate_fatarches((vm_offset_t)fat_header, PAGE_SIZE,
        (off_t)imgp->ip_vattr->va_data_size);
    if (lret != LOAD_SUCCESS) {
        error = load_return_to_errno(lret);
        goto bad;
    }

    /* If posix_spawn binprefs exist, respect those prefs. */
    psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
    if (psa != NULL && psa->psa_binprefs[0] != 0) {
        uint32_t pr = 0;

        /* Check each preference listed against all arches in header */
        for (pr = 0; pr < NBINPREFS; pr++) {
            cpu_type_t pref = psa->psa_binprefs[pr];
            cpu_type_t subpref = psa->psa_subcpuprefs[pr];

            if (pref == 0) {
                /* No suitable arch in the pref list */
                error = EBADARCH;
                goto bad;
            }

            if (pref == CPU_TYPE_ANY) {
                /* Fall through to regular grading */
                goto regular_grading;
            }

            lret = fatfile_getbestarch_for_cputype(pref,
                subpref,
                (vm_offset_t)fat_header,
                PAGE_SIZE,
                imgp,
                &fat_arch);
            if (lret == LOAD_SUCCESS) {
                goto use_arch;
            }
        }

        /* Requested binary preference was not honored */
        error = EBADEXEC;
        goto bad;
    }

regular_grading:
    /* Look up our preferred architecture in the fat file. */
    lret = fatfile_getbestarch((vm_offset_t)fat_header,
        PAGE_SIZE,
        imgp,
        &fat_arch,
        (p->p_flag & P_AFFINITY) != 0);
    if (lret != LOAD_SUCCESS) {
        error = load_return_to_errno(lret);
        goto bad;
    }

use_arch:
    /* Read the Mach-O header out of fat_arch */
    error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata,
        PAGE_SIZE, fat_arch.offset,
        UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
        cred, &resid, p);
    if (error) {
        if (error == ERESTART) {
            error = EINTR;
        }
        goto bad;
    }

    if (resid) {
        memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid);
    }

    /* Success.  Indicate we have identified an encapsulated binary */
    error = -2;
    imgp->ip_arch_offset = (user_size_t)fat_arch.offset;
    imgp->ip_arch_size = (user_size_t)fat_arch.size;
    imgp->ip_origcputype = fat_arch.cputype;
    imgp->ip_origcpusubtype = fat_arch.cpusubtype;

bad:
    kauth_cred_unref(&cred);
    return error;
}

static int
activate_exec_state(task_t task, proc_t p, thread_t thread, load_result_t *result)
{
    int ret;

    (void)task_set_dyld_info(task, MACH_VM_MIN_ADDRESS, 0, false);
    task_set_64bit(task, result->is_64bit_addr, result->is_64bit_data);
    if (result->is_64bit_addr) {
        OSBitOrAtomic(P_LP64, &p->p_flag);
        get_bsdthread_info(thread)->uu_flag |= UT_LP64;
    } else {
        OSBitAndAtomic(~((uint32_t)P_LP64), &p->p_flag);
        get_bsdthread_info(thread)->uu_flag &= ~UT_LP64;
    }
    task_set_mach_header_address(task, result->mach_header);

    ret = thread_state_initialize(thread);
    if (ret != KERN_SUCCESS) {
        return ret;
    }

    if (result->threadstate) {
        uint32_t *ts = result->threadstate;
        uint32_t total_size = (uint32_t)result->threadstate_sz;

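        /*
         * Each threadstate entry is { flavor, count, data[count] } in
         * uint32_t units, so one entry occupies (count + 2) 32-bit
         * words; walk the buffer one entry at a time.
         */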
        while (total_size > 0) {
            uint32_t flavor = *ts++;
            uint32_t size = *ts++;

            ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
            if (ret) {
                return ret;
            }
            ts += size;
            total_size -= (size + 2) * sizeof(uint32_t);
        }
    }

    thread_setentrypoint(thread, result->entry_point);

    return KERN_SUCCESS;
}

#if (DEVELOPMENT || DEBUG)
extern char panic_on_proc_crash[];
extern int use_panic_on_proc_crash;

extern char panic_on_proc_exit[];
extern int use_panic_on_proc_exit;

extern char panic_on_proc_spawn_fail[];
extern int use_panic_on_proc_spawn_fail;

static inline void
set_crash_behavior_from_bootarg(proc_t p)
{
    if (use_panic_on_proc_crash && strcmp(p->p_comm, panic_on_proc_crash) == 0) {
        printf("will panic on proc crash: %s\n", p->p_comm);
        p->p_crash_behavior |= POSIX_SPAWN_PANIC_ON_CRASH;
    }

    if (use_panic_on_proc_exit && strcmp(p->p_comm, panic_on_proc_exit) == 0) {
        printf("will panic on proc exit: %s\n", p->p_comm);
        p->p_crash_behavior |= POSIX_SPAWN_PANIC_ON_EXIT;
    }

    if (use_panic_on_proc_spawn_fail && strcmp(p->p_comm, panic_on_proc_spawn_fail) == 0) {
        printf("will panic on proc spawn fail: %s\n", p->p_comm);
        p->p_crash_behavior |= POSIX_SPAWN_PANIC_ON_SPAWN_FAIL;
    }
}
#endif

void
set_proc_name(struct image_params *imgp, proc_t p)
{
    int p_name_len = sizeof(p->p_name) - 1;

    if (imgp->ip_ndp->ni_cnd.cn_namelen > p_name_len) {
        imgp->ip_ndp->ni_cnd.cn_namelen = p_name_len;
    }

    bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_name,
        (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
    p->p_name[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';

    if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN) {
        imgp->ip_ndp->ni_cnd.cn_namelen = MAXCOMLEN;
    }

    bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm,
        (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen);
    p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0';

#if (DEVELOPMENT || DEBUG)
    /*
     * This happens during image activation, so the crash behavior flags from
     * posix_spawn will have already been set.  So we don't have to worry about
     * this being overridden.
     */
    set_crash_behavior_from_bootarg(p);
#endif
}

#if __has_feature(ptrauth_calls)
/**
 * Returns a team ID string that may be used to assign a shared region.
 *
 * Platform binaries do not have team IDs and will return NULL.  Non-platform
 * binaries without a team ID will be assigned an artificial team ID of ""
 * (empty string) so that they will not be assigned to the default shared
 * region.
 *
 * @param imgp image parameter block
 * @return NULL if this is a platform binary, or an appropriate team ID string
 *         otherwise
 */
static inline const char *
get_teamid_for_shared_region(struct image_params *imgp)
{
    assert(imgp->ip_vp != NULL);

    const char *ret = csvnode_get_teamid(imgp->ip_vp, imgp->ip_arch_offset);
    if (ret) {
        return ret;
    }

    struct cs_blob *blob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset);
    if (csblob_get_platform_binary(blob)) {
        return NULL;
    } else {
        static const char *NO_TEAM_ID = "";
        return NO_TEAM_ID;
    }
}

/**
 * Determines whether ptrauth should be enabled for the provided arm64 CPU subtype.
 *
 * @param cpusubtype Mach-O style CPU subtype
 * @return whether the CPU subtype matches arm64e with the current ptrauth ABI
 */
static inline bool
arm64_cpusubtype_uses_ptrauth(cpu_subtype_t cpusubtype)
{
    return (cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E &&
           CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(cpusubtype) == CPU_SUBTYPE_ARM64_PTR_AUTH_CURRENT_VERSION;
}

#endif /* __has_feature(ptrauth_calls) */

/**
 * Returns whether a type/subtype slice matches the requested
 * type/subtype.
 *
 * @param mask Bits to mask from the requested/tested cpu type
 * @param req_cpu Requested cpu type
 * @param req_subcpu Requested cpu subtype
 * @param test_cpu Tested slice cpu type
 * @param test_subcpu Tested slice cpu subtype
 */
boolean_t
binary_match(cpu_type_t mask, cpu_type_t req_cpu,
    cpu_subtype_t req_subcpu, cpu_type_t test_cpu,
    cpu_subtype_t test_subcpu)
{
    if ((test_cpu & ~mask) != (req_cpu & ~mask)) {
        return FALSE;
    }

    test_subcpu &= ~CPU_SUBTYPE_MASK;
    req_subcpu &= ~CPU_SUBTYPE_MASK;

    if (test_subcpu != req_subcpu && req_subcpu != (CPU_SUBTYPE_ANY & ~CPU_SUBTYPE_MASK)) {
        return FALSE;
    }

    return TRUE;
}
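
/*
 * Illustrative usage (editor's sketch, not a call made in this file):
 * binary_match(CPU_ARCH_MASK, CPU_TYPE_ARM64, CPU_SUBTYPE_ANY,
 * CPU_TYPE_ARM64, CPU_SUBTYPE_ARM64E) returns TRUE, because
 * CPU_SUBTYPE_ANY short-circuits the subtype comparison above.
 */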


#define MIN_IOS_TPRO_SDK_VERSION       0x00100000
#define MIN_OSX_TPRO_SDK_VERSION       0x000D0000
#define MIN_TVOS_TPRO_SDK_VERSION      0x000D0000
#define MIN_WATCHOS_TPRO_SDK_VERSION   0x00090000
#define MIN_DRIVERKIT_TPRO_SDK_VERSION 0x00600000
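
/*
 * Editor's note: these thresholds use the packed Mach-O build-version
 * encoding 0xXXXXYYZZ (major XXXX, minor YY, patch ZZ); for example,
 * 0x00100000 decodes to 16.0 (iOS 16) and 0x000D0000 to 13.0 (macOS 13).
 */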

static void
exec_setup_tpro(struct image_params *imgp, load_result_t *load_result)
{
    extern boolean_t xprr_tpro_enabled;
    extern boolean_t enable_user_modifiable_perms;
    uint32_t min_sdk_version = 0;

    /* x86-64 translated code cannot take advantage of TPRO */
    if (imgp->ip_flags & IMGPF_ROSETTA) {
        return;
    }

    /* Do not enable on 32-bit VA targets */
    if (!(imgp->ip_flags & IMGPF_IS_64BIT_ADDR)) {
        return;
    }

    switch (load_result->ip_platform) {
    case PLATFORM_IOS:
    case PLATFORM_IOSSIMULATOR:
    case PLATFORM_MACCATALYST:
        min_sdk_version = MIN_IOS_TPRO_SDK_VERSION;
        break;
    case PLATFORM_MACOS:
        min_sdk_version = MIN_OSX_TPRO_SDK_VERSION;
        break;
    case PLATFORM_TVOS:
    case PLATFORM_TVOSSIMULATOR:
        min_sdk_version = MIN_TVOS_TPRO_SDK_VERSION;
        break;
    case PLATFORM_WATCHOS:
    case PLATFORM_WATCHOSSIMULATOR:
        min_sdk_version = MIN_WATCHOS_TPRO_SDK_VERSION;
        break;
    case PLATFORM_DRIVERKIT:
        min_sdk_version = MIN_DRIVERKIT_TPRO_SDK_VERSION;
        break;
    default:
        /* TPRO is on by default for newer platforms */
        break;
    }

}

/*
 * Returns TRUE if the passed-in executable's vnode should use the RSR
 * shared region, FALSE otherwise.
 */
static uint32_t rsr_current_version = 0;
boolean_t (*rsr_check_vnode)(void *vnode) = NULL;

boolean_t
vnode_is_rsr(vnode_t vp)
{
    if (!(vnode_isreg(vp) && vnode_tag(vp) == VT_APFS)) {
        return FALSE;
    }

    if (rsr_check_vnode != NULL && rsr_check_vnode((void *)vp)) {
        return TRUE;
    }
    return FALSE;
}


// Check entitlements to see if this is a hardened runtime binary.
// Save this in load_result until later for two purposes:
// 1. Once the task is created, we can mark it as hardened runtime if needed
// 2. We can propagate which entitlements are present to the apple array
static inline void
encode_HR_entitlement(const char *entitlement, HR_flags_t mask,
    const struct image_params *imgp, load_result_t *load_result)
{
    if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, entitlement)) {
        load_result->hardened_runtime_binary |= mask;
    }
}

uint32_t
rsr_get_version(void)
{
    return os_atomic_load(&rsr_current_version, relaxed);
}

void
rsr_bump_version(void)
{
    os_atomic_inc(&rsr_current_version, relaxed);
}

#if XNU_TARGET_OS_OSX
static int
rsr_version_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    int value = rsr_get_version();
    int error = SYSCTL_OUT(req, &value, sizeof(int));
    if (error) {
        return error;
    }

    if (!req->newptr) {
        return 0;
    }

    error = SYSCTL_IN(req, &value, sizeof(int));
    if (error) {
        return error;
    }
    if (value != 0) {
        rsr_bump_version();
    }
    return 0;
}


SYSCTL_PROC(_vm, OID_AUTO, shared_region_control,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED,
    0, 0, rsr_version_sysctl, "I", "");
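
/*
 * Illustrative usage (editor's note, assuming a build that exposes this
 * sysctl): writing any nonzero value, e.g. "sysctl vm.shared_region_control=1",
 * bumps rsr_current_version via rsr_bump_version() above.
 */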
#endif /* XNU_TARGET_OS_OSX */

/*
 * exec_mach_imgact
 *
 * Image activator for mach-o 1.0 binaries.
 *
 * Parameters:  struct image_params *   image parameter block
 *
 * Returns:     -1                      not a Mach-O binary (keep looking)
 *              -2                      Success: encapsulated binary: reread
 *              >0                      Failure: error number
 *              EBADARCH                Mach-o binary, but with an unrecognized
 *                                      architecture
 *              ENOMEM                  No memory for child process after -
 *                                      can only happen after vfork()
 *
 * Important:   This image activator is NOT byte order neutral.
 *
 * Note:        A return value other than -1 indicates subsequent image
 *              activators should not be given the opportunity to attempt
 *              to activate the image.
 */
static int
exec_mach_imgact(struct image_params *imgp)
{
    struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata;
    proc_t p = vfs_context_proc(imgp->ip_vfs_context);
    int error = 0;
    task_t task;
    task_t new_task = NULL; /* protected by vfexec */
    thread_t thread;
    struct uthread *uthread;
    vm_map_t old_map = VM_MAP_NULL;
    vm_map_t map = VM_MAP_NULL;
    load_return_t lret;
    load_result_t load_result = {};
    struct _posix_spawnattr *psa = NULL;
    int spawn = (imgp->ip_flags & IMGPF_SPAWN);
    const int vfexec = 0;
    int exec = (imgp->ip_flags & IMGPF_EXEC);
    os_reason_t exec_failure_reason = OS_REASON_NULL;
    boolean_t reslide = FALSE;
    char *userspace_coredump_name = NULL;

    /*
     * Make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference
     * is a reserved field on the end, so for the most part, we can
     * treat them as if they were identical.  Reverse-endian Mach-O
     * binaries are recognized but not compatible.
     */
    if ((mach_header->magic == MH_CIGAM) ||
        (mach_header->magic == MH_CIGAM_64)) {
        error = EBADARCH;
        goto bad;
    }

    if ((mach_header->magic != MH_MAGIC) &&
        (mach_header->magic != MH_MAGIC_64)) {
        error = -1;
        goto bad;
    }

    if (mach_header->filetype != MH_EXECUTE) {
        error = -1;
        goto bad;
    }

    if (imgp->ip_origcputype != 0) {
        /* Fat header previously had an idea about this thin file */
        if (imgp->ip_origcputype != mach_header->cputype ||
            imgp->ip_origcpusubtype != mach_header->cpusubtype) {
            error = EBADARCH;
            goto bad;
        }
    } else {
        imgp->ip_origcputype = mach_header->cputype;
        imgp->ip_origcpusubtype = mach_header->cpusubtype;
    }

    task = current_task();
    thread = current_thread();
    uthread = get_bsdthread_info(thread);

    if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64) {
        imgp->ip_flags |= IMGPF_IS_64BIT_ADDR | IMGPF_IS_64BIT_DATA;
    }


    /* If posix_spawn binprefs exist, respect those prefs. */
    psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
    if (psa != NULL && psa->psa_binprefs[0] != 0) {
        int pr = 0;
        for (pr = 0; pr < NBINPREFS; pr++) {
            cpu_type_t pref = psa->psa_binprefs[pr];
            cpu_subtype_t subpref = psa->psa_subcpuprefs[pr];

            if (pref == 0) {
                /* No suitable arch in the pref list */
                error = EBADARCH;
                goto bad;
            }

            if (pref == CPU_TYPE_ANY) {
                /* Jump to regular grading */
                goto grade;
            }

            if (binary_match(CPU_ARCH_MASK, pref, subpref,
                imgp->ip_origcputype, imgp->ip_origcpusubtype)) {
                goto grade;
            }
        }
        error = EBADARCH;
        goto bad;
    }
grade:
    if (!grade_binary(imgp->ip_origcputype, imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK,
        imgp->ip_origcpusubtype & CPU_SUBTYPE_MASK, TRUE)) {
        error = EBADARCH;
        goto bad;
    }

    if (validate_potential_simulator_binary(imgp->ip_origcputype, imgp,
        imgp->ip_arch_offset, imgp->ip_arch_size) != LOAD_SUCCESS) {
#if __x86_64__
        const char *excpath;
        error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg, &excpath);
        os_log_error(OS_LOG_DEFAULT, "Unsupported 32-bit executable: \"%s\"", (error) ? imgp->ip_vp->v_name : excpath);
#endif
        error = EBADARCH;
        goto bad;
    }

#if defined(HAS_APPLE_PAC)
    assert(mach_header->cputype == CPU_TYPE_ARM64);

    if ((mach_header->cputype == CPU_TYPE_ARM64 &&
        arm64_cpusubtype_uses_ptrauth(mach_header->cpusubtype))) {
        imgp->ip_flags &= ~IMGPF_NOJOP;
    } else {
        imgp->ip_flags |= IMGPF_NOJOP;
    }
#endif

    /* Copy in arguments/environment from the old process */
    error = exec_extract_strings(imgp);
    if (error) {
        goto bad;
    }

    AUDIT_ARG(argv, imgp->ip_startargv, imgp->ip_argc,
        imgp->ip_endargv - imgp->ip_startargv);
    AUDIT_ARG(envv, imgp->ip_endargv, imgp->ip_envc,
        imgp->ip_endenvv - imgp->ip_endargv);



    /* reset local idea of thread, uthread, task */
    thread = imgp->ip_new_thread;
    uthread = get_bsdthread_info(thread);
    task = new_task = get_threadtask(thread);

    /*
     * Load the Mach-O file.
     *
     * NOTE: An error after this point indicates we have potentially
     * destroyed or overwritten some process state while attempting an
     * execve() following a vfork(), which is an unrecoverable condition.
     * We send the new process an immediate SIGKILL to avoid it executing
     * any instructions in the mutated address space.  For true spawns,
     * this is not the case, and "too late" is still not too late to
     * return an error code to the parent process.
     */

    /*
     * Actually load the image file we previously decided to load.
     */
    lret = load_machfile(imgp, mach_header, thread, &map, &load_result);
    if (lret != LOAD_SUCCESS) {
        error = load_return_to_errno(lret);

        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
            proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO, 0, 0);
        if (lret == LOAD_BADMACHO_UPX) {
            set_proc_name(imgp, p);
            exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_UPX);
            exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
        } else {
            exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO);

            if (bootarg_execfailurereports) {
                set_proc_name(imgp, p);
                exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
            }
        }

        exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE;

        goto badtoolate;
    }

    assert(imgp->ip_free_map == NULL);


    // It's safe to check entitlements anytime after `load_machfile` if you check
    // based on the vnode in imgp.  We must perform this entitlement check
    // before we start using load_result->hardened_runtime_binary further down.
    load_result.hardened_runtime_binary = 0;
    encode_HR_entitlement(kCSWebBrowserHostEntitlement, BrowserHostEntitlementMask, imgp, &load_result);
    encode_HR_entitlement(kCSWebBrowserGPUEntitlement, BrowserGPUEntitlementMask, imgp, &load_result);
    encode_HR_entitlement(kCSWebBrowserNetworkEntitlement, BrowserNetworkEntitlementMask, imgp, &load_result);
    encode_HR_entitlement(kCSWebBrowserWebContentEntitlement, BrowserWebContentEntitlementMask, imgp, &load_result);

    /*
     * ERROR RECOVERY
     *
     * load_machfile() returned the new VM map ("map") but we haven't
     * committed to it yet.
     * Any error path between here and the point where we commit to using
     * the new "map" (with swap_task_map()) should deallocate "map".
     */

#ifndef KASAN
    /*
     * Security: zone sanity checks on fresh boot or initproc re-exec.
     * launchd by design does not tear down its own service port on USR (rdar://72797967),
     * which means here is the earliest point we can assert on empty service port label zone,
     * after load_machfile() above terminates old launchd's IPC space.
     *
     * Disable on KASAN builds since zone_size_allocated() accounts for elements
     * under quarantine.
     */
    if (task_pid(task) == 1) {
        zone_userspace_reboot_checks();
    }
#endif

    proc_lock(p);
    p->p_cputype = imgp->ip_origcputype;
    p->p_cpusubtype = imgp->ip_origcpusubtype;
    proc_setplatformdata(p, load_result.ip_platform, load_result.lr_min_sdk, load_result.lr_sdk);
    exec_setup_tpro(imgp, &load_result);

    vm_map_set_size_limit(map, proc_limitgetcur(p, RLIMIT_AS));
    vm_map_set_data_limit(map, proc_limitgetcur(p, RLIMIT_DATA));
    vm_map_set_user_wire_limit(map, (vm_size_t)proc_limitgetcur(p, RLIMIT_MEMLOCK));
#if XNU_TARGET_OS_OSX
    if (proc_platform(p) == PLATFORM_IOS) {
        assert(vm_map_is_alien(map));
    } else {
        assert(!vm_map_is_alien(map));
    }
#endif /* XNU_TARGET_OS_OSX */
    proc_unlock(p);

    /*
     * Set TPRO flags if enabled
     */

    /*
     * Set code-signing flags if this binary is signed, or if parent has
     * requested them on exec.
     */
    if (load_result.csflags & CS_VALID) {
        imgp->ip_csflags |= load_result.csflags &
            (CS_VALID | CS_SIGNED | CS_DEV_CODE | CS_LINKER_SIGNED |
            CS_HARD | CS_KILL | CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV |
            CS_FORCED_LV | CS_ENTITLEMENTS_VALIDATED | CS_NO_UNTRUSTED_HELPERS | CS_RUNTIME |
            CS_ENTITLEMENT_FLAGS |
            CS_EXEC_SET_HARD | CS_EXEC_SET_KILL | CS_EXEC_SET_ENFORCEMENT);
    } else {
        imgp->ip_csflags &= ~CS_VALID;
    }

    if (proc_getcsflags(p) & CS_EXEC_SET_HARD) {
        imgp->ip_csflags |= CS_HARD;
    }
    if (proc_getcsflags(p) & CS_EXEC_SET_KILL) {
        imgp->ip_csflags |= CS_KILL;
    }
    if (proc_getcsflags(p) & CS_EXEC_SET_ENFORCEMENT) {
        imgp->ip_csflags |= CS_ENFORCEMENT;
    }
    if (proc_getcsflags(p) & CS_EXEC_INHERIT_SIP) {
        if (proc_getcsflags(p) & CS_INSTALLER) {
            imgp->ip_csflags |= CS_INSTALLER;
        }
        if (proc_getcsflags(p) & CS_DATAVAULT_CONTROLLER) {
            imgp->ip_csflags |= CS_DATAVAULT_CONTROLLER;
        }
        if (proc_getcsflags(p) & CS_NVRAM_UNRESTRICTED) {
            imgp->ip_csflags |= CS_NVRAM_UNRESTRICTED;
        }
    }

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
    /*
     * ptrauth version 0 is a preview ABI.  Developers can opt into running
     * their own arm64e binaries for local testing, with the understanding
     * that future OSes may break ABI.
     */
    if ((imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E &&
        CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(imgp->ip_origcpusubtype) == 0 &&
        !load_result.platform_binary &&
        !bootarg_arm64e_preview_abi) {
        static bool logged_once = false;
        set_proc_name(imgp, p);

        printf("%s: not running binary \"%s\" built against preview arm64e ABI\n", __func__, p->p_name);
        if (!os_atomic_xchg(&logged_once, true, relaxed)) {
            printf("%s: (to allow this, add \"-arm64e_preview_abi\" to boot-args)\n", __func__);
        }

        exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO);
        if (bootarg_execfailurereports) {
            exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
            exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE;
        }

        /* release new address space since we won't use it */
        imgp->ip_free_map = map;
        map = VM_MAP_NULL;
        goto badtoolate;
    }

    if ((imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_ARM64E &&
        imgp->ip_origcputype == CPU_TYPE_ARM64 &&
        load_result.platform_binary &&
        (imgp->ip_flags & IMGPF_DRIVER) != 0) {
        set_proc_name(imgp, p);
        printf("%s: disallowing arm64 platform driverkit binary \"%s\", should be arm64e\n", __func__, p->p_name);
        exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO);
        if (bootarg_execfailurereports) {
            exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
            exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE;
        }

        /* release new address space since we won't use it */
        imgp->ip_free_map = map;
        map = VM_MAP_NULL;
        goto badtoolate;
    }
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */


    /*
     * Set up the shared cache region in the new process.
     *
     * Normally there is a single shared region per architecture.
     * However on systems with Pointer Authentication, we can create
     * multiple shared caches with the amount of sharing determined
     * by team-id or entitlement.  Inherited shared region IDs are used
     * for system processes that need to match and be able to inspect
     * a pre-existing task.
     */
    int cpu_subtype = 0; /* all cpu_subtypes use the same shared region */
#if __has_feature(ptrauth_calls)
    char *shared_region_id = NULL;
    size_t len;
    char *base;
    const char *cbase;
#define HARDENED_RUNTIME_CONTENT_ID "C-"
#define TEAM_ID_PREFIX "T-"
#define ENTITLE_PREFIX "E-"
#define SR_PREFIX_LEN 2
#define SR_ENTITLEMENT "com.apple.pac.shared_region_id"
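
    /*
     * The resulting IDs look like "C-" (hardened browser web content),
     * "T-<team-id>", or "E-<entitlement value>"; if none applies, the
     * empty string below selects the default shared region.
     */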
1549
1550 if (cpu_type() == CPU_TYPE_ARM64 &&
1551 arm64_cpusubtype_uses_ptrauth(p->p_cpusubtype) &&
1552 (imgp->ip_flags & IMGPF_NOJOP) == 0) {
1553 assertf(p->p_cputype == CPU_TYPE_ARM64,
1554 "p %p cpu_type() 0x%x p->p_cputype 0x%x p->p_cpusubtype 0x%x",
1555 p, cpu_type(), p->p_cputype, p->p_cpusubtype);
1556
1557 /*
1558 * arm64e uses pointer authentication, so request a separate
1559 * shared region for this CPU subtype.
1560 */
1561 cpu_subtype = p->p_cpusubtype & ~CPU_SUBTYPE_MASK;
1562
1563 /*
1564 * Determine which shared cache to select based on being told,
1565 * matching a team-id or matching an entitlement.
1566 */
1567 if (load_result.hardened_runtime_binary & BrowserWebContentEntitlementMask) {
1568 len = sizeof(HARDENED_RUNTIME_CONTENT_ID);
1569 shared_region_id = kalloc_data(len, Z_WAITOK | Z_NOFAIL);
1570 strlcpy(shared_region_id, HARDENED_RUNTIME_CONTENT_ID, len);
1571 } else if (imgp->ip_inherited_shared_region_id) {
1572 len = strlen(imgp->ip_inherited_shared_region_id);
1573 shared_region_id = kalloc_data(len + 1, Z_WAITOK | Z_NOFAIL);
1574 memcpy(shared_region_id, imgp->ip_inherited_shared_region_id, len + 1);
1575 } else if ((cbase = get_teamid_for_shared_region(imgp)) != NULL) {
1576 len = strlen(cbase);
1577 if (vm_shared_region_per_team_id) {
1578 shared_region_id = kalloc_data(len + SR_PREFIX_LEN + 1,
1579 Z_WAITOK | Z_NOFAIL);
1580 memcpy(shared_region_id, TEAM_ID_PREFIX, SR_PREFIX_LEN);
1581 memcpy(shared_region_id + SR_PREFIX_LEN, cbase, len + 1);
1582 }
1583 } else if ((base = IOVnodeGetEntitlement(imgp->ip_vp,
1584 (int64_t)imgp->ip_arch_offset, SR_ENTITLEMENT)) != NULL) {
1585 len = strlen(base);
1586 if (vm_shared_region_by_entitlement) {
1587 shared_region_id = kalloc_data(len + SR_PREFIX_LEN + 1,
1588 Z_WAITOK | Z_NOFAIL);
1589 memcpy(shared_region_id, ENTITLE_PREFIX, SR_PREFIX_LEN);
1590 memcpy(shared_region_id + SR_PREFIX_LEN, base, len + 1);
1591 }
1592 /* Discard the copy of the entitlement */
1593 kfree_data(base, len + 1);
1594 }
1595 }
1596
1597 if (imgp->ip_flags & IMGPF_RESLIDE) {
1598 reslide = TRUE;
1599 }
1600
1601 /* use "" as the default shared_region_id */
1602 if (shared_region_id == NULL) {
1603 shared_region_id = kalloc_data(1, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1604 }
1605
1606 /* ensure there's a unique pointer signing key for this shared_region_id */
1607 shared_region_key_alloc(shared_region_id,
1608 imgp->ip_inherited_shared_region_id != NULL, imgp->ip_inherited_jop_pid);
1609 task_set_shared_region_id(task, shared_region_id);
1610 shared_region_id = NULL;
1611 #endif /* __has_feature(ptrauth_calls) */
1612
1613 #if CONFIG_ROSETTA
1614 if (imgp->ip_flags & IMGPF_ROSETTA) {
1615 OSBitOrAtomic(P_TRANSLATED, &p->p_flag);
1616 } else if (p->p_flag & P_TRANSLATED) {
1617 OSBitAndAtomic(~P_TRANSLATED, &p->p_flag);
1618 }
1619 #endif
1620
1621 int cputype = cpu_type();
1622
1623 uint32_t rsr_version = 0;
1624 #if XNU_TARGET_OS_OSX
1625 if (vnode_is_rsr(imgp->ip_vp)) {
1626 rsr_version = rsr_get_version();
1627 os_atomic_or(&p->p_ladvflag, P_RSR, relaxed);
1628 os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALTLINK, relaxed);
1629 }
1630 #endif /* XNU_TARGET_OS_OSX */
1631
1632 vm_map_exec(map, task, load_result.is_64bit_addr,
1633 (void *)p->p_fd.fd_rdir, cputype, cpu_subtype, reslide,
1634 (imgp->ip_flags & IMGPF_DRIVER) != 0,
1635 rsr_version);
1636
1637 /*
1638 * Close file descriptors which specify close-on-exec.
1639 */
1640 fdt_exec(p, vfs_context_ucred(imgp->ip_vfs_context),
1641 psa != NULL ? psa->psa_flags : 0, imgp->ip_new_thread, exec);
1642
1643 /*
1644 * deal with set[ug]id.
1645 */
1646 error = exec_handle_sugid(imgp);
1647 if (error) {
1648 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1649 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE, 0, 0);
1650
1651 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE);
1652 if (bootarg_execfailurereports) {
1653 set_proc_name(imgp, p);
1654 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1655 }
1656
1657 /* release new address space since we won't use it */
1658 imgp->ip_free_map = map;
1659 map = VM_MAP_NULL;
1660 goto badtoolate;
1661 }
1662
1663 /*
1664 * Commit to new map.
1665 *
1666 * Swap the new map for the old one on the target task; this consumes
1667 * our new map reference but leaves us responsible for the
1668 * old_map reference. That lets us get off the pmap associated
1669 * with it, and then we can release it.
1670 *
1671 * The map needs to be set on the target task which is different
1672 * than current task, thus swap_task_map is used instead of
1673 * vm_map_switch.
1674 */
1675 old_map = swap_task_map(task, thread, map);
1676 #if MACH_ASSERT
1677 /*
1678 * Reset the pmap's process info to prevent ledger checks
1679 * which might fail due to the ledgers being shared between
1680 * the old and new pmaps.
1681 */
1682 vm_map_pmap_set_process(old_map, -1, "<old_map>");
1683 #endif /* MACH_ASSERT */
1684 imgp->ip_free_map = old_map;
1685 old_map = NULL;
1686
1687 lret = activate_exec_state(task, p, thread, &load_result);
1688 if (lret != KERN_SUCCESS) {
1689 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1690 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE, 0, 0);
1691
1692 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE);
1693 if (bootarg_execfailurereports) {
1694 set_proc_name(imgp, p);
1695 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1696 }
1697
1698 goto badtoolate;
1699 }
1700
1701 /*
1702 * deal with voucher on exec-calling thread.
1703 */
1704 if (imgp->ip_new_thread == NULL) {
1705 thread_set_mach_voucher(current_thread(), IPC_VOUCHER_NULL);
1706 }
1707
1708 /* Make sure we won't interrupt ourselves signalling a partially constructed process */
1709 if (!vfexec && !spawn && (p->p_lflag & P_LTRACED)) {
1710 psignal(p, SIGTRAP);
1711 }
1712
1713 if (load_result.unixproc &&
1714 create_unix_stack(get_task_map(task),
1715 &load_result,
1716 p) != KERN_SUCCESS) {
1717 error = load_return_to_errno(LOAD_NOSPACE);
1718
1719 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1720 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC, 0, 0);
1721
1722 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC);
1723 if (bootarg_execfailurereports) {
1724 set_proc_name(imgp, p);
1725 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1726 }
1727
1728 goto badtoolate;
1729 }
1730
1731 if (load_result.hardened_runtime_binary) {
1732 if (cs_debug) {
1733 printf("setting hardened runtime with entitlement mask= "
1734 "0x%x on task: pid = %d\n",
1735 load_result.hardened_runtime_binary,
1736 proc_getpid(p));
1737 }
1738 task_set_hardened_runtime(task, true);
1739 }
1740
1741 /*
1742 * The load result will have already been munged by AMFI to include the
1743 * platform binary flag if boot-args dictated it (AMFI will mark anything
1744 * that doesn't go through the upcall path as a platform binary if its
1745 * enforcement is disabled).
1746 */
1747 if (load_result.platform_binary) {
1748 if (cs_debug) {
1749 printf("setting platform binary on task: pid = %d\n", proc_getpid(p));
1750 }
1751
1752 /*
1753 * We must use 'task' here because the proc's task has not yet been
1754 * switched to the new one.
1755 */
1756 task_set_platform_binary(task, TRUE);
1757 } else {
1758 if (cs_debug) {
1759 printf("clearing platform binary on task: pid = %d\n", proc_getpid(p));
1760 }
1761
1762 task_set_platform_binary(task, FALSE);
1763 }
1764
1765 #if XNU_TARGET_OS_OSX
1766 /* Disable mach hardening for all 1P tasks which load 3P plugins */
1767 if (imgp->ip_flags & IMGPF_3P_PLUGINS) {
1768 if (cs_debug) {
1769 printf("Disabling some mach hardening on task due to 3P plugins: pid = %d\n", proc_getpid(p));
1770 }
1771 task_disable_mach_hardening(task);
1772 }
1773 #if DEVELOPMENT || DEBUG
1774 /* Disable mach hardening for all tasks if amfi_get_out_of_my_way is set.
1775 * Customers will have to turn SIP off to use this boot-arg, and so this is
1776 * only needed internally since we disable this feature when SIP is off. */
1777 if (AMFI_bootarg_disable_mach_hardening) {
1778 if (cs_debug) {
1779 printf("Disabling some mach hardening on task due to AMFI boot-args: pid = %d\n", proc_getpid(p));
1780 }
1781 task_disable_mach_hardening(task);
1782 }
1783 #endif /* DEVELOPMENT || DEBUG */
1784 #endif /* XNU_TARGET_OS_OSX */
1785
1786 /*
1787 * Set starting EXC_GUARD and control port behavior for the task now that
1788 * platform and hardened runtime are set. Use the name directly from imgp since we haven't
1789 * called set_proc_name() yet. Also make the control ports for the task and main thread
1790 * immovable/pinned based on task's option.
1791 *
1792 * Must happen before main thread port copyout in exc_add_apple_strings.
1793 */
1794 task_set_exc_guard_ctrl_port_default(task, thread,
1795 imgp->ip_ndp->ni_cnd.cn_nameptr,
1796 (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen,
1797 proc_is_simulated(p),
1798 load_result.ip_platform,
1799 load_result.lr_sdk);
1800
1801 error = exec_add_apple_strings(imgp, &load_result); /* copies out main thread port */
1802
1803 if (error) {
1804 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1805 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT, 0, 0);
1806
1807 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT);
1808 if (bootarg_execfailurereports) {
1809 set_proc_name(imgp, p);
1810 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1811 }
1812 goto badtoolate;
1813 }
1814
1815 /* Switch to target task's map to copy out strings */
1816 old_map = vm_map_switch(get_task_map(task));
1817
1818 if (load_result.unixproc) {
1819 user_addr_t ap;
1820
1821 /*
1822 * Copy the strings area out into the new process address
1823 * space.
1824 */
1825 ap = p->user_stack;
1826 error = exec_copyout_strings(imgp, &ap);
1827 if (error) {
1828 vm_map_switch(old_map);
1829
1830 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1831 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS, 0, 0);
1832
1833 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS);
1834 if (bootarg_execfailurereports) {
1835 set_proc_name(imgp, p);
1836 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1837 }
1838 goto badtoolate;
1839 }
1840 /* Set the stack */
1841 thread_setuserstack(thread, ap);
1842 }
1843
1844 if (load_result.dynlinker || load_result.is_rosetta) {
1845 user_addr_t ap;
1846 int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
1847
1848 /* Adjust the stack */
1849 ap = thread_adjuserstack(thread, -new_ptr_size);
1850 error = copyoutptr(load_result.mach_header, ap, new_ptr_size);
1851
1852 if (error) {
1853 vm_map_switch(old_map);
1854
1855 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1856 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER, 0, 0);
1857
1858 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER);
1859 if (bootarg_execfailurereports) {
1860 set_proc_name(imgp, p);
1861 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1862 }
1863 goto badtoolate;
1864 }
1865 error = task_set_dyld_info(task, load_result.all_image_info_addr,
1866 load_result.all_image_info_size, false);
1867 if (error) {
1868 vm_map_switch(old_map);
1869
1870 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1871 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_SET_DYLD_INFO, 0, 0);
1872
1873 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SET_DYLD_INFO);
1874 if (bootarg_execfailurereports) {
1875 set_proc_name(imgp, p);
1876 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1877 }
1878 error = EINVAL;
1879 goto badtoolate;
1880 }
1881 } else {
1882 /*
1883 * No dyld or rosetta loaded, set the TF_DYLD_ALL_IMAGE_FINAL bit on task.
1884 */
1885 error = task_set_dyld_info(task, MACH_VM_MIN_ADDRESS,
1886 0, true);
1887 if (error) {
1888 vm_map_switch(old_map);
1889
1890 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1891 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_SET_DYLD_INFO, 0, 0);
1892
1893 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SET_DYLD_INFO);
1894 if (bootarg_execfailurereports) {
1895 set_proc_name(imgp, p);
1896 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1897 }
1898 error = EINVAL;
1899 goto badtoolate;
1900 }
1901 }
1902
1903 #if CONFIG_ROSETTA
1904 if (load_result.is_rosetta) {
1905 // Add an fd for the executable file for Rosetta's use
1906 int main_binary_fd;
1907 struct fileproc *fp;
1908
1909 error = falloc_exec(p, imgp->ip_vfs_context, &fp, &main_binary_fd);
1910 if (error) {
1911 vm_map_switch(old_map);
1912
1913 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1914 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_MAIN_FD_ALLOC, 0, 0);
1915
1916 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_MAIN_FD_ALLOC);
1917 if (bootarg_execfailurereports) {
1918 set_proc_name(imgp, p);
1919 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1920 }
1921 goto badtoolate;
1922 }
1923
1924 error = VNOP_OPEN(imgp->ip_vp, FREAD, imgp->ip_vfs_context);
1925 if (error) {
1926 vm_map_switch(old_map);
1927
1928 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1929 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_MAIN_FD_ALLOC, 0, 0);
1930
1931 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_MAIN_FD_ALLOC);
1932 if (bootarg_execfailurereports) {
1933 set_proc_name(imgp, p);
1934 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1935 }
1936 goto cleanup_rosetta_fp;
1937 }
1938
1939 fp->fp_glob->fg_flag = FREAD;
1940 fp->fp_glob->fg_ops = &vnops;
1941 fp_set_data(fp, imgp->ip_vp);
1942
1943 proc_fdlock(p);
1944 procfdtbl_releasefd(p, main_binary_fd, NULL);
1945 fp_drop(p, main_binary_fd, fp, 1);
1946 proc_fdunlock(p);
1947
1948 vnode_ref(imgp->ip_vp);
1949
1950 // Pass the dyld fd, dyld load address, and main binary fd on the stack
1951 uint64_t ap = thread_adjuserstack(thread, -24);
1952
1953 error = copyoutptr((user_addr_t)load_result.dynlinker_fd, ap, 8);
1954 if (error) {
1955 vm_map_switch(old_map);
1956
1957 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1958 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_ROSETTA, 0, 0);
1959
1960 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_ROSETTA);
1961 if (bootarg_execfailurereports) {
1962 set_proc_name(imgp, p);
1963 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1964 }
1965 goto cleanup_rosetta_fp;
1966 }
1967
1968 error = copyoutptr(load_result.dynlinker_mach_header, ap + 8, 8);
1969 if (error) {
1970 vm_map_switch(old_map);
1971
1972 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1973 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_ROSETTA, 0, 0);
1974
1975 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_ROSETTA);
1976 if (bootarg_execfailurereports) {
1977 set_proc_name(imgp, p);
1978 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1979 }
1980 goto cleanup_rosetta_fp;
1981 }
1982
1983 error = copyoutptr((user_addr_t)main_binary_fd, ap + 16, 8);
1984 if (error) {
1985 vm_map_switch(old_map);
1986
1987 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1988 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_ROSETTA, 0, 0);
1989
1990 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_ROSETTA);
1991 if (bootarg_execfailurereports) {
1992 set_proc_name(imgp, p);
1993 exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
1994 }
1995 goto cleanup_rosetta_fp;
1996 }
1997
1998 cleanup_rosetta_fp:
1999 if (error) {
2000 fp_free(p, load_result.dynlinker_fd, load_result.dynlinker_fp);
2001 fp_free(p, main_binary_fd, fp);
2002 goto badtoolate;
2003 }
2004 }
2005
2006 #endif
2007
2008 /* Avoid immediate VM faults back into kernel */
2009 exec_prefault_data(p, imgp, &load_result);
2010
2011 vm_map_switch(old_map);
2012
2013 /*
2014 * Reset signal state.
2015 */
2016 execsigs(p, thread);
2017
2018 /*
2019 * Need to cancel async IO requests that can be cancelled and wait for those
2020 * already active. MAY BLOCK!
2021 */
2022 _aio_exec( p );
2023
2024 #if SYSV_SHM
2025 /* FIXME: until vmspace inherit is fixed: */
2026 if (!vfexec && p->vm_shm) {
2027 shmexec(p);
2028 }
2029 #endif
2030 #if SYSV_SEM
2031 /* Clean up the semaphores */
2032 semexit(p);
2033 #endif
2034
2035 /*
2036 * Remember file name for accounting.
2037 */
2038 p->p_acflag &= ~AFORK;
2039
2040 set_proc_name(imgp, p);
2041
2042 #if CONFIG_SECLUDED_MEMORY
2043 if (secluded_for_apps &&
2044 load_result.platform_binary) {
2045 if (strncmp(p->p_name,
2046 "Camera",
2047 sizeof(p->p_name)) == 0) {
2048 task_set_could_use_secluded_mem(task, TRUE);
2049 } else {
2050 task_set_could_use_secluded_mem(task, FALSE);
2051 }
2052 if (strncmp(p->p_name,
2053 "mediaserverd",
2054 sizeof(p->p_name)) == 0) {
2055 task_set_could_also_use_secluded_mem(task, TRUE);
2056 }
2057 if (strncmp(p->p_name,
2058 "cameracaptured",
2059 sizeof(p->p_name)) == 0) {
2060 task_set_could_also_use_secluded_mem(task, TRUE);
2061 }
2062 }
2063 #endif /* CONFIG_SECLUDED_MEMORY */
2064
2065 #if __arm64__
2066 if (load_result.legacy_footprint) {
2067 task_set_legacy_footprint(task);
2068 }
2069 #endif /* __arm64__ */
2070
2071 pal_dbg_set_task_name(task);
2072
2073 #if DEVELOPMENT || DEBUG
2074 /*
2075 * Update the pid and proc name for the importance base, if any
2076 */
2077 task_importance_update_owner_info(task);
2078 #endif
2079
2080 proc_setexecutableuuid(p, &load_result.uuid[0]);
2081
2082 #if CONFIG_DTRACE
2083 dtrace_proc_exec(p);
2084 #endif
2085
2086
2087 if (kdebug_enable) {
2088 long args[4] = {};
2089
2090 uintptr_t fsid = 0, fileid = 0;
2091 if (imgp->ip_vattr) {
2092 uint64_t fsid64 = vnode_get_va_fsid(imgp->ip_vattr);
2093 fsid = (uintptr_t)fsid64;
2094 fileid = (uintptr_t)imgp->ip_vattr->va_fileid;
2095 // check for (unexpected) overflow and trace zero in that case
2096 if (fsid != fsid64 || fileid != imgp->ip_vattr->va_fileid) {
2097 fsid = fileid = 0;
2098 }
2099 }
2100 KERNEL_DEBUG_CONSTANT_IST1(TRACE_DATA_EXEC, proc_getpid(p), fsid, fileid, 0,
2101 (uintptr_t)thread_tid(thread));
2102
2103 extern void kdebug_proc_name_args(struct proc *proc, long args[static 4]);
2104 kdebug_proc_name_args(p, args);
2105 KERNEL_DEBUG_CONSTANT_IST1(TRACE_STRING_EXEC, args[0], args[1],
2106 args[2], args[3], (uintptr_t)thread_tid(thread));
2107 }
2108
2109
2110 /*
2111 * If posix_spawned with the START_SUSPENDED flag, stop the
2112 * process before it runs.
2113 */
2114 if (imgp->ip_px_sa != NULL) {
2115 psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
2116 if (psa->psa_flags & POSIX_SPAWN_START_SUSPENDED) {
2117 proc_lock(p);
2118 p->p_stat = SSTOP;
2119 proc_unlock(p);
2120 (void) task_suspend_internal(task);
2121 }
2122 }
2123
2124 /*
2125 * mark as execed
2126 */
2127 OSBitOrAtomic(P_EXEC, &p->p_flag);
2128 proc_resetregister(p);
2129 if (p->p_pptr && (p->p_lflag & P_LPPWAIT)) {
2130 proc_lock(p);
2131 p->p_lflag &= ~P_LPPWAIT;
2132 proc_unlock(p);
2133 wakeup((caddr_t)p->p_pptr);
2134 }
2135
2136 /*
2137 * Set up dext coredumps on kernel panic.
2138 * This requires the following:
2139 * - dext_panic_coredump=1 boot-arg (enabled by default on DEVELOPMENT, DEBUG and certain Seed builds)
2140 * - process must be a driver
2141 * - process must have the com.apple.private.enable-coredump-on-panic entitlement, and the
2142 * entitlement has a string value.
2143 * - process must have the com.apple.private.enable-coredump-on-panic-seed-privacy-approved
2144 * entitlement (Seed builds only).
2145 *
2146 * The core dump file name is formatted as the entitlement string value (truncated
2147 * to fit), followed by a hyphen and the process PID, e.g. "MyDext-512".
2148 */
2149 if (enable_dext_coredumps_on_panic &&
2150 (imgp->ip_flags & IMGPF_DRIVER) != 0 &&
2151 (userspace_coredump_name = IOVnodeGetEntitlement(imgp->ip_vp,
2152 (int64_t)imgp->ip_arch_offset, USERSPACE_COREDUMP_PANIC_ENTITLEMENT)) != NULL) {
2153 size_t userspace_coredump_name_len = strlen(userspace_coredump_name);
2154
2155 char core_name[MACH_CORE_FILEHEADER_NAMELEN];
2156 /* MACH_CORE_FILEHEADER_NAMELEN (16) - NUL - strlen("-") - up to 5 pid digits = 9 chars for the name */
2157 snprintf(core_name, MACH_CORE_FILEHEADER_NAMELEN, "%.9s-%d", userspace_coredump_name, proc_getpid(p));
2158
2159 kern_register_userspace_coredump(task, core_name);
2160
2161 /* Discard the copy of the entitlement */
2162 kfree_data(userspace_coredump_name, userspace_coredump_name_len + 1);
2163 userspace_coredump_name = NULL;
2164 }
2165
2166 goto done;
2167
2168 badtoolate:
2169 /* Don't allow child process to execute any instructions */
2170 if (!spawn) {
2171 {
2172 assert(exec_failure_reason != OS_REASON_NULL);
2173 if (bootarg_execfailurereports) {
2174 set_proc_name(imgp, current_proc());
2175 }
2176 psignal_with_reason(current_proc(), SIGKILL, exec_failure_reason);
2177 exec_failure_reason = OS_REASON_NULL;
2178
2179 if (exec) {
2180 /* Terminate the exec copy task */
2181 task_terminate_internal(task);
2182 }
2183 }
2184
2185 /* We can't stop this system call at this point, so just pretend we succeeded */
2186 error = 0;
2187 } else {
2188 os_reason_free(exec_failure_reason);
2189 exec_failure_reason = OS_REASON_NULL;
2190 }
2191
2192 done:
2193 if (load_result.threadstate) {
2194 kfree_data(load_result.threadstate, load_result.threadstate_sz);
2195 load_result.threadstate = NULL;
2196 }
2197
2198 bad:
2199 /* If we hit this, we likely would have leaked an exit reason */
2200 assert(exec_failure_reason == OS_REASON_NULL);
2201 return error;
2202 }
2203
2204
2205
2206
2207 /*
2208 * Our image activator table; this is the table of the image types we are
2209 * capable of loading. We list them in order of preference to ensure the
2210 * fastest image load speed.
2211 *
2212 * XXX hardcoded, for now; should use linker sets
2213 */
2214 struct execsw {
2215 int(*const ex_imgact)(struct image_params *);
2216 const char *ex_name;
2217 }const execsw[] = {
2218 { exec_mach_imgact, "Mach-o Binary" },
2219 { exec_fat_imgact, "Fat Binary" },
2220 { exec_shell_imgact, "Interpreter Script" },
2221 { NULL, NULL}
2222 };
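/*
 * A minimal sketch (not compiled) of the return-code contract an image
 * activator is expected to honor, inferred from the dispatch loop in
 * exec_activate_image() below. HYPOTHETICAL_MAGIC and the hdr_is_*()
 * helpers are placeholders for illustration, not real XNU symbols.
 */
#if 0
static int
exec_hypothetical_imgact(struct image_params *imgp)
{
	const uint32_t *magic = (const uint32_t *)imgp->ip_vdata;

	if (*magic != HYPOTHETICAL_MAGIC) {
		return -1;	/* not claimed; the loop tries the next activator */
	}
	if (hdr_is_wrapper(imgp)) {
		/* rewrite imgp->ip_vp / ip_vdata to describe the inner image */
		return -2;	/* encapsulated binary; the dispatch loop restarts */
	}
	if (hdr_is_script(imgp)) {
		/* reset excpath to name the interpreter */
		return -3;	/* interpreter; caller re-runs namei() and retries */
	}
	/* map segments, set up the entry point, etc. */
	return 0;	/* claimed and loaded; an errno value on failure */
}
#endif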
2223
2224
2225 /*
2226 * exec_activate_image
2227 *
2228 * Description: Iterate through the available image activators, and activate
2229 * the image associated with the imgp structure. We start with
2230 * the activator for Mach-o binaries, followed by those for Fat
2231 * binaries and Interpreter scripts.
2232 *
2233 * Parameters: struct image_params * Image parameter block
2234 *
2235 * Returns: 0 Success
2236 * ENOEXEC No activator for image.
2237 * EBADEXEC The executable is corrupt/unknown
2238 * execargs_alloc:EINVAL Invalid argument
2239 * execargs_alloc:EACCES Permission denied
2240 * execargs_alloc:EINTR Interrupted function
2241 * execargs_alloc:ENOMEM Not enough space
2242 * exec_save_path:EFAULT Bad address
2243 * exec_save_path:ENAMETOOLONG Filename too long
2244 * exec_check_permissions:EACCES Permission denied
2245 * exec_check_permissions:ENOEXEC Executable file format error
2246 * exec_check_permissions:ETXTBSY Text file busy [misuse of error code]
2247 * exec_check_permissions:???
2248 * namei:???
2249 * vn_rdwr:??? [anything vn_rdwr can return]
2250 * <ex_imgact>:??? [anything an imgact can return]
2251 * EDEADLK Process is being terminated
2252 */
2253 static int
2254 exec_activate_image(struct image_params *imgp)
2255 {
2256 struct nameidata *ndp = NULL;
2257 const char *excpath;
2258 int error;
2259 int resid;
2260 int once = 1; /* save SGUID-ness for interpreted files */
2261 int i;
2262 int itercount = 0;
2263 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
2264
2265 /*
2266 * For exec, the translock needs to be taken on old proc and not
2267 * on new shadow proc.
2268 */
2269 if (imgp->ip_flags & IMGPF_EXEC) {
2270 p = current_proc();
2271 }
2272
2273 error = execargs_alloc(imgp);
2274 if (error) {
2275 goto bad_notrans;
2276 }
2277
2278 error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg, &excpath);
2279 if (error) {
2280 goto bad_notrans;
2281 }
2282
2283 /* Use excpath, which contains the copyin-ed exec path */
2284 DTRACE_PROC1(exec, uintptr_t, excpath);
2285
2286 ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2287
2288 NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
2289 UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context);
2290
2291 again:
2292 error = namei(ndp);
2293 if (error) {
2294 if (error == ERESTART) {
2295 error = EINTR;
2296 }
2297 goto bad_notrans;
2298 }
2299 imgp->ip_ndp = ndp; /* successful namei(); call nameidone() later */
2300 imgp->ip_vp = ndp->ni_vp; /* if set, need to vnode_put() at some point */
2301
2302 /*
2303 * Before we start the transition from binary A to binary B, make
2304 * sure another thread hasn't started exiting the process. We grab
2305 * the proc lock to check p_lflag initially, and the transition
2306 * mechanism ensures that the value doesn't change after we release
2307 * the lock.
2308 */
2309 proc_lock(p);
2310 if (p->p_lflag & P_LEXIT) {
2311 error = EDEADLK;
2312 proc_unlock(p);
2313 goto bad_notrans;
2314 }
2315 error = proc_transstart(p, 1, 0);
2316 proc_unlock(p);
2317 if (error) {
2318 goto bad_notrans;
2319 }
2320
2321 error = exec_check_permissions(imgp);
2322 if (error) {
2323 goto bad;
2324 }
2325
2326 /* Copy; avoid invocation of an interpreter overwriting the original */
2327 if (once) {
2328 once = 0;
2329 *imgp->ip_origvattr = *imgp->ip_vattr;
2330 }
2331
2332 error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, PAGE_SIZE, 0,
2333 UIO_SYSSPACE, IO_NODELOCKED,
2334 vfs_context_ucred(imgp->ip_vfs_context),
2335 &resid, vfs_context_proc(imgp->ip_vfs_context));
2336 if (error) {
2337 goto bad;
2338 }
2339
2340 if (resid) {
2341 memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid);
2342 }
2343
2344 encapsulated_binary:
2345 /* Limit the number of iterations we will attempt on each binary */
2346 if (++itercount > EAI_ITERLIMIT) {
2347 error = EBADEXEC;
2348 goto bad;
2349 }
2350 error = -1;
2351 for (i = 0; error == -1 && execsw[i].ex_imgact != NULL; i++) {
2352 error = (*execsw[i].ex_imgact)(imgp);
2353
2354 switch (error) {
2355 /* case -1: not claimed: continue */
2356 case -2: /* Encapsulated binary, imgp->ip_XXX set for next iteration */
2357 goto encapsulated_binary;
2358
2359 case -3: /* Interpreter */
2360 #if CONFIG_MACF
2361 /*
2362 * Copy the script label for later use. Note that
2363 * the label can be different when the script is
2364 * actually read by the interpreter.
2365 */
2366 if (imgp->ip_scriptlabelp) {
2367 mac_vnode_label_free(imgp->ip_scriptlabelp);
2368 imgp->ip_scriptlabelp = NULL;
2369 }
2370 imgp->ip_scriptlabelp = mac_vnode_label_alloc(NULL);
2371 if (imgp->ip_scriptlabelp == NULL) {
2372 error = ENOMEM;
2373 break;
2374 }
2375 mac_vnode_label_copy(mac_vnode_label(imgp->ip_vp),
2376 imgp->ip_scriptlabelp);
2377
2378 /*
2379 * Take a ref of the script vnode for later use.
2380 */
2381 if (imgp->ip_scriptvp) {
2382 vnode_put(imgp->ip_scriptvp);
2383 imgp->ip_scriptvp = NULLVP;
2384 }
2385 if (vnode_getwithref(imgp->ip_vp) == 0) {
2386 imgp->ip_scriptvp = imgp->ip_vp;
2387 }
2388 #endif
2389
2390 nameidone(ndp);
2391
2392 vnode_put(imgp->ip_vp);
2393 imgp->ip_vp = NULL; /* already put */
2394 imgp->ip_ndp = NULL; /* already nameidone */
2395
2396 /* Use excpath, which exec_shell_imgact reset to the interpreter */
2397 NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF,
2398 UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context);
2399
2400 proc_transend(p, 0);
2401 goto again;
2402
2403 default:
2404 break;
2405 }
2406 }
2407
2408 if (error == -1) {
2409 error = ENOEXEC;
2410 } else if (error == 0) {
2411 if (imgp->ip_flags & IMGPF_INTERPRET && ndp->ni_vp) {
2412 AUDIT_ARG(vnpath, ndp->ni_vp, ARG_VNODE2);
2413 }
2414
2415 /*
2416 * Call out to allow 3rd party notification of exec.
2417 * Ignore result of kauth_authorize_fileop call.
2418 */
2419 if (kauth_authorize_fileop_has_listeners()) {
2420 kauth_authorize_fileop(vfs_context_ucred(imgp->ip_vfs_context),
2421 KAUTH_FILEOP_EXEC,
2422 (uintptr_t)ndp->ni_vp, 0);
2423 }
2424 }
2425 bad:
2426 proc_transend(p, 0);
2427
2428 bad_notrans:
2429 if (imgp->ip_strings) {
2430 execargs_free(imgp);
2431 }
2432 if (imgp->ip_ndp) {
2433 nameidone(imgp->ip_ndp);
2434 }
2435 kfree_type(struct nameidata, ndp);
2436
2437 return error;
2438 }
2439
2440 /*
2441 * exec_validate_spawnattr_policy
2442 *
2443 * Description: Validates the entitlements required to set the apptype.
2444 *
2445 * Parameters: int psa_apptype posix spawn attribute apptype
2446 *
2447 * Returns: 0 Success
2448 * EPERM Failure
2449 */
2450 static errno_t
2451 exec_validate_spawnattr_policy(int psa_apptype)
2452 {
2453 if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) {
2454 int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK;
2455 if (proctype == POSIX_SPAWN_PROC_TYPE_DRIVER) {
2456 if (!IOCurrentTaskHasEntitlement(POSIX_SPAWN_ENTITLEMENT_DRIVER)) {
2457 return EPERM;
2458 }
2459 }
2460 }
2461
2462 return 0;
2463 }
2464
2465 /*
2466 * exec_handle_spawnattr_policy
2467 *
2468 * Description: Decode and apply the posix_spawn apptype, qos clamp, and watchport ports to the task.
2469 *
2470 * Parameters: proc_t p process to apply attributes to
2471 * int psa_apptype posix spawn attribute apptype
2472 *
2473 * Returns: 0 Success
2474 */
2475 static errno_t
2476 exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_t psa_qos_clamp,
2477 task_role_t psa_darwin_role, struct exec_port_actions *port_actions)
2478 {
2479 int apptype = TASK_APPTYPE_NONE;
2480 int qos_clamp = THREAD_QOS_UNSPECIFIED;
2481 task_role_t role = TASK_UNSPECIFIED;
2482
2483 if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) {
2484 int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK;
2485
2486 switch (proctype) {
2487 case POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE:
2488 apptype = TASK_APPTYPE_DAEMON_INTERACTIVE;
2489 break;
2490 case POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD:
2491 apptype = TASK_APPTYPE_DAEMON_STANDARD;
2492 break;
2493 case POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE:
2494 apptype = TASK_APPTYPE_DAEMON_ADAPTIVE;
2495 break;
2496 case POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND:
2497 apptype = TASK_APPTYPE_DAEMON_BACKGROUND;
2498 break;
2499 case POSIX_SPAWN_PROC_TYPE_APP_DEFAULT:
2500 apptype = TASK_APPTYPE_APP_DEFAULT;
2501 break;
2502 case POSIX_SPAWN_PROC_TYPE_DRIVER:
2503 apptype = TASK_APPTYPE_DRIVER;
2504 break;
2505 default:
2506 apptype = TASK_APPTYPE_NONE;
2507 /* TODO: Should an invalid value here fail the spawn? */
2508 break;
2509 }
2510 }
2511
2512 if (psa_qos_clamp != POSIX_SPAWN_PROC_CLAMP_NONE) {
2513 switch (psa_qos_clamp) {
2514 case POSIX_SPAWN_PROC_CLAMP_UTILITY:
2515 qos_clamp = THREAD_QOS_UTILITY;
2516 break;
2517 case POSIX_SPAWN_PROC_CLAMP_BACKGROUND:
2518 qos_clamp = THREAD_QOS_BACKGROUND;
2519 break;
2520 case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE:
2521 qos_clamp = THREAD_QOS_MAINTENANCE;
2522 break;
2523 default:
2524 qos_clamp = THREAD_QOS_UNSPECIFIED;
2525 /* TODO: Should an invalid value here fail the spawn? */
2526 break;
2527 }
2528 }
2529
2530 if (psa_darwin_role != PRIO_DARWIN_ROLE_DEFAULT) {
2531 proc_darwin_role_to_task_role(psa_darwin_role, &role);
2532 }
2533
2534 if (apptype != TASK_APPTYPE_NONE ||
2535 qos_clamp != THREAD_QOS_UNSPECIFIED ||
2536 role != TASK_UNSPECIFIED ||
2537 port_actions->portwatch_count) {
2538 proc_set_task_spawnpolicy(proc_task(p), thread, apptype, qos_clamp, role,
2539 port_actions->portwatch_array, port_actions->portwatch_count);
2540 }
2541
2542 if (port_actions->registered_count) {
2543 if (_kernelrpc_mach_ports_register3(proc_task(p),
2544 port_actions->registered_array[0],
2545 port_actions->registered_array[1],
2546 port_actions->registered_array[2])) {
2547 return EINVAL;
2548 }
2549 /* mach_ports_register() consumed the array */
2550 bzero(port_actions->registered_array,
2551 sizeof(port_actions->registered_array));
2552 port_actions->registered_count = 0;
2553 }
2554
2555 return 0;
2556 }
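/*
 * A hedged userspace sketch of the attributes this function decodes.
 * The *_np setters below are Apple-specific interfaces whose exact
 * availability is an assumption here; the constants map one-to-one onto
 * the switches above (PROC_TYPE_DAEMON_ADAPTIVE -> TASK_APPTYPE_DAEMON_ADAPTIVE,
 * PROC_CLAMP_UTILITY -> THREAD_QOS_UTILITY). Requesting
 * POSIX_SPAWN_PROC_TYPE_DRIVER would additionally require the
 * entitlement checked in exec_validate_spawnattr_policy() above.
 */
#if 0 /* illustrative only; not compiled into the kernel */
#include <spawn.h>

static int
spawn_with_policy(const char *path, char *const argv[], char *const envp[])
{
	posix_spawnattr_t attr;
	pid_t pid;
	int err;

	posix_spawnattr_init(&attr);
	posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE);
	posix_spawnattr_set_qos_clamp_np(&attr, POSIX_SPAWN_PROC_CLAMP_UTILITY);
	err = posix_spawn(&pid, path, NULL, &attr, argv, envp);
	posix_spawnattr_destroy(&attr);
	return err;
}
#endif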
2557
2558 static void
2559 exec_port_actions_destroy(struct exec_port_actions *port_actions)
2560 {
2561 if (port_actions->excport_array) {
2562 for (uint32_t i = 0; i < port_actions->exception_port_count; i++) {
2563 ipc_port_t port = NULL;
2564 if ((port = port_actions->excport_array[i].port) != NULL) {
2565 ipc_port_release_send(port);
2566 }
2567 }
2568 kfree_type(struct exception_port_action_t, port_actions->exception_port_count,
2569 port_actions->excport_array);
2570 }
2571
2572 if (port_actions->portwatch_array) {
2573 for (uint32_t i = 0; i < port_actions->portwatch_count; i++) {
2574 ipc_port_t port = NULL;
2575 if ((port = port_actions->portwatch_array[i]) != NULL) {
2576 ipc_port_release_send(port);
2577 }
2578 }
2579 kfree_type(ipc_port_t, port_actions->portwatch_count,
2580 port_actions->portwatch_array);
2581 }
2582
2583 for (uint32_t i = 0; i < port_actions->registered_count; i++) {
2584 ipc_port_t port = NULL;
2585 if ((port = port_actions->registered_array[i]) != NULL) {
2586 ipc_port_release_send(port);
2587 }
2588 }
2589 }
2590
2591 /*
2592 * exec_handle_port_actions
2593 *
2594 * Description: Go through the _posix_port_actions_t contents,
2595 * calling task_set_special_port, task_set_exception_ports
2596 * and/or audit_session_spawnjoin for the current task.
2597 *
2598 * Parameters: struct image_params * Image parameter block
2599 *
2600 * Returns: 0 Success
2601 * EINVAL Failure
2602 * ENOTSUP Illegal posix_spawn attr flag was set
2603 */
2604 static errno_t
2605 exec_handle_port_actions(struct image_params *imgp,
2606 struct exec_port_actions *actions)
2607 {
2608 _posix_spawn_port_actions_t pacts = imgp->ip_px_spa;
2609 #if CONFIG_AUDIT
2610 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
2611 #endif
2612 _ps_port_action_t *act = NULL;
2613 task_t task = get_threadtask(imgp->ip_new_thread);
2614 ipc_port_t port = NULL;
2615 errno_t ret = 0;
2616 int i = 0, portwatch_i = 0, registered_i = 0, excport_i = 0;
2617 kern_return_t kr;
2618 boolean_t task_has_watchport_boost = task_has_watchports(current_task());
2619 boolean_t in_exec = (imgp->ip_flags & IMGPF_EXEC);
2620 int ptrauth_task_port_count = 0;
2621
2622 for (i = 0; i < pacts->pspa_count; i++) {
2623 act = &pacts->pspa_actions[i];
2624
2625 switch (act->port_type) {
2626 case PSPA_SPECIAL:
2627 #if CONFIG_AUDIT
2628 case PSPA_AU_SESSION:
2629 #endif
2630 break;
2631 case PSPA_EXCEPTION:
2632 if (++actions->exception_port_count > TASK_MAX_EXCEPTION_PORT_COUNT) {
2633 ret = EINVAL;
2634 goto done;
2635 }
2636 break;
2637 case PSPA_IMP_WATCHPORTS:
2638 if (++actions->portwatch_count > TASK_MAX_WATCHPORT_COUNT) {
2639 ret = EINVAL;
2640 goto done;
2641 }
2642 break;
2643 case PSPA_REGISTERED_PORTS:
2644 if (++actions->registered_count > TASK_PORT_REGISTER_MAX) {
2645 ret = EINVAL;
2646 goto done;
2647 }
2648 break;
2649 case PSPA_PTRAUTH_TASK_PORT:
2650 if (++ptrauth_task_port_count > 1) {
2651 ret = EINVAL;
2652 goto done;
2653 }
2654 break;
2655 default:
2656 ret = EINVAL;
2657 goto done;
2658 }
2659 }
2660
2661 if (actions->exception_port_count) {
2662 actions->excport_array = kalloc_type(struct exception_port_action_t,
2663 actions->exception_port_count, Z_WAITOK | Z_ZERO);
2664
2665 if (actions->excport_array == NULL) {
2666 ret = ENOMEM;
2667 goto done;
2668 }
2669 }
2670 if (actions->portwatch_count) {
2671 if (in_exec && task_has_watchport_boost) {
2672 ret = EINVAL;
2673 goto done;
2674 }
2675 actions->portwatch_array = kalloc_type(ipc_port_t,
2676 actions->portwatch_count, Z_WAITOK | Z_ZERO);
2677 if (actions->portwatch_array == NULL) {
2678 ret = ENOMEM;
2679 goto done;
2680 }
2681 }
2682
2683 for (i = 0; i < pacts->pspa_count; i++) {
2684 act = &pacts->pspa_actions[i];
2685
2686 if (MACH_PORT_VALID(act->new_port)) {
2687 kr = ipc_object_copyin(get_task_ipcspace(current_task()),
2688 act->new_port, MACH_MSG_TYPE_COPY_SEND,
2689 (ipc_object_t *) &port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
2690
2691 if (kr != KERN_SUCCESS) {
2692 ret = EINVAL;
2693 goto done;
2694 }
2695 } else {
2696 /* it's NULL or DEAD */
2697 port = CAST_MACH_NAME_TO_PORT(act->new_port);
2698 }
2699
2700 switch (act->port_type) {
2701 case PSPA_SPECIAL:
2702 kr = task_set_special_port(task, act->which, port);
2703
2704 if (kr != KERN_SUCCESS) {
2705 ret = EINVAL;
2706 }
2707 break;
2708
2709 #if CONFIG_AUDIT
2710 case PSPA_AU_SESSION:
2711 ret = audit_session_spawnjoin(p, port);
2712 if (ret) {
2713 /* audit_session_spawnjoin() has already dropped the reference in case of error. */
2714 goto done;
2715 }
2716
2717 break;
2718 #endif
2719 case PSPA_EXCEPTION:
2720 assert(excport_i < actions->exception_port_count);
2721 /* hold on to this till end of spawn */
2722 actions->excport_array[excport_i].port_action = act;
2723 actions->excport_array[excport_i].port = port;
2724 excport_i++;
2725 break;
2726 case PSPA_IMP_WATCHPORTS:
2727 assert(portwatch_i < actions->portwatch_count);
2728 /* hold on to this till end of spawn */
2729 actions->portwatch_array[portwatch_i++] = port;
2730 break;
2731 case PSPA_REGISTERED_PORTS:
2732 assert(registered_i < actions->registered_count);
2733 /* hold on to this till end of spawn */
2734 actions->registered_array[registered_i++] = port;
2735 break;
2736
2737 case PSPA_PTRAUTH_TASK_PORT:
2738 #if (DEVELOPMENT || DEBUG)
2739 #if defined(HAS_APPLE_PAC)
2740 {
2741 task_t ptr_auth_task = convert_port_to_task(port);
2742
2743 if (ptr_auth_task == TASK_NULL) {
2744 ret = EINVAL;
2745 break;
2746 }
2747
2748 imgp->ip_inherited_shared_region_id =
2749 task_get_vm_shared_region_id_and_jop_pid(ptr_auth_task,
2750 &imgp->ip_inherited_jop_pid);
2751
2752 /* Deallocate task ref returned by convert_port_to_task */
2753 task_deallocate(ptr_auth_task);
2754 }
2755 #endif /* HAS_APPLE_PAC */
2756 #endif /* (DEVELOPMENT || DEBUG) */
2757
2758 /* consume the port right in case of success */
2759 ipc_port_release_send(port);
2760 break;
2761 default:
2762 ret = EINVAL;
2763 break;
2764 }
2765
2766 if (ret) {
2767 /* action failed, so release port resources */
2768 ipc_port_release_send(port);
2769 break;
2770 }
2771 }
2772
2773 done:
2774 if (0 != ret) {
2775 DTRACE_PROC1(spawn__port__failure, mach_port_name_t, act->new_port);
2776 }
2777 return ret;
2778 }
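/*
 * A hedged userspace sketch of the port actions consumed above; each
 * setter corresponds to one PSPA_* case in the loop. The *_np names are
 * the Apple spawn-attribute interfaces as commonly declared, treated as
 * an assumption here.
 */
#if 0 /* illustrative only; not compiled into the kernel */
#include <spawn.h>
#include <mach/mach.h>

static int
spawn_with_ports(const char *path, char *const argv[],
    mach_port_t bootstrap, mach_port_t exc_port)
{
	posix_spawnattr_t attr;
	pid_t pid;
	int err;

	posix_spawnattr_init(&attr);
	/* PSPA_SPECIAL: applied via task_set_special_port() in the loop above */
	posix_spawnattr_setspecialport_np(&attr, bootstrap, TASK_BOOTSTRAP_PORT);
	/* PSPA_EXCEPTION: saved above, applied after image activation by
	 * exec_handle_exception_port_actions() */
	posix_spawnattr_setexceptionports_np(&attr, EXC_MASK_BAD_ACCESS,
	    exc_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);
	err = posix_spawn(&pid, path, NULL, &attr, argv, NULL);
	posix_spawnattr_destroy(&attr);
	return err;
}
#endif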
2779
2780
2781 /*
2782 * exec_handle_exception_port_actions
2783 *
2784 * Description: Go through the saved exception ports in exec_port_actions,
2785 * calling task_set_exception_ports for the current Task.
2786 * This must happen after image activation, and after exec_resettextvp()
2787 * because task_set_exception_ports checks the `TF_PLATFORM` bit and entitlements.
2788 *
2789 * Parameters: struct image_params * Image parameter block
2790 * struct exec_port_actions * Saved Port Actions
2791 *
2792 * Returns: 0 Success
2793 * EINVAL task_set_exception_ports failed
2794 */
2795 static errno_t
2796 exec_handle_exception_port_actions(const struct image_params *imgp,
2797 const struct exec_port_actions *actions)
2798 {
2799 task_t task = get_threadtask(imgp->ip_new_thread);
2800
2801 for (int i = 0; i < actions->exception_port_count; i++) {
2802 ipc_port_t port = actions->excport_array[i].port;
2803 _ps_port_action_t *act = actions->excport_array[i].port_action;
2804 assert(act != NULL);
2805 kern_return_t kr = task_set_exception_ports(task, act->mask, port,
2806 act->behavior, act->flavor);
2807 if (kr != KERN_SUCCESS) {
2808 DTRACE_PROC1(spawn__exception__port__failure, mach_port_name_t, act->new_port);
2809 return EINVAL;
2810 }
2811 actions->excport_array[i].port = NULL;
2812 }
2813
2814 return 0;
2815 }
2816
2817
2818 /*
2819 * exec_handle_file_actions
2820 *
2821 * Description: Go through the _posix_file_actions_t contents applying the
2822 * open, close, and dup2 operations to the open file table for
2823 * the current process.
2824 *
2825 * Parameters: struct image_params * Image parameter block
2826 *
2827 * Returns: 0 Success
2828 * ???
2829 *
2830 * Note: Actions are applied in the order specified, with the credential
2831 * of the parent process. This is done to permit the parent
2832 * process to utilize POSIX_SPAWN_RESETIDS to drop privilege in
2833 * the child after performing operations the child itself would
2834 * not normally be permitted to perform.
2835 */
2836 static int
2837 exec_handle_file_actions(struct image_params *imgp, short psa_flags)
2838 {
2839 int error = 0;
2840 int action;
2841 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
2842 kauth_cred_t p_cred = vfs_context_ucred(imgp->ip_vfs_context);
2843 _posix_spawn_file_actions_t px_sfap = imgp->ip_px_sfa;
2844 int ival[2]; /* dummy retval for system calls */
2845 #if CONFIG_AUDIT
2846 struct uthread *uthread = current_uthread();
2847 #endif
2848
2849 for (action = 0; action < px_sfap->psfa_act_count; action++) {
2850 _psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];
2851
2852 switch (psfa->psfaa_type) {
2853 case PSFA_OPEN: {
2854 /*
2855 * Open is different, in that it requires the use of
2856 * a path argument, which is normally copied in from
2857 * user space; because of this, we have to support an
2858 * open from kernel space that passes an address space
2859 * context of UIO_SYSSPACE, and casts the address
2860 * argument to a user_addr_t.
2861 */
2862 struct vnode_attr *vap;
2863 struct nameidata *ndp;
2864 int mode = psfa->psfaa_openargs.psfao_mode;
2865 int origfd;
2866 struct {
2867 struct vnode_attr va;
2868 struct nameidata nd;
2869 } *__open_data;
2870
2871 __open_data = kalloc_type(typeof(*__open_data), Z_WAITOK | Z_ZERO);
2872 if (__open_data == NULL) {
2873 error = ENOMEM;
2874 break;
2875 }
2876
2877 vap = &__open_data->va;
2878 ndp = &__open_data->nd;
2879
2880 VATTR_INIT(vap);
2881 /* Mask off all but regular access permissions */
2882 mode = ((mode & ~p->p_fd.fd_cmask) & ALLPERMS) & ~S_ISTXT;
2883 VATTR_SET(vap, va_mode, mode & ACCESSPERMS);
2884
2885 AUDIT_SUBCALL_ENTER(OPEN, p, uthread);
2886
2887 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE,
2888 CAST_USER_ADDR_T(psfa->psfaa_openargs.psfao_path),
2889 imgp->ip_vfs_context);
2890
2891 error = open1(imgp->ip_vfs_context, ndp,
2892 psfa->psfaa_openargs.psfao_oflag,
2893 vap, NULL, NULL, &origfd, AUTH_OPEN_NOAUTHFD);
2894
2895 kfree_type(typeof(*__open_data), __open_data);
2896
2897 AUDIT_SUBCALL_EXIT(uthread, error);
2898
2899 /*
2900 * If there's an error, or we get the right fd by
2901 * accident, then drop out here. This is easier than
2902 * reworking all the open code to preallocate fd
2903 * slots, and internally taking one as an argument.
2904 */
2905 if (error || origfd == psfa->psfaa_filedes) {
2906 break;
2907 }
2908
2909 /*
2910 * If we didn't fall out from an error, we ended up
2911 * with the wrong fd; so now we've got to try to dup2
2912 * it to the right one.
2913 */
2914 AUDIT_SUBCALL_ENTER(DUP2, p, uthread);
2915 error = dup2(p, p_cred, origfd, psfa->psfaa_filedes, ival);
2916 AUDIT_SUBCALL_EXIT(uthread, error);
2917 if (error) {
2918 break;
2919 }
2920
2921 /*
2922 * Finally, close the original fd.
2923 */
2924 AUDIT_SUBCALL_ENTER(CLOSE, p, uthread);
2925 error = close_nocancel(p, p_cred, origfd);
2926 AUDIT_SUBCALL_EXIT(uthread, error);
2927 }
2928 break;
2929
2930 case PSFA_DUP2: {
2931 AUDIT_SUBCALL_ENTER(DUP2, p, uthread);
2932 error = dup2(p, p_cred, psfa->psfaa_filedes,
2933 psfa->psfaa_dup2args.psfad_newfiledes, ival);
2934 AUDIT_SUBCALL_EXIT(uthread, error);
2935 }
2936 break;
2937
2938 case PSFA_FILEPORT_DUP2: {
2939 ipc_port_t port;
2940 kern_return_t kr;
2941 int origfd;
2942
2943 if (!MACH_PORT_VALID(psfa->psfaa_fileport)) {
2944 error = EINVAL;
2945 break;
2946 }
2947
2948 kr = ipc_object_copyin(get_task_ipcspace(current_task()),
2949 psfa->psfaa_fileport, MACH_MSG_TYPE_COPY_SEND,
2950 (ipc_object_t *) &port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
2951
2952 if (kr != KERN_SUCCESS) {
2953 error = EINVAL;
2954 break;
2955 }
2956
2957 error = fileport_makefd(p, port, 0, &origfd);
2958
2959 if (IPC_PORT_NULL != port) {
2960 ipc_port_release_send(port);
2961 }
2962
2963 if (error || origfd == psfa->psfaa_dup2args.psfad_newfiledes) {
2964 break;
2965 }
2966
2967 AUDIT_SUBCALL_ENTER(DUP2, p, uthread);
2968 error = dup2(p, p_cred, origfd,
2969 psfa->psfaa_dup2args.psfad_newfiledes, ival);
2970 AUDIT_SUBCALL_EXIT(uthread, error);
2971 if (error) {
2972 break;
2973 }
2974
2975 AUDIT_SUBCALL_ENTER(CLOSE, p, uthread);
2976 error = close_nocancel(p, p_cred, origfd);
2977 AUDIT_SUBCALL_EXIT(uthread, error);
2978 }
2979 break;
2980
2981 case PSFA_CLOSE: {
2982 AUDIT_SUBCALL_ENTER(CLOSE, p, uthread);
2983 error = close_nocancel(p, p_cred, psfa->psfaa_filedes);
2984 AUDIT_SUBCALL_EXIT(uthread, error);
2985 }
2986 break;
2987
2988 case PSFA_INHERIT: {
2989 struct fileproc *fp;
2990
2991 /*
2992 * Check to see if the descriptor exists, and
2993 * ensure it's -not- marked as close-on-exec.
2994 *
2995 * Attempting to "inherit" a guarded fd will
2996 * result in an error.
2997 */
2998
2999 proc_fdlock(p);
3000 if ((fp = fp_get_noref_locked(p, psfa->psfaa_filedes)) == NULL) {
3001 error = EBADF;
3002 } else if (fp->fp_guard_attrs) {
3003 error = fp_guard_exception(p, psfa->psfaa_filedes,
3004 fp, kGUARD_EXC_NOCLOEXEC);
3005 } else {
3006 fp->fp_flags &= ~FP_CLOEXEC;
3007 error = 0;
3008 }
3009 proc_fdunlock(p);
3010 }
3011 break;
3012
3013 case PSFA_CHDIR: {
3014 /*
3015 * Chdir is different, in that it requires the use of
3016 * a path argument, which is normally copied in from
3017 * user space; because of this, we have to support a
3018 * chdir from kernel space that passes an address space
3019 * context of UIO_SYSSPACE, and casts the address
3020 * argument to a user_addr_t.
3021 */
3022 struct nameidata *nd;
3023 nd = kalloc_type(struct nameidata,
3024 Z_WAITOK | Z_ZERO | Z_NOFAIL);
3025
3026 AUDIT_SUBCALL_ENTER(CHDIR, p, uthread);
3027 NDINIT(nd, LOOKUP, OP_CHDIR, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE,
3028 CAST_USER_ADDR_T(psfa->psfaa_chdirargs.psfac_path),
3029 imgp->ip_vfs_context);
3030
3031 error = chdir_internal(p, imgp->ip_vfs_context, nd, 0);
3032 kfree_type(struct nameidata, nd);
3033 AUDIT_SUBCALL_EXIT(uthread, error);
3034 }
3035 break;
3036
3037 case PSFA_FCHDIR: {
3038 AUDIT_SUBCALL_ENTER(FCHDIR, p, uthread);
3039 error = fchdir(p, imgp->ip_vfs_context,
3040 psfa->psfaa_filedes, false);
3041 AUDIT_SUBCALL_EXIT(uthread, error);
3042 }
3043 break;
3044
3045 default:
3046 error = EINVAL;
3047 break;
3048 }
3049
3050 /* All file action failures are considered fatal, per POSIX */
3051
3052 if (error) {
3053 if (PSFA_OPEN == psfa->psfaa_type) {
3054 DTRACE_PROC1(spawn__open__failure, uintptr_t,
3055 psfa->psfaa_openargs.psfao_path);
3056 } else {
3057 DTRACE_PROC1(spawn__fd__failure, int, psfa->psfaa_filedes);
3058 }
3059 break;
3060 }
3061 }
3062
3063 if (error != 0 || (psa_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) == 0) {
3064 return error;
3065 }
3066
3067 /*
3068 * If POSIX_SPAWN_CLOEXEC_DEFAULT is set, behave (during
3069 * this spawn only) as if "close on exec" is the default
3070 * disposition of all pre-existing file descriptors. In this case,
3071 * the list of file descriptors mentioned in the file actions
3072 * are the only ones that can be inherited, so mark them now.
3073 *
3074 * The actual closing part comes later, in fdt_exec().
3075 */
3076 proc_fdlock(p);
3077 for (action = 0; action < px_sfap->psfa_act_count; action++) {
3078 _psfa_action_t *psfa = &px_sfap->psfa_act_acts[action];
3079 int fd = psfa->psfaa_filedes;
3080
3081 switch (psfa->psfaa_type) {
3082 case PSFA_DUP2:
3083 case PSFA_FILEPORT_DUP2:
3084 fd = psfa->psfaa_dup2args.psfad_newfiledes;
3085 OS_FALLTHROUGH;
3086 case PSFA_OPEN:
3087 case PSFA_INHERIT:
3088 *fdflags(p, fd) |= UF_INHERIT;
3089 break;
3090
3091 case PSFA_CLOSE:
3092 case PSFA_CHDIR:
3093 case PSFA_FCHDIR:
3094 /*
3095 * Although PSFA_FCHDIR does have a file descriptor, it is not
3096 * *creating* one, thus we do not automatically mark it for
3097 * inheritance under POSIX_SPAWN_CLOEXEC_DEFAULT. A client that
3098 * wishes it to be inherited should use the PSFA_INHERIT action
3099 * explicitly.
3100 */
3101 break;
3102 }
3103 }
3104 proc_fdunlock(p);
3105
3106 return 0;
3107 }
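/*
 * A hedged userspace sketch tying the PSFA_* cases above to the public
 * posix_spawn file-actions API, including POSIX_SPAWN_CLOEXEC_DEFAULT:
 * with that flag set, only descriptors named in file actions (see the
 * UF_INHERIT marking above) survive into the child.
 */
#if 0 /* illustrative only; not compiled into the kernel */
#include <spawn.h>
#include <fcntl.h>
#include <unistd.h>

static int
spawn_with_files(const char *path, char *const argv[])
{
	posix_spawn_file_actions_t fa;
	posix_spawnattr_t attr;
	pid_t pid;
	int err;

	posix_spawn_file_actions_init(&fa);
	/* PSFA_OPEN: open1() in the child, then dup2()'d onto fd 1 if needed */
	posix_spawn_file_actions_addopen(&fa, STDOUT_FILENO, "/tmp/out.log",
	    O_WRONLY | O_CREAT | O_TRUNC, 0644);
	/* PSFA_DUP2: stderr follows stdout */
	posix_spawn_file_actions_adddup2(&fa, STDOUT_FILENO, STDERR_FILENO);
	/* PSFA_INHERIT: clears FP_CLOEXEC so fd 3 survives CLOEXEC_DEFAULT */
	posix_spawn_file_actions_addinherit_np(&fa, 3);

	posix_spawnattr_init(&attr);
	posix_spawnattr_setflags(&attr, POSIX_SPAWN_CLOEXEC_DEFAULT);
	err = posix_spawn(&pid, path, &fa, &attr, argv, NULL);
	posix_spawn_file_actions_destroy(&fa);
	posix_spawnattr_destroy(&attr);
	return err;
}
#endif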
3108
3109 #if CONFIG_MACF
3110 /*
3111 * Check that the extension's data is within the bounds of the
3112 * allocation storing all extensions' data
3113 */
3114 static inline errno_t
3115 exec_spawnattr_validate_policyext_data(const struct ip_px_smpx_s *px_s,
3116 const _ps_mac_policy_extension_t *ext)
3117 {
3118 uint64_t dataend;
3119
3120 if (__improbable(os_add_overflow(ext->dataoff, ext->datalen, &dataend))) {
3121 return EOVERFLOW;
3122 }
3123 if (__improbable(dataend > px_s->datalen)) {
3124 return EINVAL;
3125 }
3126
3127 return 0;
3128 }
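/*
 * Worked example of the os_add_overflow() idiom above (per the
 * <os/overflow.h> semantics, where the sum lands in the out-parameter
 * and the call returns true only on wrap): dataoff = UINT64_MAX - 4
 * with datalen = 8 wraps, so it is rejected with EOVERFLOW before the
 * bounds comparison against px_s->datalen ever runs.
 */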
3129
3130 /*
3131 * exec_spawnattr_getmacpolicyinfo
3132 */
3133 void *
3134 exec_spawnattr_getmacpolicyinfo(const void *macextensions, const char *policyname, size_t *lenp)
3135 {
3136 const struct ip_px_smpx_s *px_s = macextensions;
3137 const struct _posix_spawn_mac_policy_extensions *psmx = NULL;
3138 int i;
3139
3140 if (px_s == NULL) {
3141 return NULL;
3142 }
3143
3144 psmx = px_s->array;
3145 if (psmx == NULL) {
3146 return NULL;
3147 }
3148
3149 for (i = 0; i < psmx->psmx_count; i++) {
3150 const _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
3151 if (strncmp(extension->policyname, policyname, sizeof(extension->policyname)) == 0) {
3152 if (__improbable(exec_spawnattr_validate_policyext_data(px_s, extension))) {
3153 panic("invalid mac policy extension data");
3154 }
3155 if (lenp != NULL) {
3156 *lenp = (size_t)extension->datalen;
3157 }
3158 return (void *)((uintptr_t)px_s->data + extension->dataoff);
3159 }
3160 }
3161
3162 if (lenp != NULL) {
3163 *lenp = 0;
3164 }
3165 return NULL;
3166 }
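/*
 * A hedged sketch of how a MAC policy might consume its extension via
 * the accessor above. The hook through which the opaque macextensions
 * pointer reaches the policy, and the "com.example.policy" name, are
 * illustrative assumptions; only exec_spawnattr_getmacpolicyinfo()
 * itself comes from this file.
 */
#if 0 /* illustrative only */
static void
example_policy_handle_spawnattr(const void *macextensions)
{
	size_t len = 0;
	const void *blob;

	blob = exec_spawnattr_getmacpolicyinfo(macextensions,
	    "com.example.policy", &len);
	if (blob != NULL && len > 0) {
		/* parse the policy-specific payload copied in at spawn time */
	}
}
#endif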
3167
3168 static int
3169 spawn_copyin_macpolicyinfo(const struct user__posix_spawn_args_desc *px_args,
3170 struct ip_px_smpx_s *pxsp)
3171 {
3172 _posix_spawn_mac_policy_extensions_t psmx = NULL;
3173 uint8_t *data = NULL;
3174 uint64_t datalen = 0;
3175 uint64_t dataoff = 0;
3176 int error = 0;
3177
3178 bzero(pxsp, sizeof(*pxsp));
3179
3180 if (px_args->mac_extensions_size < PS_MAC_EXTENSIONS_SIZE(1) ||
3181 px_args->mac_extensions_size > PAGE_SIZE) {
3182 error = EINVAL;
3183 goto bad;
3184 }
3185
3186 psmx = kalloc_data(px_args->mac_extensions_size, Z_WAITOK);
3187 if (psmx == NULL) {
3188 error = ENOMEM;
3189 goto bad;
3190 }
3191
3192 error = copyin(px_args->mac_extensions, psmx, px_args->mac_extensions_size);
3193 if (error) {
3194 goto bad;
3195 }
3196
3197 size_t extsize = PS_MAC_EXTENSIONS_SIZE(psmx->psmx_count);
3198 if (extsize == 0 || extsize > px_args->mac_extensions_size) {
3199 error = EINVAL;
3200 goto bad;
3201 }
3202
3203 for (int i = 0; i < psmx->psmx_count; i++) {
3204 _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
3205 if (extension->datalen == 0 || extension->datalen > PAGE_SIZE) {
3206 error = EINVAL;
3207 goto bad;
3208 }
3209 if (__improbable(os_add_overflow(datalen, extension->datalen, &datalen))) {
3210 error = ENOMEM;
3211 goto bad;
3212 }
3213 }
3214
3215 data = kalloc_data((vm_size_t)datalen, Z_WAITOK);
3216 if (data == NULL) {
3217 error = ENOMEM;
3218 goto bad;
3219 }
3220
3221 for (int i = 0; i < psmx->psmx_count; i++) {
3222 _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i];
3223
3224 #if !__LP64__
3225 if (extension->data > UINT32_MAX) {
3226 error = EINVAL; goto bad; /* set an explicit error; "bad" would otherwise return 0 */
3227 }
3228 #endif
3229 error = copyin((user_addr_t)extension->data, &data[dataoff], (size_t)extension->datalen);
3230 if (error) {
3231 error = ENOMEM;
3232 goto bad;
3233 }
3234 extension->dataoff = dataoff;
3235 dataoff += extension->datalen;
3236 }
3237
3238 pxsp->array = psmx;
3239 pxsp->data = data;
3240 pxsp->datalen = datalen;
3241 return 0;
3242
3243 bad:
3244 kfree_data(psmx, px_args->mac_extensions_size);
3245 kfree_data(data, (vm_size_t)datalen);
3246 return error;
3247 }
3248 #endif /* CONFIG_MACF */
3249
3250 #if CONFIG_COALITIONS
3251 static inline void
3252 spawn_coalitions_release_all(coalition_t coal[COALITION_NUM_TYPES])
3253 {
3254 for (int c = 0; c < COALITION_NUM_TYPES; c++) {
3255 if (coal[c]) {
3256 coalition_remove_active(coal[c]);
3257 coalition_release(coal[c]);
3258 }
3259 }
3260 }
3261 #endif
3262
3263 #if CONFIG_PERSONAS
3264 static int
3265 spawn_validate_persona(struct _posix_spawn_persona_info *px_persona)
3266 {
3267 int error = 0;
3268 struct persona *persona = NULL;
3269 kauth_cred_t mycred = kauth_cred_get();
3270
3271 if (!IOCurrentTaskHasEntitlement( PERSONA_MGMT_ENTITLEMENT)) {
3272 return EPERM;
3273 }
3274
3275 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
3276 if (px_persona->pspi_ngroups > NGROUPS_MAX) {
3277 return EINVAL;
3278 }
3279 }
3280
3281 persona = persona_lookup(px_persona->pspi_id);
3282 if (!persona) {
3283 return ESRCH;
3284 }
3285
3286 // A non-root process should not be allowed to set a persona with uid/gid 0
3287 if (!kauth_cred_issuser(mycred) &&
3288 (px_persona->pspi_uid == 0 || px_persona->pspi_gid == 0)) {
3289 return EPERM;
3290 }
3291
3292 persona_put(persona);
3293 return error;
3294 }
3295
3296 static bool
3297 kauth_cred_model_setpersona(
3298 kauth_cred_t model,
3299 struct _posix_spawn_persona_info *px_persona)
3300 {
3301 bool updated = false;
3302
3303 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_UID) {
3304 updated |= kauth_cred_model_setresuid(model,
3305 px_persona->pspi_uid,
3306 px_persona->pspi_uid,
3307 px_persona->pspi_uid,
3308 KAUTH_UID_NONE);
3309 }
3310
3311 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GID) {
3312 updated |= kauth_cred_model_setresgid(model,
3313 px_persona->pspi_gid,
3314 px_persona->pspi_gid,
3315 px_persona->pspi_gid);
3316 }
3317
3318 if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) {
3319 updated |= kauth_cred_model_setgroups(model,
3320 px_persona->pspi_groups,
3321 px_persona->pspi_ngroups,
3322 px_persona->pspi_gmuid);
3323 }
3324
3325 return updated;
3326 }
3327
3328 static int
3329 spawn_persona_adopt(proc_t p, struct _posix_spawn_persona_info *px_persona)
3330 {
3331 struct persona *persona = NULL;
3332
3333 /*
3334 * We want to spawn into the given persona, but override the
3335 * kauth credential with a different UID/GID combination.
3336 */
3337 persona = persona_lookup(px_persona->pspi_id);
3338 if (!persona) {
3339 return ESRCH;
3340 }
3341
3342 return persona_proc_adopt(p, persona,
3343 ^bool (kauth_cred_t parent __unused, kauth_cred_t model) {
3344 return kauth_cred_model_setpersona(model, px_persona);
3345 });
3346 }
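/*
 * A hedged userspace sketch of requesting a persona at spawn time. The
 * posix_spawnattr_set_persona*_np calls are private interfaces whose
 * exact signatures are assumed here; spawn_validate_persona() above
 * gates this path behind PERSONA_MGMT_ENTITLEMENT and rejects uid/gid 0
 * for non-root callers.
 */
#if 0 /* illustrative only; not compiled into the kernel */
static int
spawn_attr_adopt_persona(posix_spawnattr_t *attr, uid_t persona_id)
{
	int err;

	err = posix_spawnattr_set_persona_np(attr, persona_id,
	    POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE);
	if (err == 0) {
		err = posix_spawnattr_set_persona_uid_np(attr, 501);	/* POSIX_SPAWN_PERSONA_UID */
	}
	if (err == 0) {
		err = posix_spawnattr_set_persona_gid_np(attr, 20);	/* POSIX_SPAWN_PERSONA_GID */
	}
	return err;
}
#endif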
3347 #endif
3348
3349 #if __arm64__
3350 #if DEVELOPMENT || DEBUG
3351 TUNABLE(int, legacy_footprint_entitlement_mode, "legacy_footprint_entitlement_mode",
3352 LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE);
3353
3354 __startup_func
3355 static void
3356 legacy_footprint_entitlement_mode_init(void)
3357 {
3358 /*
3359 * legacy_footprint_entitlement_mode specifies the behavior we want associated
3360 * with the entitlement. The supported modes are:
3361 *
3362 * LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE:
3363 * Indicates that we want every process to have the memory accounting
3364 * that is available in iOS 12.0 and beyond.
3365 *
3366 * LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT:
3367 * Indicates that for every process that has the 'legacy footprint entitlement',
3368 * we want to give it the old iOS 11.0 accounting behavior which accounted some
3369 * of the process's memory to the kernel.
3370 *
3371 * LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE:
3372 * Indicates that for every process that has the 'legacy footprint entitlement',
3373 * we want it to have a higher memory limit which will help them acclimate to the
3374 * iOS 12.0 (& beyond) accounting behavior that does the right accounting.
3375 * The bonus added to the system-wide task limit to calculate this higher memory limit
3376 * is available in legacy_footprint_bonus_mb.
3377 */
3378
3379 if (legacy_footprint_entitlement_mode < LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE ||
3380 legacy_footprint_entitlement_mode > LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE) {
3381 legacy_footprint_entitlement_mode = LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE;
3382 }
3383 }
3384 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, legacy_footprint_entitlement_mode_init);
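/*
 * Illustrative usage note: since the mode is a TUNABLE, DEVELOPMENT and
 * DEBUG kernels can override it with a boot-arg of the same name, e.g.
 *
 *	nvram boot-args="legacy_footprint_entitlement_mode=<mode>"
 *
 * where <mode> is the numeric value of one of the
 * LEGACY_FOOTPRINT_ENTITLEMENT_* constants described above.
 */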
3385 #else
3386 const int legacy_footprint_entitlement_mode = LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE;
3387 #endif
3388
3389 static inline void
3390 proc_legacy_footprint_entitled(proc_t p, task_t task)
3391 {
3392 #pragma unused(p)
3393 boolean_t legacy_footprint_entitled;
3394
3395 switch (legacy_footprint_entitlement_mode) {
3396 case LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE:
3397 /* the entitlement is ignored */
3398 break;
3399 case LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT:
3400 /* the entitlement grants iOS11 legacy accounting */
3401 legacy_footprint_entitled = memorystatus_task_has_legacy_footprint_entitlement(proc_task(p));
3402 if (legacy_footprint_entitled) {
3403 task_set_legacy_footprint(task);
3404 }
3405 break;
3406 case LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE:
3407 /* the entitlement grants a footprint limit increase */
3408 legacy_footprint_entitled = memorystatus_task_has_legacy_footprint_entitlement(proc_task(p));
3409 if (legacy_footprint_entitled) {
3410 task_set_extra_footprint_limit(task);
3411 }
3412 break;
3413 default:
3414 break;
3415 }
3416 }
3417
3418 static inline void
3419 proc_ios13extended_footprint_entitled(proc_t p, task_t task)
3420 {
3421 #pragma unused(p)
3422 boolean_t ios13extended_footprint_entitled;
3423
3424 /* the entitlement grants a footprint limit increase */
3425 ios13extended_footprint_entitled = memorystatus_task_has_ios13extended_footprint_limit(proc_task(p));
3426 if (ios13extended_footprint_entitled) {
3427 task_set_ios13extended_footprint_limit(task);
3428 }
3429 }
3430
3431 static inline void
3432 proc_increased_memory_limit_entitled(proc_t p, task_t task)
3433 {
3434 if (memorystatus_task_has_increased_debugging_memory_limit_entitlement(task)) {
3435 memorystatus_act_on_entitled_developer_task_limit(p);
3436 } else if (memorystatus_task_has_increased_memory_limit_entitlement(task)) {
3437 memorystatus_act_on_entitled_task_limit(p);
3438 }
3439 }
3440
3441 /*
3442 * Check for any of the various entitlements that permit a higher
3443 * task footprint limit or alternate accounting and apply them.
3444 */
3445 static inline void
3446 proc_footprint_entitlement_hacks(proc_t p, task_t task)
3447 {
3448 proc_legacy_footprint_entitled(p, task);
3449 proc_ios13extended_footprint_entitled(p, task);
3450 proc_increased_memory_limit_entitled(p, task);
3451 }
3452 #endif /* __arm64__ */
3453
3454 /*
3455 * Processes with certain entitlements are granted a jumbo-size VM map.
3456 */
3457 static inline void
3458 proc_apply_jit_and_vm_policies(struct image_params *imgp, proc_t p, task_t task)
3459 {
3460 #if CONFIG_MACF
3461 bool jit_entitled = false;
3462 #endif /* CONFIG_MACF */
3463 bool needs_jumbo_va = false;
3464 bool needs_extra_jumbo_va = false;
3465 struct _posix_spawnattr *psa = imgp->ip_px_sa;
3466
3467 #if CONFIG_MACF
3468 jit_entitled = (mac_proc_check_map_anon(p, proc_ucred_unsafe(p),
3469 0, 0, 0, MAP_JIT, NULL) == 0);
3470 needs_jumbo_va = jit_entitled || IOTaskHasEntitlement(task,
3471 "com.apple.developer.kernel.extended-virtual-addressing") ||
3472 memorystatus_task_has_increased_memory_limit_entitlement(task) ||
3473 memorystatus_task_has_increased_debugging_memory_limit_entitlement(task);
3474 #else
3475 #pragma unused(p)
3476 #endif /* CONFIG_MACF */
3477
3478
3479 if (needs_jumbo_va) {
3480 vm_map_set_jumbo(get_task_map(task));
3481 }
3482
3483 if (psa && psa->psa_max_addr) {
3484 vm_map_set_max_addr(get_task_map(task), psa->psa_max_addr, false);
3485 }
3486
3487 #if CONFIG_MAP_RANGES
3488 if (task_is_hardened_binary(task) && !proc_is_simulated(p)) {
3489 /*
3490 * This must be done last as it needs to observe
3491 * any kind of VA space growth that was requested.
3492 * This is used by the secure allocator, so
3493 * must be applied to all hardened binaries
3494 */
3495 #if XNU_TARGET_OS_IOS && EXTENDED_USER_VA_SUPPORT
3496 needs_extra_jumbo_va = IOTaskHasEntitlement(task,
3497 "com.apple.kernel.large-file-virtual-addressing");
3498 #endif /* XNU_TARGET_OS_IOS && EXTENDED_USER_VA_SUPPORT */
3499 vm_map_range_configure(get_task_map(task), needs_extra_jumbo_va);
3500 }
3501 #else
3502 #pragma unused(needs_extra_jumbo_va)
3503 #endif /* CONFIG_MAP_RANGES */
3504
3505 #if CONFIG_MACF
3506 if (jit_entitled) {
3507 vm_map_set_jit_entitled(get_task_map(task));
3508
3509 }
3510 #endif /* CONFIG_MACF */
3511
3512 #if XNU_TARGET_OS_OSX
3513 /* TPRO cannot be enforced on binaries that load 3P plugins on macOS - rdar://107420220 */
3514 const bool task_loads_3P_plugins = imgp->ip_flags & IMGPF_3P_PLUGINS;
3515 #endif /* XNU_TARGET_OS_OSX */
3516
3517 if (task_is_hardened_binary(task)
3518 #if XNU_TARGET_OS_OSX
3519 && !task_loads_3P_plugins
3520 #endif /* XNU_TARGET_OS_OSX */
3521 ) {
3522 /*
3523 * Pre-emptively disable TPRO remapping for
3524 * hardened binaries (which do not load 3P plugins)
3525 */
3526 vm_map_set_tpro_enforcement(get_task_map(task));
3527 }
3528 }
3529
3530 static int
3531 spawn_posix_cred_adopt(proc_t p,
3532 struct _posix_spawn_posix_cred_info *px_pcred_info)
3533 {
3534 int error = 0;
3535
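/*
 * Note on ordering (editorial): the gid and supplementary groups are
 * adopted while the process may still hold superuser credentials;
 * setuid() is deliberately last, since dropping the uid first could
 * forfeit the privilege needed for the group changes.
 */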
3536 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GID) {
3537 struct setgid_args args = {
3538 .gid = px_pcred_info->pspci_gid,
3539 };
3540 error = setgid(p, &args, NULL);
3541 if (error) {
3542 return error;
3543 }
3544 }
3545
3546 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GROUPS) {
3547 error = setgroups_internal(p,
3548 px_pcred_info->pspci_ngroups,
3549 px_pcred_info->pspci_groups,
3550 px_pcred_info->pspci_gmuid);
3551 if (error) {
3552 return error;
3553 }
3554 }
3555
3556 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_UID) {
3557 struct setuid_args args = {
3558 .uid = px_pcred_info->pspci_uid,
3559 };
3560 error = setuid(p, &args, NULL);
3561 if (error) {
3562 return error;
3563 }
3564 }
3565 return 0;
3566 }
3567
3568 /*
3569 * posix_spawn
3570 *
3571 * Parameters: uap->pid Pointer to pid return area
3572 * uap->fname File name to exec
3573 * uap->argp Argument list
3574 * uap->envp Environment list
3575 *
3576 * Returns: 0 Success
3577 * EINVAL Invalid argument
3578 * ENOTSUP Not supported
3579 * ENOEXEC Executable file format error
3580 * exec_activate_image:EINVAL Invalid argument
3581 * exec_activate_image:EACCES Permission denied
3582 * exec_activate_image:EINTR Interrupted function
3583 * exec_activate_image:ENOMEM Not enough space
3584 * exec_activate_image:EFAULT Bad address
3585 * exec_activate_image:ENAMETOOLONG Filename too long
3586 * exec_activate_image:ENOEXEC Executable file format error
3587 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
3588 * exec_activate_image:EAUTH Image decryption failed
3589 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
3590 * exec_activate_image:???
3591 * mac_execve_enter:???
3592 *
3593 * TODO: Expect to need __mac_posix_spawn() at some point...
3594 * Handle posix_spawnattr_t
3595 * Handle posix_spawn_file_actions_t
3596 */
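/*
 * Userland usage sketch (illustrative only; clients reach this through the
 * libc wrapper declared in <spawn.h>, never the trap directly):
 *
 *	#include <spawn.h>
 *	#include <signal.h>
 *	extern char **environ;
 *
 *	posix_spawnattr_t attr;
 *	sigset_t def;
 *	pid_t child;
 *	char *argv[] = { "/bin/echo", "hello", NULL };
 *
 *	posix_spawnattr_init(&attr);
 *	sigfillset(&def);
 *	posix_spawnattr_setsigdefault(&attr, &def);
 *	posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETSIGDEF);
 *	int rc = posix_spawn(&child, argv[0], NULL, &attr, argv, environ);
 *	posix_spawnattr_destroy(&attr);
 */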
3597 int
3598 posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval)
3599 {
3600 proc_t p = ap;
3601 user_addr_t pid = uap->pid;
3602 int ival[2]; /* dummy retval for setpgid() */
3603 char *subsystem_root_path = NULL;
3604 struct image_params *imgp = NULL;
3605 struct vnode_attr *vap = NULL;
3606 struct vnode_attr *origvap = NULL;
3607 	struct uthread *uthread = 0;    /* compiler complains if not set to 0 */
3608 int error, sig;
3609 int is_64 = IS_64BIT_PROCESS(p);
3610 struct vfs_context context;
3611 struct user__posix_spawn_args_desc px_args = {};
3612 struct _posix_spawnattr px_sa = {};
3613 _posix_spawn_file_actions_t px_sfap = NULL;
3614 _posix_spawn_port_actions_t px_spap = NULL;
3615 struct __kern_sigaction vec;
3616 boolean_t spawn_no_exec = FALSE;
3617 boolean_t proc_transit_set = TRUE;
3618 boolean_t proc_signal_set = TRUE;
3619 boolean_t exec_done = FALSE;
3620 os_reason_t exec_failure_reason = NULL;
3621
3622 struct exec_port_actions port_actions = { };
3623 vm_size_t px_sa_offset = offsetof(struct _posix_spawnattr, psa_ports);
3624 task_t old_task = current_task();
3625 task_t new_task = NULL;
3626 boolean_t should_release_proc_ref = FALSE;
3627 void *inherit = NULL;
3628 uint8_t crash_behavior = 0;
3629 uint64_t crash_behavior_deadline = 0;
3630 #if CONFIG_EXCLAVES
3631 char *task_conclave_id = NULL;
3632 #endif
3633 #if CONFIG_PERSONAS
3634 struct _posix_spawn_persona_info *px_persona = NULL;
3635 #endif
3636 struct _posix_spawn_posix_cred_info *px_pcred_info = NULL;
3637 struct {
3638 struct image_params imgp;
3639 struct vnode_attr va;
3640 struct vnode_attr origva;
3641 } *__spawn_data;
3642
3643 /*
3644 * Allocate a big chunk for locals instead of using stack since these
3645 * structures are pretty big.
3646 */
3647 __spawn_data = kalloc_type(typeof(*__spawn_data), Z_WAITOK | Z_ZERO);
3648 if (__spawn_data == NULL) {
3649 error = ENOMEM;
3650 goto bad;
3651 }
3652 imgp = &__spawn_data->imgp;
3653 vap = &__spawn_data->va;
3654 origvap = &__spawn_data->origva;
3655
3656 /* Initialize the common data in the image_params structure */
3657 imgp->ip_user_fname = uap->path;
3658 imgp->ip_user_argv = uap->argv;
3659 imgp->ip_user_envv = uap->envp;
3660 imgp->ip_vattr = vap;
3661 imgp->ip_origvattr = origvap;
3662 imgp->ip_vfs_context = &context;
3663 imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT_ADDR : IMGPF_NONE);
3664 imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);
3665 imgp->ip_mac_return = 0;
3666 imgp->ip_px_persona = NULL;
3667 imgp->ip_px_pcred_info = NULL;
3668 imgp->ip_cs_error = OS_REASON_NULL;
3669 imgp->ip_simulator_binary = IMGPF_SB_DEFAULT;
3670 imgp->ip_subsystem_root_path = NULL;
3671 imgp->ip_inherited_shared_region_id = NULL;
3672 imgp->ip_inherited_jop_pid = 0;
3673 uthread_set_exec_data(current_uthread(), imgp);
3674
3675 if (uap->adesc != USER_ADDR_NULL) {
3676 if (is_64) {
3677 error = copyin(uap->adesc, &px_args, sizeof(px_args));
3678 } else {
3679 struct user32__posix_spawn_args_desc px_args32;
3680
3681 error = copyin(uap->adesc, &px_args32, sizeof(px_args32));
3682
3683 /*
3684 * Convert arguments descriptor from external 32 bit
3685 * representation to internal 64 bit representation
3686 */
3687 px_args.attr_size = px_args32.attr_size;
3688 px_args.attrp = CAST_USER_ADDR_T(px_args32.attrp);
3689 px_args.file_actions_size = px_args32.file_actions_size;
3690 px_args.file_actions = CAST_USER_ADDR_T(px_args32.file_actions);
3691 px_args.port_actions_size = px_args32.port_actions_size;
3692 px_args.port_actions = CAST_USER_ADDR_T(px_args32.port_actions);
3693 px_args.mac_extensions_size = px_args32.mac_extensions_size;
3694 px_args.mac_extensions = CAST_USER_ADDR_T(px_args32.mac_extensions);
3695 px_args.coal_info_size = px_args32.coal_info_size;
3696 px_args.coal_info = CAST_USER_ADDR_T(px_args32.coal_info);
3697 px_args.persona_info_size = px_args32.persona_info_size;
3698 px_args.persona_info = CAST_USER_ADDR_T(px_args32.persona_info);
3699 px_args.posix_cred_info_size = px_args32.posix_cred_info_size;
3700 px_args.posix_cred_info = CAST_USER_ADDR_T(px_args32.posix_cred_info);
3701 px_args.subsystem_root_path_size = px_args32.subsystem_root_path_size;
3702 px_args.subsystem_root_path = CAST_USER_ADDR_T(px_args32.subsystem_root_path);
3703 px_args.conclave_id_size = px_args32.conclave_id_size;
3704 px_args.conclave_id = CAST_USER_ADDR_T(px_args32.conclave_id);
3705 }
3706 if (error) {
3707 goto bad;
3708 }
3709
3710 if (px_args.attr_size != 0) {
3711 /*
3712 * We are not copying the port_actions pointer,
3713 * because we already have it from px_args.
3714 * This is a bit fragile: <rdar://problem/16427422>
3715 */
3716
3717 if ((error = copyin(px_args.attrp, &px_sa, px_sa_offset)) != 0) {
3718 goto bad;
3719 }
3720
3721 imgp->ip_px_sa = &px_sa;
3722 }
3723 if (px_args.file_actions_size != 0) {
3724 /* Limit file_actions to allowed number of open files */
3725 size_t maxfa_size = PSF_ACTIONS_SIZE(proc_limitgetcur_nofile(p));
3726
3727 if (px_args.file_actions_size < PSF_ACTIONS_SIZE(1) ||
3728 maxfa_size == 0 || px_args.file_actions_size > maxfa_size) {
3729 error = EINVAL;
3730 goto bad;
3731 }
3732
3733 px_sfap = kalloc_data(px_args.file_actions_size, Z_WAITOK);
3734 if (px_sfap == NULL) {
3735 error = ENOMEM;
3736 goto bad;
3737 }
3738 imgp->ip_px_sfa = px_sfap;
3739
3740 if ((error = copyin(px_args.file_actions, px_sfap,
3741 px_args.file_actions_size)) != 0) {
3742 goto bad;
3743 }
3744
3745 /* Verify that the action count matches the struct size */
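/* (a zero result presumably indicates overflow inside PSF_ACTIONS_SIZE()) */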
3746 size_t psfsize = PSF_ACTIONS_SIZE(px_sfap->psfa_act_count);
3747 if (psfsize == 0 || psfsize != px_args.file_actions_size) {
3748 error = EINVAL;
3749 goto bad;
3750 }
3751 }
3752 if (px_args.port_actions_size != 0) {
3753 /* Limit port_actions to one page of data */
3754 if (px_args.port_actions_size < PS_PORT_ACTIONS_SIZE(1) ||
3755 px_args.port_actions_size > PAGE_SIZE) {
3756 error = EINVAL;
3757 goto bad;
3758 }
3759
3760 px_spap = kalloc_data(px_args.port_actions_size, Z_WAITOK);
3761 if (px_spap == NULL) {
3762 error = ENOMEM;
3763 goto bad;
3764 }
3765 imgp->ip_px_spa = px_spap;
3766
3767 if ((error = copyin(px_args.port_actions, px_spap,
3768 px_args.port_actions_size)) != 0) {
3769 goto bad;
3770 }
3771
3772 /* Verify that the action count matches the struct size */
3773 size_t pasize = PS_PORT_ACTIONS_SIZE(px_spap->pspa_count);
3774 if (pasize == 0 || pasize != px_args.port_actions_size) {
3775 error = EINVAL;
3776 goto bad;
3777 }
3778 }
3779 #if CONFIG_PERSONAS
3780 /* copy in the persona info */
3781 if (px_args.persona_info_size != 0 && px_args.persona_info != 0) {
3782 /* for now, we need the exact same struct in user space */
3783 if (px_args.persona_info_size != sizeof(*px_persona)) {
3784 error = ERANGE;
3785 goto bad;
3786 }
3787
3788 px_persona = kalloc_data(px_args.persona_info_size, Z_WAITOK);
3789 if (px_persona == NULL) {
3790 error = ENOMEM;
3791 goto bad;
3792 }
3793 imgp->ip_px_persona = px_persona;
3794
3795 if ((error = copyin(px_args.persona_info, px_persona,
3796 px_args.persona_info_size)) != 0) {
3797 goto bad;
3798 }
3799 if ((error = spawn_validate_persona(px_persona)) != 0) {
3800 goto bad;
3801 }
3802 }
3803 #endif
3804 /* copy in the posix cred info */
3805 if (px_args.posix_cred_info_size != 0 && px_args.posix_cred_info != 0) {
3806 /* for now, we need the exact same struct in user space */
3807 if (px_args.posix_cred_info_size != sizeof(*px_pcred_info)) {
3808 error = ERANGE;
3809 goto bad;
3810 }
3811
3812 if (!kauth_cred_issuser(kauth_cred_get())) {
3813 error = EPERM;
3814 goto bad;
3815 }
3816
3817 px_pcred_info = kalloc_data(px_args.posix_cred_info_size, Z_WAITOK);
3818 if (px_pcred_info == NULL) {
3819 error = ENOMEM;
3820 goto bad;
3821 }
3822 imgp->ip_px_pcred_info = px_pcred_info;
3823
3824 if ((error = copyin(px_args.posix_cred_info, px_pcred_info,
3825 px_args.posix_cred_info_size)) != 0) {
3826 goto bad;
3827 }
3828
3829 if (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_GROUPS) {
3830 if (px_pcred_info->pspci_ngroups > NGROUPS_MAX) {
3831 error = EINVAL;
3832 goto bad;
3833 }
3834 }
3835 }
3836 #if CONFIG_MACF
3837 if (px_args.mac_extensions_size != 0) {
3838 if ((error = spawn_copyin_macpolicyinfo(&px_args, (struct ip_px_smpx_s *)&imgp->ip_px_smpx)) != 0) {
3839 goto bad;
3840 }
3841 }
3842 #endif /* CONFIG_MACF */
3843 if ((px_args.subsystem_root_path_size > 0) && (px_args.subsystem_root_path_size <= MAXPATHLEN)) {
3844 /*
3845 * If a valid-looking subsystem root has been
3846 * specified...
3847 */
3848 if (IOTaskHasEntitlement(old_task, SPAWN_SUBSYSTEM_ROOT_ENTITLEMENT)) {
3849 /*
3850 * ...AND the parent has the entitlement, copy
3851 * the subsystem root path in.
3852 */
3853 subsystem_root_path = zalloc_flags(ZV_NAMEI,
3854 Z_WAITOK | Z_ZERO | Z_NOFAIL);
3855
3856 if ((error = copyin(px_args.subsystem_root_path, subsystem_root_path, px_args.subsystem_root_path_size))) {
3857 goto bad;
3858 }
3859
3860 /* Paranoia */
3861 subsystem_root_path[px_args.subsystem_root_path_size - 1] = 0;
3862 }
3863 }
3864 #if CONFIG_EXCLAVES
3865
3866 /*
3867 	 * Calling exclaves_boot_wait() ensures that the conclave
3868 	 * id will only be set when exclaves are actually
3869 	 * supported/enabled. In practice this never blocks, since by
3870 	 * the time this is called the system will have booted to
3871 	 * EXCLAVECORE if exclaves are supported/enabled.
3872 */
3873 if ((px_args.conclave_id_size > 0) && (px_args.conclave_id_size <= MAXCONCLAVENAME) &&
3874 (exclaves_boot_wait(EXCLAVES_BOOT_STAGE_EXCLAVECORE) == KERN_SUCCESS)) {
3875 if (px_args.conclave_id) {
3876 if (imgp->ip_px_sa != NULL && (px_sa.psa_flags & POSIX_SPAWN_SETEXEC)) {
3877 				/* A conclave id can only be set for a true spawn */
3878 error = EINVAL;
3879 goto bad;
3880 }
3881 task_conclave_id = kalloc_data(MAXCONCLAVENAME,
3882 Z_WAITOK | Z_ZERO | Z_NOFAIL);
3883 if ((error = copyin(px_args.conclave_id, task_conclave_id, MAXCONCLAVENAME))) {
3884 goto bad;
3885 }
3886 task_conclave_id[MAXCONCLAVENAME - 1] = 0;
3887 }
3888 }
3889 #endif
3890 }
3891
3892 if (IOTaskHasEntitlement(old_task, SPAWN_SET_PANIC_CRASH_BEHAVIOR)) {
3893 /* Truncate to uint8_t since we only support 2 flags for now */
3894 crash_behavior = (uint8_t)px_sa.psa_crash_behavior;
3895 crash_behavior_deadline = px_sa.psa_crash_behavior_deadline;
3896 }
3897
3898 /* set uthread to parent */
3899 uthread = current_uthread();
3900
3901 /*
3902 * <rdar://6640530>; this does not result in a behaviour change
3903 * relative to Leopard, so there should not be any existing code
3904 * which depends on it.
3905 */
3906
3907 if (imgp->ip_px_sa != NULL) {
3908 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
3909 if ((psa->psa_options & PSA_OPTION_PLUGIN_HOST_DISABLE_A_KEYS) == PSA_OPTION_PLUGIN_HOST_DISABLE_A_KEYS) {
3910 imgp->ip_flags |= IMGPF_PLUGIN_HOST_DISABLE_A_KEYS;
3911 }
3912 #if (DEVELOPMENT || DEBUG)
3913 if ((psa->psa_options & PSA_OPTION_ALT_ROSETTA) == PSA_OPTION_ALT_ROSETTA) {
3914 imgp->ip_flags |= (IMGPF_ROSETTA | IMGPF_ALT_ROSETTA);
3915 }
3916 #endif
3917
3918 if ((error = exec_validate_spawnattr_policy(psa->psa_apptype)) != 0) {
3919 goto bad;
3920 }
3921 }
3922
3923 /*
3924 * If we don't have the extension flag that turns "posix_spawn()"
3925 * into "execve() with options", then we will be creating a new
3926 * process which does not inherit memory from the parent process,
3927 * which is one of the most expensive things about using fork()
3928 * and execve().
3929 */
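	/*
	 * Userland sketch of the "execve() with options" mode (Apple extension
	 * flag from <spawn.h>; illustrative only):
	 *
	 *	posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	 *	posix_spawn(NULL, path, NULL, &attr, argv, environ);	// replaces the caller on success
	 */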
3930 if (imgp->ip_px_sa == NULL || !(px_sa.psa_flags & POSIX_SPAWN_SETEXEC)) {
3931 /* Set the new task's coalition, if it is requested. */
3932 coalition_t coal[COALITION_NUM_TYPES] = { COALITION_NULL };
3933 #if CONFIG_COALITIONS
3934 int i, ncoals;
3935 kern_return_t kr = KERN_SUCCESS;
3936 struct _posix_spawn_coalition_info coal_info;
3937 int coal_role[COALITION_NUM_TYPES];
3938
3939 if (imgp->ip_px_sa == NULL || !px_args.coal_info) {
3940 goto do_fork1;
3941 }
3942
3943 memset(&coal_info, 0, sizeof(coal_info));
3944
3945 if (px_args.coal_info_size > sizeof(coal_info)) {
3946 px_args.coal_info_size = sizeof(coal_info);
3947 }
3948 error = copyin(px_args.coal_info,
3949 &coal_info, px_args.coal_info_size);
3950 if (error != 0) {
3951 goto bad;
3952 }
3953
3954 ncoals = 0;
3955 for (i = 0; i < COALITION_NUM_TYPES; i++) {
3956 uint64_t cid = coal_info.psci_info[i].psci_id;
3957 if (cid != 0) {
3958 /*
3959 * don't allow tasks which are not in a
3960 * privileged coalition to spawn processes
3961 * into coalitions other than their own
3962 */
3963 if (!task_is_in_privileged_coalition(proc_task(p), i) &&
3964 !IOTaskHasEntitlement(proc_task(p), COALITION_SPAWN_ENTITLEMENT)) {
3965 coal_dbg("ERROR: %d not in privilegd "
3966 "coalition of type %d",
3967 proc_getpid(p), i);
3968 spawn_coalitions_release_all(coal);
3969 error = EPERM;
3970 goto bad;
3971 }
3972
3973 coal_dbg("searching for coalition id:%llu", cid);
3974 /*
3975 * take a reference and activation on the
3976 * coalition to guard against free-while-spawn
3977 * races
3978 */
3979 coal[i] = coalition_find_and_activate_by_id(cid);
3980 if (coal[i] == COALITION_NULL) {
3981 coal_dbg("could not find coalition id:%llu "
3982 "(perhaps it has been terminated or reaped)", cid);
3983 /*
3984 * release any other coalition's we
3985 * may have a reference to
3986 */
3987 spawn_coalitions_release_all(coal);
3988 error = ESRCH;
3989 goto bad;
3990 }
3991 if (coalition_type(coal[i]) != i) {
3992 coal_dbg("coalition with id:%lld is not of type:%d"
3993 " (it's type:%d)", cid, i, coalition_type(coal[i]));
3994 spawn_coalitions_release_all(coal);
3995 error = ESRCH;
3996 goto bad;
3997 }
3998 coal_role[i] = coal_info.psci_info[i].psci_role;
3999 ncoals++;
4000 }
4001 }
4002 if (ncoals < COALITION_NUM_TYPES) {
4003 /*
4004 * If the user is attempting to spawn into a subset of
4005 * the known coalition types, then make sure they have
4006 * _at_least_ specified a resource coalition. If not,
4007 * the following fork1() call will implicitly force an
4008 * inheritance from 'p' and won't actually spawn the
4009 * new task into the coalitions the user specified.
4010 * (also the call to coalitions_set_roles will panic)
4011 */
4012 if (coal[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
4013 spawn_coalitions_release_all(coal);
4014 error = EINVAL;
4015 goto bad;
4016 }
4017 }
4018 do_fork1:
4019 #endif /* CONFIG_COALITIONS */
4020
4021 /*
4022 * note that this will implicitly inherit the
4023 * caller's persona (if it exists)
4024 */
4025 error = fork1(p, &imgp->ip_new_thread, PROC_CREATE_SPAWN, coal);
4026 /* returns a thread and task reference */
4027
4028 if (error == 0) {
4029 new_task = get_threadtask(imgp->ip_new_thread);
4030 }
4031 #if CONFIG_COALITIONS
4032 /* set the roles of this task within each given coalition */
4033 if (error == 0) {
4034 kr = coalitions_set_roles(coal, new_task, coal_role);
4035 if (kr != KERN_SUCCESS) {
4036 error = EINVAL;
4037 }
4038 if (kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_COALITION,
4039 MACH_COALITION_ADOPT))) {
4040 for (i = 0; i < COALITION_NUM_TYPES; i++) {
4041 if (coal[i] != COALITION_NULL) {
4042 /*
4043 * On 32-bit targets, uniqueid
4044 * will get truncated to 32 bits
4045 */
4046 KDBG_RELEASE(MACHDBG_CODE(
4047 DBG_MACH_COALITION,
4048 MACH_COALITION_ADOPT),
4049 coalition_id(coal[i]),
4050 get_task_uniqueid(new_task));
4051 }
4052 }
4053 }
4054 }
4055
4056 /* drop our references and activations - fork1() now holds them */
4057 spawn_coalitions_release_all(coal);
4058 #endif /* CONFIG_COALITIONS */
4059 if (error != 0) {
4060 goto bad;
4061 }
4062 imgp->ip_flags |= IMGPF_SPAWN; /* spawn w/o exec */
4063 spawn_no_exec = TRUE; /* used in later tests */
4064 } else {
4065 /* Adjust the user proc count */
4066 (void)chgproccnt(kauth_getruid(), 1);
4067 /*
4068 		 * For the execve case, create a new proc, task and thread
4069 		 * but don't make the proc visible to userland. After
4070 		 * image activation, the new proc takes the place of
4071 		 * the old proc in the pid hash and other lists that make
4072 		 * the proc visible to the system.
4073 */
4074 imgp->ip_new_thread = cloneproc(old_task, NULL, p, CLONEPROC_EXEC);
4075
4076 /* task and thread ref returned by cloneproc */
4077 if (imgp->ip_new_thread == NULL) {
4078 (void)chgproccnt(kauth_getruid(), -1);
4079 error = ENOMEM;
4080 goto bad;
4081 }
4082
4083 new_task = get_threadtask(imgp->ip_new_thread);
4084 imgp->ip_flags |= IMGPF_EXEC;
4085 }
4086
4087 p = (proc_t)get_bsdthreadtask_info(imgp->ip_new_thread);
4088
4089 if (spawn_no_exec) {
4090 /*
4091 * We had to wait until this point before firing the
4092 * proc:::create probe, otherwise p would not point to the
4093 * child process.
4094 */
4095 DTRACE_PROC1(create, proc_t, p);
4096 }
4097 assert(p != NULL);
4098
4099 if (subsystem_root_path) {
4100 /* If a subsystem root was specified, swap it in */
4101 char * old_subsystem_root_path = p->p_subsystem_root_path;
4102 p->p_subsystem_root_path = subsystem_root_path;
4103 subsystem_root_path = old_subsystem_root_path;
4104 }
4105
4106 p->p_crash_behavior = crash_behavior;
4107 p->p_crash_behavior_deadline = crash_behavior_deadline;
4108
4109 p->p_crash_count = px_sa.psa_crash_count;
4110 p->p_throttle_timeout = px_sa.psa_throttle_timeout;
4111
4112 /* We'll need the subsystem root for setting up Apple strings */
4113 imgp->ip_subsystem_root_path = p->p_subsystem_root_path;
4114
4115 context.vc_thread = imgp->ip_new_thread;
4116 context.vc_ucred = proc_ucred_unsafe(p); /* in init */
4117
4118 /*
4119 * Post fdt_fork(), pre exec_handle_sugid() - this is where we want
4120 * to handle the file_actions.
4121 */
4122
4123 /* Has spawn file actions? */
4124 if (imgp->ip_px_sfa != NULL) {
4125 /*
4126 * The POSIX_SPAWN_CLOEXEC_DEFAULT flag
4127 * is handled in exec_handle_file_actions().
4128 */
4129 #if CONFIG_AUDIT
4130 /*
4131 * The file actions auditing can overwrite the upath of
4132 * AUE_POSIX_SPAWN audit record. Save the audit record.
4133 */
4134 struct kaudit_record *save_uu_ar = uthread->uu_ar;
4135 uthread->uu_ar = NULL;
4136 #endif
4137 error = exec_handle_file_actions(imgp,
4138 imgp->ip_px_sa != NULL ? px_sa.psa_flags : 0);
4139 #if CONFIG_AUDIT
4140 /* Restore the AUE_POSIX_SPAWN audit record. */
4141 uthread->uu_ar = save_uu_ar;
4142 #endif
4143 if (error != 0) {
4144 goto bad;
4145 }
4146 }
4147
4148 /* Has spawn port actions? */
4149 if (imgp->ip_px_spa != NULL) {
4150 #if CONFIG_AUDIT
4151 /*
4152 * Do the same for the port actions as we did for the file
4153 * actions. Save the AUE_POSIX_SPAWN audit record.
4154 */
4155 struct kaudit_record *save_uu_ar = uthread->uu_ar;
4156 uthread->uu_ar = NULL;
4157 #endif
4158 error = exec_handle_port_actions(imgp, &port_actions);
4159 #if CONFIG_AUDIT
4160 /* Restore the AUE_POSIX_SPAWN audit record. */
4161 uthread->uu_ar = save_uu_ar;
4162 #endif
4163 if (error != 0) {
4164 goto bad;
4165 }
4166 }
4167
4168 /* Has spawn attr? */
4169 if (imgp->ip_px_sa != NULL) {
4170 /*
4171 		 * Reset UID/GID to parent's RUID/RGID; this works only
4172 * because the operation occurs before the call
4173 * to exec_handle_sugid() by the image activator called
4174 * from exec_activate_image().
4175 *
4176 * POSIX requires that any setuid/setgid bits on the process
4177 * image will take precedence over the spawn attributes
4178 * (re)setting them.
4179 *
4180 * Modifications to p_ucred must be guarded using the
4181 * proc's ucred lock. This prevents others from accessing
4182 * a garbage credential.
4183 */
4184 if (px_sa.psa_flags & POSIX_SPAWN_RESETIDS) {
4185 kauth_cred_proc_update(p, PROC_SETTOKEN_NONE,
4186 ^bool (kauth_cred_t parent __unused, kauth_cred_t model){
4187 return kauth_cred_model_setuidgid(model,
4188 kauth_cred_getruid(parent),
4189 kauth_cred_getrgid(parent));
4190 });
4191 }
4192
4193 if (imgp->ip_px_pcred_info) {
4194 if (!spawn_no_exec) {
4195 error = ENOTSUP;
4196 goto bad;
4197 }
4198
4199 error = spawn_posix_cred_adopt(p, imgp->ip_px_pcred_info);
4200 if (error != 0) {
4201 goto bad;
4202 }
4203 }
4204
4205 #if CONFIG_PERSONAS
4206 if (imgp->ip_px_persona != NULL) {
4207 if (!spawn_no_exec) {
4208 error = ENOTSUP;
4209 goto bad;
4210 }
4211
4212 /*
4213 * If we were asked to spawn a process into a new persona,
4214 * do the credential switch now (which may override the UID/GID
4215 * inherit done just above). It's important to do this switch
4216 * before image activation both for reasons stated above, and
4217 * to ensure that the new persona has access to the image/file
4218 * being executed.
4219 */
4220 error = spawn_persona_adopt(p, imgp->ip_px_persona);
4221 if (error != 0) {
4222 goto bad;
4223 }
4224 }
4225 #endif /* CONFIG_PERSONAS */
4226 #if !SECURE_KERNEL
4227 /*
4228 * Disable ASLR for the spawned process.
4229 *
4230 * But only do so if we are not embedded + RELEASE.
4231 * While embedded allows for a boot-arg (-disable_aslr)
4232 * to deal with this (which itself is only honored on
4233 * DEVELOPMENT or DEBUG builds of xnu), it is often
4234 * useful or necessary to disable ASLR on a per-process
4235 * basis for unit testing and debugging.
4236 */
4237 if (px_sa.psa_flags & _POSIX_SPAWN_DISABLE_ASLR) {
4238 OSBitOrAtomic(P_DISABLE_ASLR, &p->p_flag);
4239 }
4240 #endif /* !SECURE_KERNEL */
4241
4242 /* Randomize high bits of ASLR slide */
4243 if (px_sa.psa_flags & _POSIX_SPAWN_HIGH_BITS_ASLR) {
4244 imgp->ip_flags |= IMGPF_HIGH_BITS_ASLR;
4245 }
4246
4247 #if !SECURE_KERNEL
4248 /*
4249 		 * Forcibly allow execution from data pages for the spawned process
4250 		 * even if it would otherwise be disallowed by the architecture default.
4251 */
4252 if (px_sa.psa_flags & _POSIX_SPAWN_ALLOW_DATA_EXEC) {
4253 imgp->ip_flags |= IMGPF_ALLOW_DATA_EXEC;
4254 }
4255 #endif /* !SECURE_KERNEL */
4256
4257 #if __has_feature(ptrauth_calls)
4258 if (vm_shared_region_reslide_aslr && is_64 && (px_sa.psa_flags & _POSIX_SPAWN_RESLIDE)) {
4259 imgp->ip_flags |= IMGPF_RESLIDE;
4260 }
4261 #endif /* __has_feature(ptrauth_calls) */
4262
4263 if ((px_sa.psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) ==
4264 POSIX_SPAWN_PROC_TYPE_DRIVER) {
4265 imgp->ip_flags |= IMGPF_DRIVER;
4266 }
4267 }
4268
4269 /*
4270 * Disable ASLR during image activation. This occurs either if the
4271 * _POSIX_SPAWN_DISABLE_ASLR attribute was found above or if
4272 * P_DISABLE_ASLR was inherited from the parent process.
4273 */
4274 if (p->p_flag & P_DISABLE_ASLR) {
4275 imgp->ip_flags |= IMGPF_DISABLE_ASLR;
4276 }
4277
4278 /*
4279 * Clear transition flag so we won't hang if exec_activate_image() causes
4280 * an automount (and launchd does a proc sysctl to service it).
4281 *
4282 * <rdar://problem/6848672>, <rdar://problem/5959568>.
4283 */
4284 proc_transend(p, 0);
4285 proc_transit_set = 0;
4286
4287 if (!spawn_no_exec) {
4288 /*
4289 * Clear the signal lock in case of exec, since
4290 * image activation uses psignal on child process.
4291 */
4292 proc_signalend(p, 0);
4293 proc_signal_set = 0;
4294 }
4295
4296 #if MAC_SPAWN /* XXX */
4297 if (uap->mac_p != USER_ADDR_NULL) {
4298 error = mac_execve_enter(uap->mac_p, imgp);
4299 if (error) {
4300 goto bad;
4301 }
4302 }
4303 #endif
4304
4305
4306 /*
4307 * Activate the image.
4308 	 * Warning: if activation fails after the point of no return, this
4309 	 * returns 0 and pretends the call succeeded.
4310 */
4311 error = exec_activate_image(imgp);
4312 #if defined(HAS_APPLE_PAC)
4313 const uint8_t disable_user_jop = imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE;
4314 ml_task_set_jop_pid_from_shared_region(new_task, disable_user_jop);
4315 ml_task_set_disable_user_jop(new_task, disable_user_jop);
4316 ml_thread_set_disable_user_jop(imgp->ip_new_thread, disable_user_jop);
4317 ml_thread_set_jop_pid(imgp->ip_new_thread, new_task);
4318 #endif
4319
4320
4321 /*
4322 * If you've come here to add support for some new HW feature or some per-process or per-vmmap
4323 * or per-pmap flag that needs to be set before the process runs, or are in general lost, here
4324 * is some help. This summary was accurate as of Jul 2022. Use git log as needed. This comment
4325 * is here to prevent a recurrence of rdar://96307913
4326 *
4327 * In posix_spawn, following is what happens:
4328 * 1. Lots of prep and checking work
4329 * 2. Image activation via exec_activate_image(). The new task will get a new pmap here
4330 * 3. More prep work. (YOU ARE HERE)
4331 * 4. exec_resettextvp() is called
4332 * 5. At this point it is safe to check entitlements and code signatures
4333 * 6. task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_INITIAL_WAIT);
4334 * The new thread is allowed to run in kernel. It cannot yet get to userland
4335 * 7. More things done here. This is your chance to affect the task before it runs in
4336 * userspace
4337 * 8. task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_FINAL_WAIT);
4338 * The new thread is allowed to run in userland
4339 */
4340
4341 if (error == 0 && !spawn_no_exec) {
4342 p = proc_exec_switch_task(current_proc(), p, old_task, new_task, imgp, &inherit);
4343 /* proc ref returned */
4344 should_release_proc_ref = TRUE;
4345 }
4346
4347 if (error == 0) {
4348 /* process completed the exec, but may have failed after point of no return */
4349 exec_done = TRUE;
4350 }
4351
4352 #if CONFIG_EXCLAVES
4353 if (!error && task_conclave_id != NULL) {
4354 kern_return_t kr;
4355 kr = task_add_conclave(new_task, imgp->ip_vp, (int64_t)imgp->ip_arch_offset,
4356 task_conclave_id);
4357 if (kr != KERN_SUCCESS) {
4358 error = EINVAL;
4359 goto bad;
4360 }
4361 }
4362 #endif
4363
4364 if (!error && imgp->ip_px_sa != NULL) {
4365 thread_t child_thread = imgp->ip_new_thread;
4366 uthread_t child_uthread = get_bsdthread_info(child_thread);
4367
4368 /*
4369 * Because of POSIX_SPAWN_SETEXEC, we need to handle this after image
4370 		 * activation; otherwise an image activation failure (before the point of
4371 		 * no return) would leave the parent process in a modified state.
4372 */
4373 if (px_sa.psa_flags & POSIX_SPAWN_SETPGROUP) {
4374 struct setpgid_args spga;
4375 spga.pid = proc_getpid(p);
4376 spga.pgid = px_sa.psa_pgroup;
4377 /*
4378 * Effectively, call setpgid() system call; works
4379 * because there are no pointer arguments.
4380 */
4381 if ((error = setpgid(p, &spga, ival)) != 0) {
4382 goto bad_px_sa;
4383 }
4384 }
4385
4386 if (px_sa.psa_flags & POSIX_SPAWN_SETSID) {
4387 error = setsid_internal(p);
4388 if (error != 0) {
4389 goto bad_px_sa;
4390 }
4391 }
4392
4393 /*
4394 * If we have a spawn attr, and it contains signal related flags,
4395 		 * then we need to process them in the "context" of the new child
4396 * process, so we have to process it following image activation,
4397 * prior to making the thread runnable in user space. This is
4398 * necessitated by some signal information being per-thread rather
4399 * than per-process, and we don't have the new allocation in hand
4400 * until after the image is activated.
4401 */
4402
4403 /*
4404 		 * Mask a list of signals that would otherwise be unmasked, i.e.
4405 		 * signals that were unmasked in the parent; note that some signals
4406 * are not maskable.
4407 */
4408 if (px_sa.psa_flags & POSIX_SPAWN_SETSIGMASK) {
4409 child_uthread->uu_sigmask = (px_sa.psa_sigmask & ~sigcantmask);
4410 }
4411 /*
4412 		 * Reset a list of signals to their default action instead of
4413 		 * ignoring them, if they were ignored in the parent. Note that we pass
4414 * spawn_no_exec to setsigvec() to indicate that we called
4415 * fork1() and therefore do not need to call proc_signalstart()
4416 * internally.
4417 */
4418 if (px_sa.psa_flags & POSIX_SPAWN_SETSIGDEF) {
4419 vec.sa_handler = SIG_DFL;
4420 vec.sa_tramp = 0;
4421 vec.sa_mask = 0;
4422 vec.sa_flags = 0;
4423 for (sig = 1; sig < NSIG; sig++) {
4424 if (px_sa.psa_sigdefault & (1 << (sig - 1))) {
4425 error = setsigvec(p, child_thread, sig, &vec, spawn_no_exec);
4426 }
4427 }
4428 }
4429
4430 /*
4431 * Activate the CPU usage monitor, if requested. This is done via a task-wide, per-thread CPU
4432 * usage limit, which will generate a resource exceeded exception if any one thread exceeds the
4433 * limit.
4434 *
4435 		 * Userland gives us the interval in seconds, and the kernel SPI expects nanoseconds.
4436 */
4437 if ((px_sa.psa_cpumonitor_percent != 0) && (px_sa.psa_cpumonitor_percent < UINT8_MAX)) {
4438 /*
4439 * Always treat a CPU monitor activation coming from spawn as entitled. Requiring
4440 * an entitlement to configure the monitor a certain way seems silly, since
4441 			 * whoever is turning it on could just as easily choose not to do so.
4442 */
4443 error = proc_set_task_ruse_cpu(proc_task(p),
4444 TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
4445 (uint8_t)px_sa.psa_cpumonitor_percent,
4446 px_sa.psa_cpumonitor_interval * NSEC_PER_SEC,
4447 0, TRUE);
4448 }
4449
4450
4451 if (px_pcred_info &&
4452 (px_pcred_info->pspci_flags & POSIX_SPAWN_POSIX_CRED_LOGIN)) {
4453 /*
4454 * setlogin() must happen after setsid()
4455 */
4456 setlogin_internal(p, px_pcred_info->pspci_login);
4457 }
4458
4459 bad_px_sa:
4460 if (error != 0) {
4461 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
4462 proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_PSATTR, 0, 0);
4463 exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_PSATTR);
4464 }
4465 }
4466
4467 bad:
4468
4469 if (error == 0) {
4470 /* reset delay idle sleep status if set */
4471 #if CONFIG_DELAY_IDLE_SLEEP
4472 if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) {
4473 OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP), &p->p_flag);
4474 }
4475 #endif /* CONFIG_DELAY_IDLE_SLEEP */
4476 /* upon successful spawn, re/set the proc control state */
4477 if (imgp->ip_px_sa != NULL) {
4478 switch (px_sa.psa_pcontrol) {
4479 case POSIX_SPAWN_PCONTROL_THROTTLE:
4480 p->p_pcaction = P_PCTHROTTLE;
4481 break;
4482 case POSIX_SPAWN_PCONTROL_SUSPEND:
4483 p->p_pcaction = P_PCSUSP;
4484 break;
4485 case POSIX_SPAWN_PCONTROL_KILL:
4486 p->p_pcaction = P_PCKILL;
4487 break;
4488 case POSIX_SPAWN_PCONTROL_NONE:
4489 default:
4490 p->p_pcaction = 0;
4491 break;
4492 }
4494 }
4495 exec_resettextvp(p, imgp);
4496
4497 /*
4498 * Enable new task IPC access if exec_activate_image() returned an
4499 * active task. (Checks active bit in ipc_task_enable() under lock).
4500 * Must enable after resettextvp so that task port policies are not evaluated
4501 * until the csblob in the textvp is accurately reflected.
4502 */
4503 ipc_task_enable(new_task);
4504
4505 /* Set task exception ports now that we can check entitlements */
4506 if (imgp->ip_px_spa != NULL) {
4507 error = exec_handle_exception_port_actions(imgp, &port_actions);
4508 }
4509
4510 #if CONFIG_MEMORYSTATUS
4511 /* Set jetsam priority for DriverKit processes */
4512 if (px_sa.psa_apptype == POSIX_SPAWN_PROC_TYPE_DRIVER) {
4513 px_sa.psa_priority = JETSAM_PRIORITY_DRIVER_APPLE;
4514 }
4515
4516 /* Has jetsam attributes? */
4517 if (imgp->ip_px_sa != NULL && (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_SET)) {
4518 int32_t memlimit_active = px_sa.psa_memlimit_active;
4519 int32_t memlimit_inactive = px_sa.psa_memlimit_inactive;
4520
4521 memstat_priority_options_t priority_options = MEMSTAT_PRIORITY_OPTIONS_NONE;
4522 if ((px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY)) {
4523 priority_options |= MEMSTAT_PRIORITY_IS_EFFECTIVE;
4524 }
4525 memorystatus_set_priority(p, px_sa.psa_priority, 0,
4526 priority_options);
4527
4528 memlimit_options_t memlimit_options = MEMLIMIT_OPTIONS_NONE;
4529 if ((px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL)) {
4530 memlimit_options |= MEMLIMIT_ACTIVE_FATAL;
4531 }
4532 if ((px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL)) {
4533 memlimit_options |= MEMLIMIT_INACTIVE_FATAL;
4534 }
4535 if (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND) {
4536 /*
4537 * With 2-level high-water-mark support,
4538 * POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is no longer relevant,
4539 * as background limits are described via the inactive limit
4540 * slots. However, if the
4541 * POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND is passed in, we
4542 * attempt to mimic previous behavior by forcing the BG limit
4543 * data into the inactive/non-fatal mode and force the active
4544 * slots to hold system_wide/fatal mode.
4545 */
4546 memlimit_options |= MEMLIMIT_ACTIVE_FATAL;
4547 memlimit_options &= ~MEMLIMIT_INACTIVE_FATAL;
4548 memlimit_active = -1;
4549 }
4550 memorystatus_set_memlimits(p, memlimit_active, memlimit_inactive,
4551 memlimit_options);
4552 }
4553
4554 /* Has jetsam relaunch behavior? */
4555 if (imgp->ip_px_sa != NULL && (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MASK)) {
4556 /*
4557 * Launchd has passed in data indicating the behavior of this process in response to jetsam.
4558 * This data would be used by the jetsam subsystem to determine the position and protection
4559 * offered to this process on dirty -> clean transitions.
4560 */
4561 int relaunch_flags = P_MEMSTAT_RELAUNCH_UNKNOWN;
4562 switch (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MASK) {
4563 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_LOW:
4564 relaunch_flags = P_MEMSTAT_RELAUNCH_LOW;
4565 break;
4566 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_MED:
4567 relaunch_flags = P_MEMSTAT_RELAUNCH_MED;
4568 break;
4569 case POSIX_SPAWN_JETSAM_RELAUNCH_BEHAVIOR_HIGH:
4570 relaunch_flags = P_MEMSTAT_RELAUNCH_HIGH;
4571 break;
4572 default:
4573 break;
4574 }
4575 memorystatus_relaunch_flags_update(p, relaunch_flags);
4576 }
4577
4578 #endif /* CONFIG_MEMORYSTATUS */
4579 if (imgp->ip_px_sa != NULL && px_sa.psa_thread_limit > 0) {
4580 task_set_thread_limit(new_task, (uint16_t)px_sa.psa_thread_limit);
4581 }
4582
4583 #if CONFIG_PROC_RESOURCE_LIMITS
4584 if (imgp->ip_px_sa != NULL && (px_sa.psa_port_soft_limit > 0 || px_sa.psa_port_hard_limit > 0)) {
4585 task_set_port_space_limits(new_task, (uint32_t)px_sa.psa_port_soft_limit,
4586 (uint32_t)px_sa.psa_port_hard_limit);
4587 }
4588
4589 if (imgp->ip_px_sa != NULL && (px_sa.psa_filedesc_soft_limit > 0 || px_sa.psa_filedesc_hard_limit > 0)) {
4590 proc_set_filedesc_limits(p, (int)px_sa.psa_filedesc_soft_limit,
4591 (int)px_sa.psa_filedesc_hard_limit);
4592 }
4593 if (imgp->ip_px_sa != NULL && (px_sa.psa_kqworkloop_soft_limit > 0 || px_sa.psa_kqworkloop_hard_limit > 0)) {
4594 proc_set_kqworkloop_limits(p, (int)px_sa.psa_kqworkloop_soft_limit,
4595 (int)px_sa.psa_kqworkloop_hard_limit);
4596 }
4597 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
4598 }
4599
4600
4601 /*
4602 * If we successfully called fork1() or cloneproc, we always need
4603 * to do this. This is because we come back from that call with
4604 	 * signals blocked in the child, and we have to unblock them: in the exec
4605 	 * case they are unblocked before activation, but in the true spawn case
4606 * we want to wait until after we've performed any spawn actions.
4607 * This has to happen before process_signature(), which uses psignal.
4608 */
4609 if (proc_transit_set) {
4610 proc_transend(p, 0);
4611 }
4612
4613 /*
4614 * Drop the signal lock on the child which was taken on our
4615 * behalf by forkproc()/cloneproc() to prevent signals being
4616 * received by the child in a partially constructed state.
4617 */
4618 if (proc_signal_set) {
4619 proc_signalend(p, 0);
4620 }
4621
4622 if (error == 0) {
4623 /*
4624 * We need to initialize the bank context behind the protection of
4625 * the proc_trans lock to prevent a race with exit. We can't do this during
4626 * exec_activate_image because task_bank_init checks entitlements that
4627 * aren't loaded until subsequent calls (including exec_resettextvp).
4628 */
4629 error = proc_transstart(p, 0, 0);
4630
4631 if (error == 0) {
4632 task_bank_init(new_task);
4633 proc_transend(p, 0);
4634 }
4635
4636 #if __arm64__
4637 proc_footprint_entitlement_hacks(p, new_task);
4638 #endif /* __arm64__ */
4639
4640 #if XNU_TARGET_OS_OSX
4641 #define SINGLE_JIT_ENTITLEMENT "com.apple.security.cs.single-jit"
4642 if (IOTaskHasEntitlement(new_task, SINGLE_JIT_ENTITLEMENT)) {
4643 vm_map_single_jit(get_task_map(new_task));
4644 }
4645 #endif /* XNU_TARGET_OS_OSX */
4646
4647 #if __has_feature(ptrauth_calls)
4648 task_set_pac_exception_fatal_flag(new_task);
4649 #endif /* __has_feature(ptrauth_calls) */
4650 task_set_jit_exception_fatal_flag(new_task);
4651 }
4652
4653 /* Inherit task role from old task to new task for exec */
4654 if (error == 0 && !spawn_no_exec) {
4655 proc_inherit_task_role(new_task, old_task);
4656 }
4657
4658 #if CONFIG_ARCADE
4659 if (error == 0) {
4660 /*
4661 * Check to see if we need to trigger an arcade upcall AST now
4662 * that the vnode has been reset on the task.
4663 */
4664 arcade_prepare(new_task, imgp->ip_new_thread);
4665 }
4666 #endif /* CONFIG_ARCADE */
4667
4668 if (error == 0) {
4669 proc_apply_jit_and_vm_policies(imgp, p, new_task);
4670 }
4671
4672 /* Clear the initial wait on the thread before handling spawn policy */
4673 if (imgp && imgp->ip_new_thread) {
4674 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_INITIAL_WAIT);
4675 }
4676
4677 /*
4678 * Apply the spawnattr policy, apptype (which primes the task for importance donation),
4679 * and bind any portwatch ports to the new task.
4680 * This must be done after the exec so that the child's thread is ready,
4681 * and after the in transit state has been released, because priority is
4682 * dropped here so we need to be prepared for a potentially long preemption interval
4683 *
4684 * TODO: Consider splitting this up into separate phases
4685 */
4686 if (error == 0 && imgp->ip_px_sa != NULL) {
4687 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
4688
4689 error = exec_handle_spawnattr_policy(p, imgp->ip_new_thread, psa->psa_apptype, psa->psa_qos_clamp,
4690 psa->psa_darwin_role, &port_actions);
4691 }
4692
4693 /* Transfer the turnstile watchport boost to new task if in exec */
4694 if (error == 0 && !spawn_no_exec) {
4695 task_transfer_turnstile_watchports(old_task, new_task, imgp->ip_new_thread);
4696 }
4697
4698 if (error == 0 && imgp->ip_px_sa != NULL) {
4699 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
4700
4701 if (psa->psa_no_smt) {
4702 task_set_no_smt(new_task);
4703 }
4704 if (psa->psa_tecs) {
4705 task_set_tecs(new_task);
4706 }
4707 }
4708
4709 if (error == 0 && imgp->ip_px_sa != NULL) {
4710 struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
4711
4712 if (psa->psa_options & PSA_OPTION_DATALESS_IOPOLICY) {
4713 struct _iopol_param_t iop_param = {
4714 .iop_scope = IOPOL_SCOPE_PROCESS,
4715 .iop_iotype = IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES,
4716 .iop_policy = psa->psa_dataless_iopolicy,
4717 };
4718 error = iopolicysys_vfs_materialize_dataless_files(p, IOPOL_CMD_SET, iop_param.iop_scope,
4719 iop_param.iop_policy, &iop_param);
4720 }
4721 }
4722
4723 if (error == 0) {
4724 /* Apply the main thread qos */
4725 thread_t main_thread = imgp->ip_new_thread;
4726 task_set_main_thread_qos(new_task, main_thread);
4727 }
4728
4729 /*
4730 * Release any ports we kept around for binding to the new task
4731 * We need to release the rights even if the posix_spawn has failed.
4732 */
4733 if (imgp->ip_px_spa != NULL) {
4734 exec_port_actions_destroy(&port_actions);
4735 }
4736
4737 /*
4738 * We have to delay operations which might throw a signal until after
4739 * the signals have been unblocked; however, we want that to happen
4740 * after exec_resettextvp() so that the textvp is correct when they
4741 * fire.
4742 */
4743 if (error == 0) {
4744 error = process_signature(p, imgp);
4745
4746 /*
4747 * Pay for our earlier safety; deliver the delayed signals from
4748 * the incomplete spawn process now that it's complete.
4749 */
4750 if (imgp != NULL && spawn_no_exec && (p->p_lflag & P_LTRACED)) {
4751 psignal_vfork(p, proc_task(p), imgp->ip_new_thread, SIGTRAP);
4752 }
4753
4754 if (error == 0 && !spawn_no_exec) {
4755 KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC),
4756 proc_getpid(p));
4757 }
4758 }
4759
4760 if (spawn_no_exec) {
4761 /* flag the 'fork' has occurred */
4762 proc_knote(p->p_pptr, NOTE_FORK | proc_getpid(p));
4763 }
4764
4765 /* flag exec has occurred, notify only if it has not failed due to FP Key error */
4766 if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) {
4767 proc_knote(p, NOTE_EXEC);
4768 }
4769
4770 if (imgp != NULL) {
4771 uthread_set_exec_data(current_uthread(), NULL);
4772 if (imgp->ip_vp) {
4773 vnode_put(imgp->ip_vp);
4774 }
4775 if (imgp->ip_scriptvp) {
4776 vnode_put(imgp->ip_scriptvp);
4777 }
4778 if (imgp->ip_strings) {
4779 execargs_free(imgp);
4780 }
4781 if (imgp->ip_free_map) {
4782 /* Free the map after dropping iocount on vnode to avoid deadlock */
4783 vm_map_deallocate(imgp->ip_free_map);
4784 }
4785 kfree_data(imgp->ip_px_sfa,
4786 px_args.file_actions_size);
4787 kfree_data(imgp->ip_px_spa,
4788 px_args.port_actions_size);
4789 #if CONFIG_PERSONAS
4790 kfree_data(imgp->ip_px_persona,
4791 px_args.persona_info_size);
4792 #endif
4793 kfree_data(imgp->ip_px_pcred_info,
4794 px_args.posix_cred_info_size);
4795
4796 if (subsystem_root_path != NULL) {
4797 zfree(ZV_NAMEI, subsystem_root_path);
4798 }
4799 #if CONFIG_MACF
4800 struct ip_px_smpx_s *px_s = &imgp->ip_px_smpx;
4801 kfree_data(px_s->array, px_args.mac_extensions_size);
4802 kfree_data(px_s->data, (vm_size_t)px_s->datalen);
4803
4804 if (imgp->ip_execlabelp) {
4805 mac_cred_label_free(imgp->ip_execlabelp);
4806 imgp->ip_execlabelp = NULL;
4807 }
4808 if (imgp->ip_scriptlabelp) {
4809 mac_vnode_label_free(imgp->ip_scriptlabelp);
4810 imgp->ip_scriptlabelp = NULL;
4811 }
4812 if (imgp->ip_cs_error != OS_REASON_NULL) {
4813 os_reason_free(imgp->ip_cs_error);
4814 imgp->ip_cs_error = OS_REASON_NULL;
4815 }
4816 if (imgp->ip_inherited_shared_region_id != NULL) {
4817 kfree_data(imgp->ip_inherited_shared_region_id,
4818 strlen(imgp->ip_inherited_shared_region_id) + 1);
4819 imgp->ip_inherited_shared_region_id = NULL;
4820 }
4821 #endif
4822 }
4823
4824 #if CONFIG_DTRACE
4825 if (spawn_no_exec) {
4826 /*
4827 * In the original DTrace reference implementation,
4828 * posix_spawn() was a libc routine that just
4829 * did vfork(2) then exec(2). Thus the proc::: probes
4830 * are very fork/exec oriented. The details of this
4831 		 * in-kernel implementation of posix_spawn() are different
4832 * (while producing the same process-observable effects)
4833 * particularly w.r.t. errors, and which thread/process
4834 * is constructing what on behalf of whom.
4835 */
4836 if (error) {
4837 DTRACE_PROC1(spawn__failure, int, error);
4838 } else {
4839 DTRACE_PROC(spawn__success);
4840 /*
4841 * Some DTrace scripts, e.g. newproc.d in
4842 			 * /usr/bin, rely on the 'exec-success'
4843 * probe being fired in the child after the
4844 * new process image has been constructed
4845 * in order to determine the associated pid.
4846 *
4847 * So, even though the parent built the image
4848 * here, for compatibility, mark the new thread
4849 * so 'exec-success' fires on it as it leaves
4850 * the kernel.
4851 */
4852 dtrace_thread_didexec(imgp->ip_new_thread);
4853 }
4854 } else {
4855 if (error) {
4856 DTRACE_PROC1(exec__failure, int, error);
4857 } else {
4858 dtrace_thread_didexec(imgp->ip_new_thread);
4859 }
4860 }
4861
4862 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
4863 (*dtrace_proc_waitfor_hook)(p);
4864 }
4865 #endif
4866
4867 #if CONFIG_AUDIT
4868 if (!error && AUDIT_ENABLED() && p) {
4869 /* Add the CDHash of the new process to the audit record */
4870 uint8_t *cdhash = cs_get_cdhash(p);
4871 if (cdhash) {
4872 AUDIT_ARG(data, cdhash, sizeof(uint8_t), CS_CDHASH_LEN);
4873 }
4874 }
4875 #endif
4876
4877 /* terminate the new task if exec failed */
4878 if (new_task != NULL && task_is_exec_copy(new_task)) {
4879 task_terminate_internal(new_task);
4880 }
4881
4882 if (exec_failure_reason && !spawn_no_exec) {
4883 psignal_with_reason(p, SIGKILL, exec_failure_reason);
4884 exec_failure_reason = NULL;
4885 }
4886
4887 /* Return to both the parent and the child? */
4888 if (imgp != NULL && spawn_no_exec) {
4889 /*
4890 * If the parent wants the pid, copy it out
4891 */
4892 if (error == 0 && pid != USER_ADDR_NULL) {
4893 _Static_assert(sizeof(pid_t) == 4, "posix_spawn() assumes a 32-bit pid_t");
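			/* copyout_atomic32() needs a naturally aligned destination; fall back to suword() otherwise */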
4894 bool aligned = (pid & 3) == 0;
4895 if (aligned) {
4896 (void)copyout_atomic32(proc_getpid(p), pid);
4897 } else {
4898 (void)suword(pid, proc_getpid(p));
4899 }
4900 }
4901 retval[0] = error;
4902
4903 /*
4904 		 * If we had an error, perform an internal reap; this is
4905 * entirely safe, as we have a real process backing us.
4906 */
4907 if (error) {
4908 proc_list_lock();
4909 p->p_listflag |= P_LIST_DEADPARENT;
4910 proc_list_unlock();
4911 proc_lock(p);
4912 /* make sure no one else has killed it off... */
4913 if (p->p_stat != SZOMB && p->exit_thread == NULL) {
4914 p->exit_thread = current_thread();
4915 p->p_posix_spawn_failed = true;
4916 proc_unlock(p);
4917 exit1(p, 1, (int *)NULL);
4918 } else {
4919 /* someone is doing it for us; just skip it */
4920 proc_unlock(p);
4921 }
4922 }
4923 }
4924
4925 /*
4926 	 * Do not terminate the current task if proc_exec_switch_task() did not
4927 	 * switch the tasks; terminating the current task without the switch would
4928 	 * result in losing the SIGKILL status.
4929 */
4930 if (task_did_exec(old_task)) {
4931 /* Terminate the current task, since exec will start in new task */
4932 task_terminate_internal(old_task);
4933 }
4934
4935 /* Release the thread ref returned by cloneproc/fork1 */
4936 if (imgp != NULL && imgp->ip_new_thread) {
4937 /* clear the exec complete flag if there is an error before point of no-return */
4938 uint32_t clearwait_flags = TCRW_CLEAR_FINAL_WAIT;
4939 if (!spawn_no_exec && !exec_done && error != 0) {
4940 clearwait_flags |= TCRW_CLEAR_EXEC_COMPLETE;
4941 }
4942 /* wake up the new thread */
4943 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), clearwait_flags);
4944 thread_deallocate(imgp->ip_new_thread);
4945 imgp->ip_new_thread = NULL;
4946 }
4947
4948 /* Release the ref returned by cloneproc/fork1 */
4949 if (new_task) {
4950 task_deallocate(new_task);
4951 new_task = NULL;
4952 }
4953
4954 if (should_release_proc_ref) {
4955 proc_rele(p);
4956 }
4957
4958 kfree_type(typeof(*__spawn_data), __spawn_data);
4959
4960 if (inherit != NULL) {
4961 ipc_importance_release(inherit);
4962 }
4963
4964 #if CONFIG_EXCLAVES
4965 if (task_conclave_id != NULL) {
4966 kfree_data(task_conclave_id, MAXCONCLAVENAME);
4967 }
4968 #endif
4969
4970 assert(spawn_no_exec || exec_failure_reason == NULL);
4971 return error;
4972 }
4973
4974 /*
4975 * proc_exec_switch_task
4976 *
4977 * Parameters: old_proc proc before exec
4978 * new_proc proc after exec
4979 * old_task task before exec
4980 * new_task task after exec
4981 * imgp image params
4982 * inherit resulting importance linkage
4983 *
4984 * Returns: proc.
4985 *
4986 * Note: The function will switch proc in pid hash from old proc to new proc.
4987 * The switch needs to happen after draining all proc refs and inside
4988 * a proc list lock. In the case of failure to switch the proc, which
4989 * might happen if the process received a SIGKILL or jetsam killed it,
4990  *		it will make sure that the new task terminates. User proc ref returned
4991 * to caller.
4992 *
4993  *		This function is called after the point of no return; in the case of a
4994  *		failure to switch, it will terminate the new task, swallow the
4995  *		error, and let the terminated process complete exec and die.
4996 */
4997 proc_t
4998 proc_exec_switch_task(proc_t old_proc, proc_t new_proc, task_t old_task, task_t new_task, struct image_params *imgp, void **inherit)
4999 {
5000 boolean_t task_active;
5001 boolean_t proc_active;
5002 boolean_t thread_active;
5003 boolean_t reparent_traced_child = FALSE;
5004 thread_t old_thread = current_thread();
5005 thread_t new_thread = imgp->ip_new_thread;
5006
5007 thread_set_exec_promotion(old_thread);
5008 old_proc = proc_refdrain_will_exec(old_proc);
5009
5010 new_proc = proc_refdrain_will_exec(new_proc);
5011 /* extra proc ref returned to the caller */
5012
5013 assert(get_threadtask(new_thread) == new_task);
5014 task_active = task_is_active(new_task);
5015 proc_active = !(old_proc->p_lflag & P_LEXIT);
5016
5017 /* Check if the current thread is not aborted due to SIGKILL */
5018 thread_active = thread_is_active(old_thread);
5019
5020 /*
5021 * Do not switch the proc if the new task or proc is already terminated
5022 	 * as a result of an error in exec past the point of no return
5023 */
5024 if (proc_active && task_active && thread_active) {
5025 uthread_t new_uthread = get_bsdthread_info(new_thread);
5026 uthread_t old_uthread = current_uthread();
5027
5028 /* Clear dispatchqueue and workloop ast offset */
5029 new_proc->p_dispatchqueue_offset = 0;
5030 new_proc->p_dispatchqueue_serialno_offset = 0;
5031 new_proc->p_dispatchqueue_label_offset = 0;
5032 new_proc->p_return_to_kernel_offset = 0;
5033 new_proc->p_pthread_wq_quantum_offset = 0;
5034
5035 /* If old_proc is session leader, change the leader to new proc */
5036 session_replace_leader(old_proc, new_proc);
5037
5038 proc_lock(old_proc);
5039
5040 /* Copy the signal state, dtrace state and set bsd ast on new thread */
5041 act_set_astbsd(new_thread);
5042 new_uthread->uu_siglist |= old_uthread->uu_siglist;
5043 new_uthread->uu_siglist |= old_proc->p_siglist;
5044 new_uthread->uu_sigwait = old_uthread->uu_sigwait;
5045 new_uthread->uu_sigmask = old_uthread->uu_sigmask;
5046 new_uthread->uu_oldmask = old_uthread->uu_oldmask;
5047 new_uthread->uu_exit_reason = old_uthread->uu_exit_reason;
5048 #if CONFIG_DTRACE
5049 new_uthread->t_dtrace_sig = old_uthread->t_dtrace_sig;
5050 new_uthread->t_dtrace_stop = old_uthread->t_dtrace_stop;
5051 new_uthread->t_dtrace_resumepid = old_uthread->t_dtrace_resumepid;
5052 assert(new_uthread->t_dtrace_scratch == NULL);
5053 new_uthread->t_dtrace_scratch = old_uthread->t_dtrace_scratch;
5054
5055 old_uthread->t_dtrace_sig = 0;
5056 old_uthread->t_dtrace_stop = 0;
5057 old_uthread->t_dtrace_resumepid = 0;
5058 old_uthread->t_dtrace_scratch = NULL;
5059 #endif
5060
5061 #if CONFIG_PROC_UDATA_STORAGE
5062 new_proc->p_user_data = old_proc->p_user_data;
5063 #endif /* CONFIG_PROC_UDATA_STORAGE */
5064
5065 /* Copy the resource accounting info */
5066 thread_copy_resource_info(new_thread, current_thread());
5067
5068 /* Clear the exit reason and signal state on old thread */
5069 old_uthread->uu_exit_reason = NULL;
5070 old_uthread->uu_siglist = 0;
5071
5072 task_set_did_exec_flag(old_task);
5073 task_clear_exec_copy_flag(new_task);
5074
5075 task_copy_fields_for_exec(new_task, old_task);
5076
5077 /*
5078 * Need to transfer pending watch port boosts to the new task
5079 * while still making sure that the old task remains in the
5080 * importance linkage. Create an importance linkage from old task
5081 * to new task, then switch the task importance base of old task
5082 * and new task. After the switch the port watch boost will be
5083 * boosting the new task and new task will be donating importance
5084 * to old task.
5085 */
5086 *inherit = ipc_importance_exec_switch_task(old_task, new_task);
5087
5088 /* Transfer parent's ptrace state to child */
5089 new_proc->p_lflag &= ~(P_LTRACED | P_LSIGEXC | P_LNOATTACH);
5090 new_proc->p_lflag |= (old_proc->p_lflag & (P_LTRACED | P_LSIGEXC | P_LNOATTACH));
5091 new_proc->p_oppid = old_proc->p_oppid;
5092
5093 if (old_proc->p_pptr != new_proc->p_pptr) {
5094 reparent_traced_child = TRUE;
5095 new_proc->p_lflag |= P_LTRACE_WAIT;
5096 }
5097
5098 proc_unlock(old_proc);
5099
5100 /* Update the list of proc knotes */
5101 proc_transfer_knotes(old_proc, new_proc);
5102
5103 /* Update the proc interval timers */
5104 proc_inherit_itimers(old_proc, new_proc);
5105
5106 proc_list_lock();
5107
5108 /* Insert the new proc in child list of parent proc */
5109 p_reparentallchildren(old_proc, new_proc);
5110
5111 /* Switch proc in pid hash */
5112 phash_replace_locked(old_proc, new_proc);
5113
5114 /* Transfer the shadow flag to old proc */
5115 os_atomic_andnot(&new_proc->p_refcount, P_REF_SHADOW, relaxed);
5116 os_atomic_or(&old_proc->p_refcount, P_REF_SHADOW, relaxed);
5117
5118 /* Change init proc if launchd exec */
5119 if (old_proc == initproc) {
5120 /* Take the ref on new proc after proc_refwake_did_exec */
5121 initproc = new_proc;
5122 /* Drop the proc ref on old proc */
5123 proc_rele(old_proc);
5124 }
5125
5126 proc_list_unlock();
5127 #if CONFIG_EXCLAVES
5128 if (task_inherit_conclave(old_task, new_task, imgp->ip_vp,
5129 (int64_t)imgp->ip_arch_offset) != KERN_SUCCESS) {
5130 task_terminate_internal(new_task);
5131 }
5132 #endif
5133 } else {
5134 task_terminate_internal(new_task);
5135 }
5136
5137 proc_refwake_did_exec(new_proc);
5138 proc_refwake_did_exec(old_proc);
5139
5140 /* Take a ref on initproc if it changed */
5141 if (new_proc == initproc) {
5142 initproc = proc_ref(new_proc, false);
5143 assert(initproc != PROC_NULL);
5144 }
5145
5146 thread_clear_exec_promotion(old_thread);
5147 proc_rele(old_proc);
5148
5149 if (reparent_traced_child) {
5150 proc_t pp = proc_parent(old_proc);
5151 assert(pp != PROC_NULL);
5152
5153 proc_reparentlocked(new_proc, pp, 1, 0);
5154 proc_rele(pp);
5155
5156 proc_lock(new_proc);
5157 new_proc->p_lflag &= ~P_LTRACE_WAIT;
5158 proc_unlock(new_proc);
5159 }
5160
5161 return new_proc;
5162 }
5163
5164 /*
5165 * execve
5166 *
5167 * Parameters: uap->fname File name to exec
5168 * uap->argp Argument list
5169 * uap->envp Environment list
5170 *
5171 * Returns: 0 Success
5172 * __mac_execve:EINVAL Invalid argument
5173 * __mac_execve:ENOTSUP Invalid argument
5174 * __mac_execve:EACCES Permission denied
5175 * __mac_execve:EINTR Interrupted function
5176 * __mac_execve:ENOMEM Not enough space
5177 * __mac_execve:EFAULT Bad address
5178 * __mac_execve:ENAMETOOLONG Filename too long
5179 * __mac_execve:ENOEXEC Executable file format error
5180 * __mac_execve:ETXTBSY Text file busy [misuse of error code]
5181 * __mac_execve:???
5182 *
5183 * TODO: Dynamic linker header address on stack is copied via suword()
5184 */
5185 /* ARGSUSED */
5186 int
5187 execve(proc_t p, struct execve_args *uap, int32_t *retval)
5188 {
5189 struct __mac_execve_args muap;
5190 int err;
5191
5192 memoryshot(DBG_VM_EXECVE, DBG_FUNC_NONE);
5193
5194 muap.fname = uap->fname;
5195 muap.argp = uap->argp;
5196 muap.envp = uap->envp;
5197 muap.mac_p = USER_ADDR_NULL;
5198 err = __mac_execve(p, &muap, retval);
5199
5200 return err;
5201 }
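/*
 * Illustrative user-space sketch (not part of the kernel, compiled out):
 * a minimal caller of execve(2), showing the argv/envp conventions the
 * path above consumes.  Both vectors are NULL-terminated arrays of
 * pointers, and argv[0] conventionally names the program.
 */
#if 0 /* user-space example only */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char *argv[] = { "/bin/echo", "hello", NULL };
	char *envp[] = { "GREETING=hi", NULL };

	execve(argv[0], argv, envp);	/* returns only on error */
	perror("execve");
	return 1;
}
#endif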
5202
5203 /*
5204 * __mac_execve
5205 *
5206 * Parameters: uap->fname File name to exec
5207 * uap->argp Argument list
5208 * uap->envp Environment list
5209 * uap->mac_p MAC label supplied by caller
5210 *
5211 * Returns: 0 Success
5212 * EINVAL Invalid argument
5213 * ENOTSUP Not supported
5214 * ENOEXEC Executable file format error
5215 * exec_activate_image:EINVAL Invalid argument
5216 * exec_activate_image:EACCES Permission denied
5217 * exec_activate_image:EINTR Interrupted function
5218 * exec_activate_image:ENOMEM Not enough space
5219 * exec_activate_image:EFAULT Bad address
5220 * exec_activate_image:ENAMETOOLONG Filename too long
5221 * exec_activate_image:ENOEXEC Executable file format error
5222 * exec_activate_image:ETXTBSY Text file busy [misuse of error code]
5223 * exec_activate_image:EBADEXEC The executable is corrupt/unknown
5224 * exec_activate_image:???
5225 * mac_execve_enter:???
5226 *
5227 * TODO: Dynamic linker header address on stack is copied via suword()
5228 */
5229 int
5230 __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval __unused)
5231 {
5232 struct image_params *imgp = NULL;
5233 struct vnode_attr *vap = NULL;
5234 struct vnode_attr *origvap = NULL;
5235 int error;
5236 int is_64 = IS_64BIT_PROCESS(p);
5237 struct vfs_context context;
5238 struct uthread *uthread = NULL;
5239 task_t old_task = current_task();
5240 task_t new_task = NULL;
5241 boolean_t should_release_proc_ref = FALSE;
5242 boolean_t exec_done = FALSE;
5243 void *inherit = NULL;
5244 struct {
5245 struct image_params imgp;
5246 struct vnode_attr va;
5247 struct vnode_attr origva;
5248 } *__execve_data;
5249
5250 /* Allocate a big chunk for locals instead of using the stack, since these
5251 * structures are pretty big.
5252 */
5253 __execve_data = kalloc_type(typeof(*__execve_data), Z_WAITOK | Z_ZERO);
5254 if (__execve_data == NULL) {
5255 error = ENOMEM;
5256 goto exit_with_error;
5257 }
5258 imgp = &__execve_data->imgp;
5259 vap = &__execve_data->va;
5260 origvap = &__execve_data->origva;
5261
5262 /* Initialize the common data in the image_params structure */
5263 imgp->ip_user_fname = uap->fname;
5264 imgp->ip_user_argv = uap->argp;
5265 imgp->ip_user_envv = uap->envp;
5266 imgp->ip_vattr = vap;
5267 imgp->ip_origvattr = origvap;
5268 imgp->ip_vfs_context = &context;
5269 imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT_ADDR : IMGPF_NONE) | ((p->p_flag & P_DISABLE_ASLR) ? IMGPF_DISABLE_ASLR : IMGPF_NONE);
5270 imgp->ip_seg = (is_64 ? UIO_USERSPACE64 : UIO_USERSPACE32);
5271 imgp->ip_mac_return = 0;
5272 imgp->ip_cs_error = OS_REASON_NULL;
5273 imgp->ip_simulator_binary = IMGPF_SB_DEFAULT;
5274 imgp->ip_subsystem_root_path = NULL;
5275 uthread_set_exec_data(current_uthread(), imgp);
5276
5277 #if CONFIG_MACF
5278 if (uap->mac_p != USER_ADDR_NULL) {
5279 error = mac_execve_enter(uap->mac_p, imgp);
5280 if (error) {
5281 goto exit_with_error;
5282 }
5283 }
5284 #endif
5285 uthread = current_uthread();
5286 {
5287 imgp->ip_flags |= IMGPF_EXEC;
5288
5289 /* Adjust the user proc count */
5290 (void)chgproccnt(kauth_getruid(), 1);
5291 /*
5292 * For execve case, create a new proc, task and thread
5293 * but don't make the proc visible to userland. After
5294 * image activation, the new proc takes the place of
5295 * the old proc in the pid hash and other lists that make
5296 * the proc visible to the system.
5297 */
5298 imgp->ip_new_thread = cloneproc(old_task, NULL, p, CLONEPROC_EXEC);
5299 /* task and thread ref returned by cloneproc */
5300 if (imgp->ip_new_thread == NULL) {
5301 (void)chgproccnt(kauth_getruid(), -1);
5302 error = ENOMEM;
5303 goto exit_with_error;
5304 }
5305
5306 new_task = get_threadtask(imgp->ip_new_thread);
5307 }
5308
5309 p = (proc_t)get_bsdthreadtask_info(imgp->ip_new_thread);
5310
5311 context.vc_thread = imgp->ip_new_thread;
5312 context.vc_ucred = kauth_cred_proc_ref(p); /* XXX must NOT be kauth_cred_get() */
5313
5314 imgp->ip_subsystem_root_path = p->p_subsystem_root_path;
5315
5316 proc_transend(p, 0);
5317 proc_signalend(p, 0);
5318
5319
5320 /*
5321 * Activate the image.
5322 * Warning: If activation fails past the point of no return, this returns an
5323 * error of 0 and pretends the call succeeded.
5324 */
5325 error = exec_activate_image(imgp);
5326 /* thread and task ref returned for vfexec case */
5327
5328 if (imgp->ip_new_thread != NULL) {
5329 /*
5330 * task reference might be returned by exec_activate_image
5331 * for vfexec.
5332 */
5333 new_task = get_threadtask(imgp->ip_new_thread);
5334 #if defined(HAS_APPLE_PAC)
5335 ml_task_set_disable_user_jop(new_task, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
5336 ml_thread_set_disable_user_jop(imgp->ip_new_thread, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE);
5337 #endif
5338 }
5339
5340 if (!error) {
5341 p = proc_exec_switch_task(current_proc(), p, old_task, new_task, imgp, &inherit);
5342 /* proc ref returned */
5343 should_release_proc_ref = TRUE;
5344 }
5345
5346 kauth_cred_unref(&context.vc_ucred);
5347
5348 if (!error) {
5349 exec_done = TRUE;
5350 assert(imgp->ip_new_thread != NULL);
5351
5352 exec_resettextvp(p, imgp);
5353 /*
5354 * Enable new task IPC access if exec_activate_image() returned an
5355 * active task. (Checks active bit in ipc_task_enable() under lock).
5356 * Must enable after resettextvp so that task port policies are not evaluated
5357 * until the csblob in the textvp is accurately reflected.
5358 */
5359 ipc_task_enable(new_task);
5360 error = process_signature(p, imgp);
5361 }
5362
5363 #if defined(HAS_APPLE_PAC)
5364 if (imgp->ip_new_thread && !error) {
5365 ml_task_set_jop_pid_from_shared_region(new_task, imgp->ip_flags & IMGPF_NOJOP);
5366 ml_thread_set_jop_pid(imgp->ip_new_thread, new_task);
5367 }
5368 #endif /* defined(HAS_APPLE_PAC) */
5369
5370 /* flag exec has occurred, notify only if it has not failed due to FP Key error */
5371 if (exec_done && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) {
5372 proc_knote(p, NOTE_EXEC);
5373 }
5374
5375 if (imgp->ip_vp != NULLVP) {
5376 vnode_put(imgp->ip_vp);
5377 }
5378 if (imgp->ip_scriptvp != NULLVP) {
5379 vnode_put(imgp->ip_scriptvp);
5380 }
5381 if (imgp->ip_free_map) {
5382 /* Free the map after dropping iocount on vnode to avoid deadlock */
5383 vm_map_deallocate(imgp->ip_free_map);
5384 }
5385 if (imgp->ip_strings) {
5386 execargs_free(imgp);
5387 }
5388 #if CONFIG_MACF
5389 if (imgp->ip_execlabelp) {
5390 mac_cred_label_free(imgp->ip_execlabelp);
5391 imgp->ip_execlabelp = NULL;
5392 }
5393 if (imgp->ip_scriptlabelp) {
5394 mac_vnode_label_free(imgp->ip_scriptlabelp);
5395 imgp->ip_scriptlabelp = NULL;
5396 }
5397 #endif
5398 if (imgp->ip_cs_error != OS_REASON_NULL) {
5399 os_reason_free(imgp->ip_cs_error);
5400 imgp->ip_cs_error = OS_REASON_NULL;
5401 }
5402
5403 if (!error) {
5404 /*
5405 * We need to initialize the bank context behind the protection of
5406 * the proc_trans lock to prevent a race with exit. We can't do this during
5407 * exec_activate_image because task_bank_init checks entitlements that
5408 * aren't loaded until subsequent calls (including exec_resettextvp).
5409 */
5410 error = proc_transstart(p, 0, 0);
5411 }
5412
5413 if (!error) {
5414 task_bank_init(new_task);
5415 proc_transend(p, 0);
5416
5417 // Don't inherit crash behavior across exec, but preserve crash behavior from bootargs
5418 p->p_crash_behavior = 0;
5419 p->p_crash_behavior_deadline = 0;
5420 #if (DEVELOPMENT || DEBUG)
5421 set_crash_behavior_from_bootarg(p);
5422 #endif
5423
5424 #if __arm64__
5425 proc_footprint_entitlement_hacks(p, new_task);
5426 #endif /* __arm64__ */
5427
5428 #if XNU_TARGET_OS_OSX
5429 if (IOTaskHasEntitlement(new_task, SINGLE_JIT_ENTITLEMENT)) {
5430 vm_map_single_jit(get_task_map(new_task));
5431 }
5432 #endif /* XNU_TARGET_OS_OSX */
5433
5434 /* Sever any extant thread affinity */
5435 thread_affinity_exec(current_thread());
5436
5437 /* Inherit task role from old task to new task for exec */
5438 proc_inherit_task_role(new_task, old_task);
5439
5440 thread_t main_thread = imgp->ip_new_thread;
5441
5442 task_set_main_thread_qos(new_task, main_thread);
5443
5444 #if __has_feature(ptrauth_calls)
5445 task_set_pac_exception_fatal_flag(new_task);
5446 #endif /* __has_feature(ptrauth_calls) */
5447 task_set_jit_exception_fatal_flag(new_task);
5448
5449 #if CONFIG_ARCADE
5450 /*
5451 * Check to see if we need to trigger an arcade upcall AST now
5452 * that the vnode has been reset on the task.
5453 */
5454 arcade_prepare(new_task, imgp->ip_new_thread);
5455 #endif /* CONFIG_ARCADE */
5456
5457 proc_apply_jit_and_vm_policies(imgp, p, new_task);
5458
5459 if (vm_darkwake_mode == TRUE) {
5460 /*
5461 * This process is being launched when the system
5462 * is in darkwake. So mark it specially. This will
5463 * cause all its pages to be entered in the background Q.
5464 */
5465 task_set_darkwake_mode(new_task, vm_darkwake_mode);
5466 }
5467
5468 #if CONFIG_DTRACE
5469 dtrace_thread_didexec(imgp->ip_new_thread);
5470
5471 if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) {
5472 (*dtrace_proc_waitfor_hook)(p);
5473 }
5474 #endif
5475
5476 #if CONFIG_AUDIT
5477 if (!error && AUDIT_ENABLED() && p) {
5478 /* Add the CDHash of the new process to the audit record */
5479 uint8_t *cdhash = cs_get_cdhash(p);
5480 if (cdhash) {
5481 AUDIT_ARG(data, cdhash, sizeof(uint8_t), CS_CDHASH_LEN);
5482 }
5483 }
5484 #endif
5485 } else {
5486 DTRACE_PROC1(exec__failure, int, error);
5487 }
5488
5489 exit_with_error:
5490
5491 /* terminate the new task if exec failed */
5492 if (new_task != NULL && task_is_exec_copy(new_task)) {
5493 task_terminate_internal(new_task);
5494 }
5495
5496 if (imgp != NULL) {
5497 /* Clear the initial wait on the thread transferring watchports */
5498 if (imgp->ip_new_thread) {
5499 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), TCRW_CLEAR_INITIAL_WAIT);
5500 }
5501
5502 /* Transfer the watchport boost to new task */
5503 if (!error) {
5504 task_transfer_turnstile_watchports(old_task,
5505 new_task, imgp->ip_new_thread);
5506 }
5507 /*
5508 * Do not terminate the current task if proc_exec_switch_task did not
5509 * switch the tasks; terminating the current task without the switch would
5510 * result in losing the SIGKILL status.
5511 */
5512 if (task_did_exec(old_task)) {
5513 /* Terminate the current task, since exec will start in new task */
5514 task_terminate_internal(old_task);
5515 }
5516
5517 /* Release the thread ref returned by cloneproc */
5518 if (imgp->ip_new_thread) {
5519 /* clear the exec complete flag if there is an error before the point of no return */
5520 uint32_t clearwait_flags = TCRW_CLEAR_FINAL_WAIT;
5521 if (!exec_done && error != 0) {
5522 clearwait_flags |= TCRW_CLEAR_EXEC_COMPLETE;
5523 }
5524 /* wake up the new exec thread */
5525 task_clear_return_wait(get_threadtask(imgp->ip_new_thread), clearwait_flags);
5526 thread_deallocate(imgp->ip_new_thread);
5527 imgp->ip_new_thread = NULL;
5528 }
5529 }
5530
5531 /* Release the ref returned by fork_create_child */
5532 if (new_task) {
5533 task_deallocate(new_task);
5534 new_task = NULL;
5535 }
5536
5537 if (should_release_proc_ref) {
5538 proc_rele(p);
5539 }
5540
5541 uthread_set_exec_data(current_uthread(), NULL);
5542 kfree_type(typeof(*__execve_data), __execve_data);
5543
5544 if (inherit != NULL) {
5545 ipc_importance_release(inherit);
5546 }
5547
5548 return error;
5549 }
5550
5551
5552 /*
5553 * copyinptr
5554 *
5555 * Description: Copy a pointer in from user space to a user_addr_t in kernel
5556 * space, based on 32/64 bitness of the user space
5557 *
5558 * Parameters: froma User space address
5559 * toptr Address of kernel space user_addr_t
5560 * ptr_size 4/8, based on 'froma' address space
5561 *
5562 * Returns: 0 Success
5563 * EFAULT Bad 'froma'
5564 *
5565 * Implicit returns:
5566 * *toptr Modified
5567 */
5568 static int
5569 copyinptr(user_addr_t froma, user_addr_t *toptr, int ptr_size)
5570 {
5571 int error;
5572
5573 if (ptr_size == 4) {
5574 /* 64 bit value containing 32 bit address */
5575 unsigned int i = 0;
5576
5577 error = copyin(froma, &i, 4);
5578 *toptr = CAST_USER_ADDR_T(i); /* SAFE */
5579 } else {
5580 error = copyin(froma, toptr, 8);
5581 }
5582 return error;
5583 }
5584
5585
5586 /*
5587 * copyoutptr
5588 *
5589 * Description: Copy a pointer out from a user_addr_t in kernel space to
5590 * user space, based on 32/64 bitness of the user space
5591 *
5592 * Parameters: ua User space address to copy to
5593 * ptr Address of kernel space user_addr_t
5594 * ptr_size 4/8, based on 'ua' address space
5595 *
5596 * Returns: 0 Success
5597 * EFAULT Bad 'ua'
5598 *
5599 */
5600 static int
5601 copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size)
5602 {
5603 int error;
5604
5605 if (ptr_size == 4) {
5606 /* 64 bit value containing 32 bit address */
5607 unsigned int i = CAST_DOWN_EXPLICIT(unsigned int, ua); /* SAFE */
5608
5609 error = copyout(&i, ptr, 4);
5610 } else {
5611 error = copyout(&ua, ptr, 8);
5612 }
5613 return error;
5614 }
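/*
 * Worked example of the two helpers above: for a 32-bit user address
 * space (ptr_size == 4), copyinptr() reads a 4-byte pointer and
 * zero-extends it into the 64-bit user_addr_t, and copyoutptr() writes
 * only the low 32 bits back out; for a 64-bit address space all 8 bytes
 * are copied verbatim in both directions.
 */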
5615
5616
5617 /*
5618 * exec_copyout_strings
5619 *
5620 * Copy out the strings segment to user space. The strings segment is put
5621 * on a preinitialized stack frame.
5622 *
5623 * Parameters: struct image_params * the image parameter block
5624 * int * a pointer to the stack offset variable
5625 *
5626 * Returns: 0 Success
5627 * !0 Failure: errno
5628 *
5629 * Implicit returns:
5630 * (*stackp) The stack offset, modified
5631 *
5632 * Note: The strings segment layout is backward, from the top of
5633 * the stack downward, to consume the minimal amount of
5634 * space possible; the returned stack pointer points to the
5635 * end of the area consumed (stacks grow downward).
5636 *
5637 * argc is an int; arg[i] are pointers; env[i] are pointers;
5638 * the 0's are (void *)NULL's
5639 *
5640 * The stack frame layout is:
5641 *
5642 * +-------------+ <- p->user_stack
5643 * | 16b |
5644 * +-------------+
5645 * | STRING AREA |
5646 * | : |
5647 * | : |
5648 * | : |
5649 * +- -- -- -- --+
5650 * | PATH AREA |
5651 * +-------------+
5652 * | 0 |
5653 * +-------------+
5654 * | applev[n] |
5655 * +-------------+
5656 * :
5657 * :
5658 * +-------------+
5659 * | applev[1] |
5660 * +-------------+
5661 * | exec_path / |
5662 * | applev[0] |
5663 * +-------------+
5664 * | 0 |
5665 * +-------------+
5666 * | env[n] |
5667 * +-------------+
5668 * :
5669 * :
5670 * +-------------+
5671 * | env[0] |
5672 * +-------------+
5673 * | 0 |
5674 * +-------------+
5675 * | arg[argc-1] |
5676 * +-------------+
5677 * :
5678 * :
5679 * +-------------+
5680 * | arg[0] |
5681 * +-------------+
5682 * | argc |
5683 * sp-> +-------------+
5684 *
5685 * Although technically a part of the STRING AREA, we treat the PATH AREA as
5686 * a separate entity. This allows us to align the beginning of the PATH AREA
5687 * to a pointer boundary so that the exec_path, env[i], and argv[i] pointers
5688 * which precede it on the stack are properly aligned.
5689 */
5690 __attribute__((noinline))
5691 static int
5692 exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp)
5693 {
5694 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
5695 int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
5696 int ptr_area_size;
5697 void *ptr_buffer_start, *ptr_buffer;
5698 size_t string_size;
5699
5700 user_addr_t string_area; /* *argv[], *env[] */
5701 user_addr_t ptr_area; /* argv[], env[], applev[] */
5702 user_addr_t argc_area; /* argc */
5703 user_addr_t stack;
5704 int error;
5705
5706 unsigned i;
5707 struct copyout_desc {
5708 char *start_string;
5709 int count;
5710 #if CONFIG_DTRACE
5711 user_addr_t *dtrace_cookie;
5712 #endif
5713 boolean_t null_term;
5714 } descriptors[] = {
5715 {
5716 .start_string = imgp->ip_startargv,
5717 .count = imgp->ip_argc,
5718 #if CONFIG_DTRACE
5719 .dtrace_cookie = &p->p_dtrace_argv,
5720 #endif
5721 .null_term = TRUE
5722 },
5723 {
5724 .start_string = imgp->ip_endargv,
5725 .count = imgp->ip_envc,
5726 #if CONFIG_DTRACE
5727 .dtrace_cookie = &p->p_dtrace_envp,
5728 #endif
5729 .null_term = TRUE
5730 },
5731 {
5732 .start_string = imgp->ip_strings,
5733 .count = 1,
5734 #if CONFIG_DTRACE
5735 .dtrace_cookie = NULL,
5736 #endif
5737 .null_term = FALSE
5738 },
5739 {
5740 .start_string = imgp->ip_endenvv,
5741 .count = imgp->ip_applec - 1, /* exec_path handled above */
5742 #if CONFIG_DTRACE
5743 .dtrace_cookie = NULL,
5744 #endif
5745 .null_term = TRUE
5746 }
5747 };
5748
5749 stack = *stackp;
5750
5751 /*
5752 * All previous contributors to the string area
5753 * should have aligned their sub-area
5754 */
5755 if (imgp->ip_strspace % ptr_size != 0) {
5756 error = EINVAL;
5757 goto bad;
5758 }
5759
5760 /* Grow the stack down for the strings we've been building up */
5761 string_size = imgp->ip_strendp - imgp->ip_strings;
5762 stack -= string_size;
5763 string_area = stack;
5764
5765 /*
5766 * Need room for one pointer for each string, plus
5767 * one for the NULLs terminating the argv, envv, and apple areas.
5768 */
5769 ptr_area_size = (imgp->ip_argc + imgp->ip_envc + imgp->ip_applec + 3) * ptr_size;
5770 stack -= ptr_area_size;
5771 ptr_area = stack;
5772
5773 /* We'll construct all the pointer arrays in our string buffer,
5774 * which we already know is aligned properly, and ip_argspace
5775 * was used to verify we have enough space.
5776 */
5777 ptr_buffer_start = ptr_buffer = (void *)imgp->ip_strendp;
5778
5779 /*
5780 * Need room for pointer-aligned argc slot.
5781 */
5782 stack -= ptr_size;
5783 argc_area = stack;
5784
5785 /*
5786 * Record the size of the arguments area so that sysctl_procargs()
5787 * can return the argument area without having to parse the arguments.
5788 */
5789 proc_lock(p);
5790 p->p_argc = imgp->ip_argc;
5791 p->p_argslen = (int)(*stackp - string_area);
5792 proc_unlock(p);
5793
5794 /* Return the initial stack address: the location of argc */
5795 *stackp = stack;
5796
5797 /*
5798 * Copy out the entire strings area.
5799 */
5800 error = copyout(imgp->ip_strings, string_area,
5801 string_size);
5802 if (error) {
5803 goto bad;
5804 }
5805
5806 for (i = 0; i < sizeof(descriptors) / sizeof(descriptors[0]); i++) {
5807 char *cur_string = descriptors[i].start_string;
5808 int j;
5809
5810 #if CONFIG_DTRACE
5811 if (descriptors[i].dtrace_cookie) {
5812 proc_lock(p);
5813 *descriptors[i].dtrace_cookie = ptr_area + ((uintptr_t)ptr_buffer - (uintptr_t)ptr_buffer_start); /* dtrace convenience */
5814 proc_unlock(p);
5815 }
5816 #endif /* CONFIG_DTRACE */
5817
5818 /*
5819 * For each segment (argv, envv, applev), copy as many pointers as requested
5820 * to our pointer buffer.
5821 */
5822 for (j = 0; j < descriptors[i].count; j++) {
5823 user_addr_t cur_address = string_area + (cur_string - imgp->ip_strings);
5824
5825 /* Copy out the pointer to the current string. Alignment has been verified */
5826 if (ptr_size == 8) {
5827 *(uint64_t *)ptr_buffer = (uint64_t)cur_address;
5828 } else {
5829 *(uint32_t *)ptr_buffer = (uint32_t)cur_address;
5830 }
5831
5832 ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
5833 cur_string += strlen(cur_string) + 1; /* Only a NUL between strings in the same area */
5834 }
5835
5836 if (descriptors[i].null_term) {
5837 if (ptr_size == 8) {
5838 *(uint64_t *)ptr_buffer = 0ULL;
5839 } else {
5840 *(uint32_t *)ptr_buffer = 0;
5841 }
5842
5843 ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size);
5844 }
5845 }
5846
5847 /*
5848 * Copy out all our pointer arrays in bulk.
5849 */
5850 error = copyout(ptr_buffer_start, ptr_area,
5851 ptr_area_size);
5852 if (error) {
5853 goto bad;
5854 }
5855
5856 /* argc (int32, stored in a ptr_size area) */
5857 error = copyoutptr((user_addr_t)imgp->ip_argc, argc_area, ptr_size);
5858 if (error) {
5859 goto bad;
5860 }
5861
5862 bad:
5863 return error;
5864 }
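/*
 * Illustrative user-space sketch (not part of the kernel, compiled out):
 * on Darwin the C startup code hands main() the vectors laid out above
 * in stack order, with the apple[] vector as a fourth argument after
 * envp.  Dumping all three shows the frame exec_copyout_strings()
 * constructed; apple[0] is the executable path saved by exec_save_path().
 */
#if 0 /* user-space example only */
#include <stdio.h>

int
main(int argc, char **argv, char **envp, char **apple)
{
	for (int i = 0; i < argc; i++) {
		printf("argv[%d]  = %s\n", i, argv[i]);
	}
	for (int i = 0; envp[i] != NULL; i++) {
		printf("envp[%d]  = %s\n", i, envp[i]);
	}
	for (int i = 0; apple[i] != NULL; i++) {
		printf("apple[%d] = %s\n", i, apple[i]);
	}
	return 0;
}
#endif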
5865
5866
5867 /*
5868 * exec_extract_strings
5869 *
5870 * Copy arguments and environment from user space into work area; we may
5871 * have already copied some early arguments into the work area, and if
5872 * so, any arguments copied in are appended to those already there.
5873 * This function is the primary manipulator of ip_argspace, since
5874 * these are the arguments the client of execve(2) knows about. After
5875 * each argv[]/envv[] string is copied, we charge the string length
5876 * and argv[]/envv[] pointer slot to ip_argspace, so that we can
5877 * fully preflight the arg list size.
5878 *
5879 * Parameters: struct image_params * the image parameter block
5880 *
5881 * Returns: 0 Success
5882 * !0 Failure: errno
5883 *
5884 * Implicit returns:
5885 * (imgp->ip_argc) Count of arguments, updated
5886 * (imgp->ip_envc) Count of environment strings, updated
5887 * (imgp->ip_argspace) Count of remaining of NCARGS
5888 * (imgp->ip_interp_buffer) Interpreter and args (mutated in place)
5889 *
5890 *
5891 * Note: The argument and environment vectors are user space pointers
5892 * to arrays of user space pointers.
5893 */
5894 __attribute__((noinline))
5895 static int
5896 exec_extract_strings(struct image_params *imgp)
5897 {
5898 int error = 0;
5899 int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT_ADDR) ? 8 : 4;
5900 int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
5901 user_addr_t argv = imgp->ip_user_argv;
5902 user_addr_t envv = imgp->ip_user_envv;
5903
5904 /*
5905 * Adjust space reserved for the path name by however much padding it
5906 * needs. Doing this here since we didn't know if this would be a 32-
5907 * or 64-bit process back in exec_save_path.
5908 */
5909 while (imgp->ip_strspace % new_ptr_size != 0) {
5910 *imgp->ip_strendp++ = '\0';
5911 imgp->ip_strspace--;
5912 /* imgp->ip_argspace--; not counted towards exec args total */
5913 }
5914
5915 /*
5916 * From now on, we start attributing string space to ip_argspace
5917 */
5918 imgp->ip_startargv = imgp->ip_strendp;
5919 imgp->ip_argc = 0;
5920
5921 if ((imgp->ip_flags & IMGPF_INTERPRET) != 0) {
5922 user_addr_t arg;
5923 char *argstart, *ch;
5924
5925 /* First, the arguments in the "#!" string are tokenized and extracted. */
5926 argstart = imgp->ip_interp_buffer;
5927 while (argstart) {
5928 ch = argstart;
5929 while (*ch && !IS_WHITESPACE(*ch)) {
5930 ch++;
5931 }
5932
5933 if (*ch == '\0') {
5934 /* last argument, no need to NUL-terminate */
5935 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);
5936 argstart = NULL;
5937 } else {
5938 /* NUL-terminate */
5939 *ch = '\0';
5940 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(argstart), UIO_SYSSPACE, TRUE);
5941
5942 /*
5943 * Find the next string. We know spaces at the end of the string have already
5944 * been stripped.
5945 */
5946 argstart = ch + 1;
5947 while (IS_WHITESPACE(*argstart)) {
5948 argstart++;
5949 }
5950 }
5951
5952 /* Error-check, regardless of whether this is the last interpreter arg or not */
5953 if (error) {
5954 goto bad;
5955 }
5956 if (imgp->ip_argspace < new_ptr_size) {
5957 error = E2BIG;
5958 goto bad;
5959 }
5960 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
5961 imgp->ip_argc++;
5962 }
5963
5964 if (argv != 0LL) {
5965 /*
5966 * If we are running an interpreter, replace the av[0] that was
5967 * passed to execve() with the path name that was
5968 * passed to execve() for interpreters which do not use the PATH
5969 * to locate their script arguments.
5970 */
5971 error = copyinptr(argv, &arg, ptr_size);
5972 if (error) {
5973 goto bad;
5974 }
5975 if (arg != 0LL) {
5976 argv += ptr_size; /* consume without using */
5977 }
5978 }
5979
5980 if (imgp->ip_interp_sugid_fd != -1) {
5981 char temp[19]; /* "/dev/fd/" + 10 digits + NUL */
5982 snprintf(temp, sizeof(temp), "/dev/fd/%d", imgp->ip_interp_sugid_fd);
5983 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(temp), UIO_SYSSPACE, TRUE);
5984 } else {
5985 error = exec_add_user_string(imgp, imgp->ip_user_fname, imgp->ip_seg, TRUE);
5986 }
5987
5988 if (error) {
5989 goto bad;
5990 }
5991 if (imgp->ip_argspace < new_ptr_size) {
5992 error = E2BIG;
5993 goto bad;
5994 }
5995 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
5996 imgp->ip_argc++;
5997 }
5998
5999 while (argv != 0LL) {
6000 user_addr_t arg;
6001
6002 error = copyinptr(argv, &arg, ptr_size);
6003 if (error) {
6004 goto bad;
6005 }
6006
6007 if (arg == 0LL) {
6008 break;
6009 }
6010
6011 argv += ptr_size;
6012
6013 /*
6014 * av[n...] = arg[n]
6015 */
6016 error = exec_add_user_string(imgp, arg, imgp->ip_seg, TRUE);
6017 if (error) {
6018 goto bad;
6019 }
6020 if (imgp->ip_argspace < new_ptr_size) {
6021 error = E2BIG;
6022 goto bad;
6023 }
6024 imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */
6025 imgp->ip_argc++;
6026 }
6027
6028 /* Save space for argv[] NULL terminator */
6029 if (imgp->ip_argspace < new_ptr_size) {
6030 error = E2BIG;
6031 goto bad;
6032 }
6033 imgp->ip_argspace -= new_ptr_size;
6034
6035 /* Note where the args ends and env begins. */
6036 imgp->ip_endargv = imgp->ip_strendp;
6037 imgp->ip_envc = 0;
6038
6039 /* Now, get the environment */
6040 while (envv != 0LL) {
6041 user_addr_t env;
6042
6043 error = copyinptr(envv, &env, ptr_size);
6044 if (error) {
6045 goto bad;
6046 }
6047
6048 envv += ptr_size;
6049 if (env == 0LL) {
6050 break;
6051 }
6052 /*
6053 * av[n...] = env[n]
6054 */
6055 error = exec_add_user_string(imgp, env, imgp->ip_seg, TRUE);
6056 if (error) {
6057 goto bad;
6058 }
6059 if (imgp->ip_argspace < new_ptr_size) {
6060 error = E2BIG;
6061 goto bad;
6062 }
6063 imgp->ip_argspace -= new_ptr_size; /* to hold envv[] entry */
6064 imgp->ip_envc++;
6065 }
6066
6067 /* Save space for envv[] NULL terminator */
6068 if (imgp->ip_argspace < new_ptr_size) {
6069 error = E2BIG;
6070 goto bad;
6071 }
6072 imgp->ip_argspace -= new_ptr_size;
6073
6074 /* Align the tail of the combined argv+envv area */
6075 while (imgp->ip_strspace % new_ptr_size != 0) {
6076 if (imgp->ip_argspace < 1) {
6077 error = E2BIG;
6078 goto bad;
6079 }
6080 *imgp->ip_strendp++ = '\0';
6081 imgp->ip_strspace--;
6082 imgp->ip_argspace--;
6083 }
6084
6085 /* Note where the envv ends and applev begins. */
6086 imgp->ip_endenvv = imgp->ip_strendp;
6087
6088 /*
6089 * From now on, we are no longer charging argument
6090 * space to ip_argspace.
6091 */
6092
6093 bad:
6094 return error;
6095 }
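/*
 * Worked example of the interpreter handling above: exec'ing a script
 * /tmp/s whose first line is "#!/bin/sh -x" with
 * argv = { "/tmp/s", "arg1", NULL } produces the new argument vector
 * { "/bin/sh", "-x", "/tmp/s", "arg1", NULL }: the tokenized "#!" words
 * come first, the script path (or a "/dev/fd/N" name for set-id
 * scripts) replaces the original argv[0], and the remaining user
 * arguments are appended unchanged.
 */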
6096
6097 /*
6098 * Libc has an 8-element array set up for stack guard values. It only fills
6099 * in one of those entries, and both gcc and llvm seem to use only a single
6100 * 8-byte guard. Until somebody needs more than an 8-byte guard value, don't
6101 * do the work to construct them.
6102 */
6103 #define GUARD_VALUES 1
6104 #define GUARD_KEY "stack_guard="
6105
6106 /*
6107 * System malloc needs some entropy when it is initialized.
6108 */
6109 #define ENTROPY_VALUES 2
6110 #define ENTROPY_KEY "malloc_entropy="
6111
6112 /*
6113 * libplatform needs a random pointer-obfuscation value when it is initialized.
6114 */
6115 #define PTR_MUNGE_VALUES 1
6116 #define PTR_MUNGE_KEY "ptr_munge="
6117
6118 /*
6119 * System malloc engages nanozone for UIAPP.
6120 */
6121 #define NANO_ENGAGE_KEY "MallocNanoZone=1"
6122
6123 /*
6124 * Used to pass experiment flags up to libmalloc.
6125 */
6126 #define LIBMALLOC_EXPERIMENT_FACTORS_KEY "MallocExperiment="
6127
6128 /*
6129 * Passes information about hardened runtime entitlements to libsystem/libmalloc
6130 */
6131 #define HARDENED_RUNTIME_KEY "HardenedRuntime="
6132
6133 #define PFZ_KEY "pfz="
6134 extern user32_addr_t commpage_text32_location;
6135 extern user64_addr_t commpage_text64_location;
6136
6137 extern uuid_string_t bootsessionuuid_string;
6138 static TUNABLE(uint32_t, exe_boothash_salt, "exe_boothash_salt", 0);
6139
6140 __startup_func
6141 static void
6142 exe_boothash_salt_generate(void)
6143 {
6144 if (!PE_parse_boot_argn("exe_boothash_salt", NULL, 0)) {
6145 read_random(&exe_boothash_salt, sizeof(exe_boothash_salt));
6146 }
6147 }
6148 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, exe_boothash_salt_generate);
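/*
 * Example: booting with "exe_boothash_salt=0x1234" pins the salt to a
 * fixed value, making the executable_boothash= strings below
 * reproducible across boots; without the boot-arg, a random salt is
 * drawn once per boot by the startup function above.
 */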
6149
6150
6151 #define MAIN_STACK_VALUES 4
6152 #define MAIN_STACK_KEY "main_stack="
6153
6154 #define FSID_KEY "executable_file="
6155 #define DYLD_FSID_KEY "dyld_file="
6156 #define CDHASH_KEY "executable_cdhash="
6157 #define DYLD_FLAGS_KEY "dyld_flags="
6158 #define SUBSYSTEM_ROOT_PATH_KEY "subsystem_root_path="
6159 #define APP_BOOT_SESSION_KEY "executable_boothash="
6160 #if __has_feature(ptrauth_calls)
6161 #define PTRAUTH_DISABLED_FLAG "ptrauth_disabled=1"
6162 #define DYLD_ARM64E_ABI_KEY "arm64e_abi="
6163 #endif /* __has_feature(ptrauth_calls) */
6164 #define MAIN_TH_PORT_KEY "th_port="
6165
6166 #define FSID_MAX_STRING "0x1234567890abcdef,0x1234567890abcdef"
6167
6168 #define HEX_STR_LEN 18 // 64-bit hex value "0x0123456701234567"
6169 #define HEX_STR_LEN32 10 // 32-bit hex value "0x01234567"
6170
6171 #if XNU_TARGET_OS_OSX && _POSIX_SPAWN_FORCE_4K_PAGES && PMAP_CREATE_FORCE_4K_PAGES
6172 #define VM_FORCE_4K_PAGES_KEY "vm_force_4k_pages=1"
6173 #endif /* XNU_TARGET_OS_OSX && _POSIX_SPAWN_FORCE_4K_PAGES && PMAP_CREATE_FORCE_4K_PAGES */
6174
6175 static int
6176 exec_add_entropy_key(struct image_params *imgp,
6177 const char *key,
6178 int values,
6179 boolean_t embedNUL)
6180 {
6181 const int limit = 8;
6182 uint64_t entropy[limit];
6183 char str[strlen(key) + (HEX_STR_LEN + 1) * limit + 1];
6184 if (values > limit) {
6185 values = limit;
6186 }
6187
6188 read_random(entropy, sizeof(entropy[0]) * values);
6189
6190 if (embedNUL) {
6191 entropy[0] &= ~(0xffull << 8);
6192 }
6193
6194 int len = scnprintf(str, sizeof(str), "%s0x%llx", key, entropy[0]);
6195 size_t remaining = sizeof(str) - len;
6196 for (int i = 1; i < values && remaining > 0; ++i) {
6197 size_t start = sizeof(str) - remaining;
6198 len = scnprintf(&str[start], remaining, ",0x%llx", entropy[i]);
6199 remaining -= len;
6200 }
6201
6202 return exec_add_user_string(imgp, CAST_USER_ADDR_T(str), UIO_SYSSPACE, FALSE);
6203 }
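/*
 * Illustrative output of exec_add_entropy_key() (the values are freshly
 * random on every exec; these are made up):
 *
 *	"stack_guard=0x4d2cb64bc394007f"	GUARD_VALUES == 1; embedNUL
 *						forces the 00 byte seen above
 *	"malloc_entropy=0x9f3c51a2e07b6d14,0x71aa0cd83b5f9e26"
 *	"ptr_munge=0x5e02c7b1a4f38d69"
 */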
6204
6205 /*
6206 * Build up the contents of the apple[] string vector
6207 */
6208 #if (DEVELOPMENT || DEBUG)
6209 extern uint64_t dyld_flags;
6210 #endif
6211
6212 #if __has_feature(ptrauth_calls)
6213 static inline bool
6214 is_arm64e_running_as_arm64(const struct image_params *imgp)
6215 {
6216 return (imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E &&
6217 (imgp->ip_flags & IMGPF_NOJOP);
6218 }
6219 #endif /* __has_feature(ptrauth_calls) */
6220
6221 _Atomic uint64_t libmalloc_experiment_factors = 0;
6222
6223 static int
6224 exec_add_apple_strings(struct image_params *imgp,
6225 const load_result_t *load_result)
6226 {
6227 int error;
6228 int img_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4;
6229 thread_t new_thread;
6230 ipc_port_t sright;
6231 uint64_t local_experiment_factors = 0;
6232
6233 /* exec_save_path stored the first string */
6234 imgp->ip_applec = 1;
6235
6236 /* adding the pfz string */
6237 {
6238 char pfz_string[strlen(PFZ_KEY) + HEX_STR_LEN + 1];
6239
6240 if (img_ptr_size == 8) {
6241 __assert_only size_t ret = snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%llx", commpage_text64_location);
6242 assert(ret < sizeof(pfz_string));
6243 } else {
6244 snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%x", commpage_text32_location);
6245 }
6246 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(pfz_string), UIO_SYSSPACE, FALSE);
6247 if (error) {
6248 printf("Failed to add the pfz string with error %d\n", error);
6249 goto bad;
6250 }
6251 imgp->ip_applec++;
6252 }
6253
6254 /* adding the NANO_ENGAGE_KEY key */
6255 if (imgp->ip_px_sa) {
6256 struct _posix_spawnattr* psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
6257 int proc_flags = psa->psa_flags;
6258
6259 if ((proc_flags & _POSIX_SPAWN_NANO_ALLOCATOR) == _POSIX_SPAWN_NANO_ALLOCATOR) {
6260 const char *nano_string = NANO_ENGAGE_KEY;
6261 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(nano_string), UIO_SYSSPACE, FALSE);
6262 if (error) {
6263 goto bad;
6264 }
6265 imgp->ip_applec++;
6266 }
6267 }
6268
6269 /*
6270 * Supply libc with a collection of random values to use when
6271 * implementing -fstack-protector.
6272 *
6273 * (The first random string always contains an embedded NUL so that
6274 * __stack_chk_guard also protects against C string vulnerabilities)
6275 */
6276 error = exec_add_entropy_key(imgp, GUARD_KEY, GUARD_VALUES, TRUE);
6277 if (error) {
6278 goto bad;
6279 }
6280 imgp->ip_applec++;
6281
6282 /*
6283 * Supply libc with entropy for system malloc.
6284 */
6285 error = exec_add_entropy_key(imgp, ENTROPY_KEY, ENTROPY_VALUES, FALSE);
6286 if (error) {
6287 goto bad;
6288 }
6289 imgp->ip_applec++;
6290
6291 /*
6292 * Supply libpthread & libplatform with a random value to use for pointer
6293 * obfuscation.
6294 */
6295 error = exec_add_entropy_key(imgp, PTR_MUNGE_KEY, PTR_MUNGE_VALUES, FALSE);
6296 if (error) {
6297 goto bad;
6298 }
6299 imgp->ip_applec++;
6300
6301 /*
6302 * Add MAIN_STACK_KEY: Supplies the address and size of the main thread's
6303 * stack if it was allocated by the kernel.
6304 *
6305 * The guard page is not included in this stack size as libpthread
6306 * expects to add it back in after receiving this value.
6307 */
6308 if (load_result->unixproc) {
6309 char stack_string[strlen(MAIN_STACK_KEY) + (HEX_STR_LEN + 1) * MAIN_STACK_VALUES + 1];
6310 snprintf(stack_string, sizeof(stack_string),
6311 MAIN_STACK_KEY "0x%llx,0x%llx,0x%llx,0x%llx",
6312 (uint64_t)load_result->user_stack,
6313 (uint64_t)load_result->user_stack_size,
6314 (uint64_t)load_result->user_stack_alloc,
6315 (uint64_t)load_result->user_stack_alloc_size);
6316 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(stack_string), UIO_SYSSPACE, FALSE);
6317 if (error) {
6318 goto bad;
6319 }
6320 imgp->ip_applec++;
6321 }
6322
6323 if (imgp->ip_vattr) {
6324 uint64_t fsid = vnode_get_va_fsid(imgp->ip_vattr);
6325 uint64_t fsobjid = imgp->ip_vattr->va_fileid;
6326
6327 char fsid_string[strlen(FSID_KEY) + strlen(FSID_MAX_STRING) + 1];
6328 snprintf(fsid_string, sizeof(fsid_string),
6329 FSID_KEY "0x%llx,0x%llx", fsid, fsobjid);
6330 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE);
6331 if (error) {
6332 goto bad;
6333 }
6334 imgp->ip_applec++;
6335 }
6336
6337 if (imgp->ip_dyld_fsid || imgp->ip_dyld_fsobjid) {
6338 char fsid_string[strlen(DYLD_FSID_KEY) + strlen(FSID_MAX_STRING) + 1];
6339 snprintf(fsid_string, sizeof(fsid_string),
6340 DYLD_FSID_KEY "0x%llx,0x%llx", imgp->ip_dyld_fsid, imgp->ip_dyld_fsobjid);
6341 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE);
6342 if (error) {
6343 goto bad;
6344 }
6345 imgp->ip_applec++;
6346 }
6347
6348 uint8_t cdhash[SHA1_RESULTLEN];
6349 int cdhash_error = ubc_cs_getcdhash(imgp->ip_vp, imgp->ip_arch_offset, cdhash);
6350 if (cdhash_error == 0) {
6351 char hash_string[strlen(CDHASH_KEY) + 2 * SHA1_RESULTLEN + 1];
6352 strncpy(hash_string, CDHASH_KEY, sizeof(hash_string));
6353 char *p = hash_string + sizeof(CDHASH_KEY) - 1;
6354 for (int i = 0; i < SHA1_RESULTLEN; i++) {
6355 snprintf(p, 3, "%02x", (int) cdhash[i]);
6356 p += 2;
6357 }
6358 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(hash_string), UIO_SYSSPACE, FALSE);
6359 if (error) {
6360 goto bad;
6361 }
6362 imgp->ip_applec++;
6363
6364 /* hash together cd-hash and boot-session-uuid */
6365 uint8_t sha_digest[SHA256_DIGEST_LENGTH];
6366 SHA256_CTX sha_ctx;
6367 SHA256_Init(&sha_ctx);
6368 SHA256_Update(&sha_ctx, &exe_boothash_salt, sizeof(exe_boothash_salt));
6369 SHA256_Update(&sha_ctx, bootsessionuuid_string, sizeof(bootsessionuuid_string));
6370 SHA256_Update(&sha_ctx, cdhash, sizeof(cdhash));
6371 SHA256_Final(sha_digest, &sha_ctx);
6372 char app_boot_string[strlen(APP_BOOT_SESSION_KEY) + 2 * SHA1_RESULTLEN + 1];
6373 strncpy(app_boot_string, APP_BOOT_SESSION_KEY, sizeof(app_boot_string));
6374 char *s = app_boot_string + sizeof(APP_BOOT_SESSION_KEY) - 1;
6375 for (int i = 0; i < SHA1_RESULTLEN; i++) {
6376 snprintf(s, 3, "%02x", (int) sha_digest[i]);
6377 s += 2;
6378 }
6379 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(app_boot_string), UIO_SYSSPACE, FALSE);
6380 if (error) {
6381 goto bad;
6382 }
6383 imgp->ip_applec++;
6384 }
6385 #if (DEVELOPMENT || DEBUG)
6386 if (dyld_flags) {
6387 char dyld_flags_string[strlen(DYLD_FLAGS_KEY) + HEX_STR_LEN + 1];
6388 snprintf(dyld_flags_string, sizeof(dyld_flags_string), DYLD_FLAGS_KEY "0x%llx", dyld_flags);
6389 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(dyld_flags_string), UIO_SYSSPACE, FALSE);
6390 if (error) {
6391 goto bad;
6392 }
6393 imgp->ip_applec++;
6394 }
6395 #endif
6396 if (imgp->ip_subsystem_root_path) {
6397 size_t buffer_len = MAXPATHLEN + strlen(SUBSYSTEM_ROOT_PATH_KEY);
6398 char subsystem_root_path_string[buffer_len];
6399 int required_len = snprintf(subsystem_root_path_string, buffer_len, SUBSYSTEM_ROOT_PATH_KEY "%s", imgp->ip_subsystem_root_path);
6400
6401 if (((size_t)required_len >= buffer_len) || (required_len < 0)) {
6402 error = ENAMETOOLONG;
6403 goto bad;
6404 }
6405
6406 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(subsystem_root_path_string), UIO_SYSSPACE, FALSE);
6407 if (error) {
6408 goto bad;
6409 }
6410
6411 imgp->ip_applec++;
6412 }
6413 #if __has_feature(ptrauth_calls)
6414 if (is_arm64e_running_as_arm64(imgp)) {
6415 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(PTRAUTH_DISABLED_FLAG), UIO_SYSSPACE, FALSE);
6416 if (error) {
6417 goto bad;
6418 }
6419
6420 imgp->ip_applec++;
6421 }
6422 #endif /* __has_feature(ptrauth_calls) */
6423
6424
6425 #if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
6426 {
6427 char dyld_abi_string[strlen(DYLD_ARM64E_ABI_KEY) + 8];
6428 strlcpy(dyld_abi_string, DYLD_ARM64E_ABI_KEY, sizeof(dyld_abi_string));
6429 bool allowAll = bootarg_arm64e_preview_abi;
6430 strlcat(dyld_abi_string, (allowAll ? "all" : "os"), sizeof(dyld_abi_string));
6431 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(dyld_abi_string), UIO_SYSSPACE, FALSE);
6432 if (error) {
6433 goto bad;
6434 }
6435
6436 imgp->ip_applec++;
6437 }
6438 #endif
6439 /*
6440 * Add main thread mach port name
6441 * +1 uref on main thread port, this ref will be extracted by libpthread in __pthread_init
6442 * and consumed in _bsdthread_terminate. Leaking the main thread port name if not linked
6443 * against libpthread.
6444 */
6445 if ((new_thread = imgp->ip_new_thread) != THREAD_NULL) {
6446 thread_reference(new_thread);
6447 sright = convert_thread_to_port_pinned(new_thread);
6448 task_t new_task = get_threadtask(new_thread);
6449 mach_port_name_t name = ipc_port_copyout_send(sright, get_task_ipcspace(new_task));
6450 char port_name_hex_str[strlen(MAIN_TH_PORT_KEY) + HEX_STR_LEN32 + 1];
6451 snprintf(port_name_hex_str, sizeof(port_name_hex_str), MAIN_TH_PORT_KEY "0x%x", name);
6452
6453 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(port_name_hex_str), UIO_SYSSPACE, FALSE);
6454 if (error) {
6455 goto bad;
6456 }
6457 imgp->ip_applec++;
6458 }
6459
6460 #if XNU_TARGET_OS_OSX && _POSIX_SPAWN_FORCE_4K_PAGES && PMAP_CREATE_FORCE_4K_PAGES
6461 if (imgp->ip_px_sa != NULL) {
6462 struct _posix_spawnattr* psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
6463 if (psa->psa_flags & _POSIX_SPAWN_FORCE_4K_PAGES) {
6464 const char *vm_force_4k_string = VM_FORCE_4K_PAGES_KEY;
6465 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(vm_force_4k_string), UIO_SYSSPACE, FALSE);
6466 if (error) {
6467 goto bad;
6468 }
6469 imgp->ip_applec++;
6470 }
6471 }
6472 #endif /* XNU_TARGET_OS_OSX && _POSIX_SPAWN_FORCE_4K_PAGES && PMAP_CREATE_FORCE_4K_PAGES */
6473
6474 /* adding the libmalloc experiment string */
6475 local_experiment_factors = os_atomic_load_wide(&libmalloc_experiment_factors, relaxed);
6476 if (__improbable(local_experiment_factors != 0)) {
6477 char libmalloc_experiment_factors_string[strlen(LIBMALLOC_EXPERIMENT_FACTORS_KEY) + HEX_STR_LEN + 1];
6478
6479 snprintf(
6480 libmalloc_experiment_factors_string,
6481 sizeof(libmalloc_experiment_factors_string),
6482 LIBMALLOC_EXPERIMENT_FACTORS_KEY "0x%llx",
6483 local_experiment_factors);
6484 error = exec_add_user_string(
6485 imgp,
6486 CAST_USER_ADDR_T(libmalloc_experiment_factors_string),
6487 UIO_SYSSPACE,
6488 FALSE);
6489 if (error) {
6490 printf("Failed to add the libmalloc experiment factors string with error %d\n", error);
6491 goto bad;
6492 }
6493 imgp->ip_applec++;
6494 }
6495
6496
6497 /* tell dyld that it can leverage hardware for its read-only/read-write trusted path */
6498 if (imgp->ip_flags & IMGPF_HW_TPRO) {
6499 const char *dyld_hw_tpro = "dyld_hw_tpro=1";
6500 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(dyld_hw_tpro), UIO_SYSSPACE, FALSE);
6501 if (error) {
6502 printf("Failed to add dyld hw tpro setting with error %d\n", error);
6503 goto bad;
6504 }
6505
6506 imgp->ip_applec++;
6507
6508 }
6509
6510 if (load_result->hardened_runtime_binary) {
6511 const size_t HR_STRING_SIZE = sizeof(HARDENED_RUNTIME_KEY) + HR_FLAGS_NUM_NIBBLES + 2 + 1;
6512 char hardened_runtime[HR_STRING_SIZE];
6513 snprintf(hardened_runtime, HR_STRING_SIZE, HARDENED_RUNTIME_KEY"0x%x", load_result->hardened_runtime_binary);
6514 error = exec_add_user_string(imgp, CAST_USER_ADDR_T(hardened_runtime), UIO_SYSSPACE, FALSE);
6515 if (error) {
6516 printf("Failed to add hardened runtime flag with error %d\n", error);
6517 goto bad;
6518 }
6519 imgp->ip_applec++;
6520 }
6521 /* Align the tail of the combined applev area */
6522 while (imgp->ip_strspace % img_ptr_size != 0) {
6523 *imgp->ip_strendp++ = '\0';
6524 imgp->ip_strspace--;
6525 }
6526
6527 bad:
6528 return error;
6529 }
6530
6531 /*
6532 * exec_check_permissions
6533 *
6534 * Description: Verify that the file that is being attempted to be executed
6535 * is in fact allowed to be executed based on its POSIX file
6536 * permissions and other access control criteria
6537 *
6538 * Parameters: struct image_params * the image parameter block
6539 *
6540 * Returns: 0 Success
6541 * EACCES Permission denied
6542 * ENOEXEC Executable file format error
6543 * ETXTBSY Text file busy [misuse of error code]
6544 * vnode_getattr:???
6545 * vnode_authorize:???
6546 */
6547 static int
6548 exec_check_permissions(struct image_params *imgp)
6549 {
6550 struct vnode *vp = imgp->ip_vp;
6551 struct vnode_attr *vap = imgp->ip_vattr;
6552 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
6553 int error;
6554 kauth_action_t action;
6555
6556 /* Only allow execution of regular files */
6557 if (!vnode_isreg(vp)) {
6558 return EACCES;
6559 }
6560
6561 /* Get the file attributes that we will be using here and elsewhere */
6562 VATTR_INIT(vap);
6563 VATTR_WANTED(vap, va_uid);
6564 VATTR_WANTED(vap, va_gid);
6565 VATTR_WANTED(vap, va_mode);
6566 VATTR_WANTED(vap, va_fsid);
6567 VATTR_WANTED(vap, va_fsid64);
6568 VATTR_WANTED(vap, va_fileid);
6569 VATTR_WANTED(vap, va_data_size);
6570 if ((error = vnode_getattr(vp, vap, imgp->ip_vfs_context)) != 0) {
6571 return error;
6572 }
6573
6574 /*
6575 * Ensure that at least one execute bit is on - otherwise root
6576 * will always succeed, and we don't want that to happen unless the
6577 * file really is executable.
6578 */
6579 if (!vfs_authopaque(vnode_mount(vp)) && ((vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)) {
6580 return EACCES;
6581 }
6582
6583 /* Disallow zero length files */
6584 if (vap->va_data_size == 0) {
6585 return ENOEXEC;
6586 }
6587
6588 imgp->ip_arch_offset = (user_size_t)0;
6589 #if __LP64__
6590 imgp->ip_arch_size = vap->va_data_size;
6591 #else
6592 if (vap->va_data_size > UINT32_MAX) {
6593 return ENOEXEC;
6594 }
6595 imgp->ip_arch_size = (user_size_t)vap->va_data_size;
6596 #endif
6597
6598 /* Disable setuid-ness for traced programs or if MNT_NOSUID */
6599 if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_lflag & P_LTRACED)) {
6600 vap->va_mode &= ~(VSUID | VSGID);
6601 }
6602
6603 /*
6604 * Disable _POSIX_SPAWN_ALLOW_DATA_EXEC and _POSIX_SPAWN_DISABLE_ASLR
6605 * flags for setuid/setgid binaries.
6606 */
6607 if (vap->va_mode & (VSUID | VSGID)) {
6608 imgp->ip_flags &= ~(IMGPF_ALLOW_DATA_EXEC | IMGPF_DISABLE_ASLR);
6609 }
6610
6611 #if CONFIG_MACF
6612 error = mac_vnode_check_exec(imgp->ip_vfs_context, vp, imgp);
6613 if (error) {
6614 return error;
6615 }
6616 #endif
6617
6618 /* Check for execute permission */
6619 action = KAUTH_VNODE_EXECUTE;
6620 /* Traced images must also be readable */
6621 if (p->p_lflag & P_LTRACED) {
6622 action |= KAUTH_VNODE_READ_DATA;
6623 }
6624 if ((error = vnode_authorize(vp, NULL, action, imgp->ip_vfs_context)) != 0) {
6625 return error;
6626 }
6627
6628 #if 0
6629 /* Don't let it run if anyone had it open for writing */
6630 vnode_lock(vp);
6631 if (vp->v_writecount) {
6632 panic("going to return ETXTBSY %x", vp);
6633 vnode_unlock(vp);
6634 return ETXTBSY;
6635 }
6636 vnode_unlock(vp);
6637 #endif
6638
6639 /* XXX May want to indicate to underlying FS that vnode is open */
6640
6641 return error;
6642 }
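/*
 * Example: on a filesystem that is not authorization-opaque, a regular
 * file with mode 0644 fails here with EACCES even for root (no execute
 * bit is set anywhere), and a zero-length file with mode 0755 fails
 * with ENOEXEC before any image format parsing is attempted.
 */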
6643
6644
6645 /*
6646 * exec_handle_sugid
6647 *
6648 * Initially clear the P_SUGID in the process flags; if an SUGID process is
6649 * exec'ing a non-SUGID image, then this is the point of no return.
6650 *
6651 * If the image being activated is SUGID, then replace the credential with a
6652 * copy, disable tracing (unless the tracing process is root), reset the
6653 * mach task port to revoke it, and set the P_SUGID bit.
6654 *
6655 * If the saved user and group ID will be changing, then make sure it happens
6656 * to a new credential, rather than a shared one.
6657 *
6658 * Set the security token (this is probably obsolete, given that the token
6659 * should not technically be separate from the credential itself).
6660 *
6661 * Parameters: struct image_params * the image parameter block
6662 *
6663 * Returns: void No failure indication
6664 *
6665 * Implicit returns:
6666 * <process credential> Potentially modified/replaced
6667 * <task port> Potentially revoked
6668 * <process flags> P_SUGID bit potentially modified
6669 * <security token> Potentially modified
6670 */
6671 __attribute__((noinline))
6672 static int
6673 exec_handle_sugid(struct image_params *imgp)
6674 {
6675 proc_t p = vfs_context_proc(imgp->ip_vfs_context);
6676 kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context);
6677 int i;
6678 int leave_sugid_clear = 0;
6679 int mac_reset_ipc = 0;
6680 int error = 0;
6681 #if CONFIG_MACF
6682 int mac_transition, disjoint_cred = 0;
6683 int label_update_return = 0;
6684
6685 /*
6686 * Determine whether a call to update the MAC label will result in the
6687 * credential changing.
6688 *
6689 * Note: MAC policies which do not actually end up modifying
6690 * the label subsequently are strongly encouraged to
6691 * return 0 for this check, since a non-zero answer will
6692 * slow down the exec fast path for normal binaries.
6693 */
6694 mac_transition = mac_cred_check_label_update_execve(
6695 imgp->ip_vfs_context,
6696 imgp->ip_vp,
6697 imgp->ip_arch_offset,
6698 imgp->ip_scriptvp,
6699 imgp->ip_scriptlabelp,
6700 imgp->ip_execlabelp,
6701 p,
6702 &imgp->ip_px_smpx);
6703 #endif
6704
6705 OSBitAndAtomic(~((uint32_t)P_SUGID), &p->p_flag);
6706
6707 /*
6708 * Order of the following is important; group checks must go last,
6709 * as we use the success of the 'ismember' check combined with the
6710 * failure of the explicit match to indicate that we will be setting
6711 * the egid of the process even though the new process did not
6712 * require VSUID/VSGID bits in order for it to set the new group as
6713 * its egid.
6714 *
6715 * Note: Technically, by this we are implying a call to
6716 * setegid() in the new process, rather than implying
6717 * it used its VSGID bit to set the effective group,
6718 * even though there is no code in that process to make
6719 * such a call.
6720 */
6721 if (((imgp->ip_origvattr->va_mode & VSUID) != 0 &&
6722 kauth_cred_getuid(cred) != imgp->ip_origvattr->va_uid) ||
6723 ((imgp->ip_origvattr->va_mode & VSGID) != 0 &&
6724 ((kauth_cred_ismember_gid(cred, imgp->ip_origvattr->va_gid, &leave_sugid_clear) || !leave_sugid_clear) ||
6725 (kauth_cred_getgid(cred) != imgp->ip_origvattr->va_gid)))) {
6726 #if CONFIG_MACF
6727 /* label for MAC transition and neither VSUID nor VSGID */
6728 handle_mac_transition:
6729 #endif
6730
6731 #if CONFIG_SETUID
6732 /*
6733 * Replace the credential with a copy of itself if euid or
6734 * egid change.
6735 *
6736 * Note: setuid binaries will automatically opt out of
6737 * group resolver participation as a side effect
6738 * of this operation. This is an intentional
6739 * part of the security model, which requires a
6740 * participating credential be established by
6741 * escalating privilege, setting up all other
6742 * aspects of the credential including whether
6743 * or not to participate in external group
6744 * membership resolution, then dropping their
6745 * effective privilege to that of the desired
6746 * final credential state.
6747 *
6748 * Modifications to p_ucred must be guarded using the
6749 * proc's ucred lock. This prevents others from accessing
6750 * a garbage credential.
6751 */
6752
6753 if (imgp->ip_origvattr->va_mode & VSUID) {
6754 kauth_cred_proc_update(p, PROC_SETTOKEN_NONE,
6755 ^bool (kauth_cred_t parent __unused, kauth_cred_t model) {
6756 return kauth_cred_model_setresuid(model,
6757 KAUTH_UID_NONE,
6758 imgp->ip_origvattr->va_uid,
6759 imgp->ip_origvattr->va_uid,
6760 KAUTH_UID_NONE);
6761 });
6762 }
6763
6764 if (imgp->ip_origvattr->va_mode & VSGID) {
6765 kauth_cred_proc_update(p, PROC_SETTOKEN_NONE,
6766 ^bool (kauth_cred_t parent __unused, kauth_cred_t model) {
6767 return kauth_cred_model_setresgid(model,
6768 KAUTH_GID_NONE,
6769 imgp->ip_origvattr->va_gid,
6770 imgp->ip_origvattr->va_gid);
6771 });
6772 }
6773 #endif /* CONFIG_SETUID */
6774
6775 #if CONFIG_MACF
6776 /*
6777 * If a policy has indicated that it will transition the label,
6778 * before making the call into the MAC policies, get a new
6779 * duplicate credential, so they can modify it without
6780 * modifying any others sharing it.
6781 */
6782 if (mac_transition) {
6783 /*
6784 * This hook may generate upcalls that require
6785 * importance donation from the kernel.
6786 * (23925818)
6787 */
6788 thread_t thread = current_thread();
6789 thread_enable_send_importance(thread, TRUE);
6790 kauth_proc_label_update_execve(p,
6791 imgp->ip_vfs_context,
6792 imgp->ip_vp,
6793 imgp->ip_arch_offset,
6794 imgp->ip_scriptvp,
6795 imgp->ip_scriptlabelp,
6796 imgp->ip_execlabelp,
6797 &imgp->ip_csflags,
6798 &imgp->ip_px_smpx,
6799 &disjoint_cred, /* will be non zero if disjoint */
6800 &label_update_return);
6801 thread_enable_send_importance(thread, FALSE);
6802
6803 if (disjoint_cred) {
6804 /*
6805 * If updating the MAC label resulted in a
6806 * disjoint credential, flag that we need to
6807 * set the P_SUGID bit. This protects
6808 * against debuggers being attached by an
6809 * insufficiently privileged process onto the
6810 * result of a transition to a more privileged
6811 * credential.
6812 */
6813 leave_sugid_clear = 0;
6814 }
6815
6816 imgp->ip_mac_return = label_update_return;
6817 }
6818
6819 mac_reset_ipc = mac_proc_check_inherit_ipc_ports(p, p->p_textvp, p->p_textoff, imgp->ip_vp, imgp->ip_arch_offset, imgp->ip_scriptvp);
6820
6821 #endif /* CONFIG_MACF */
6822
6823 /*
6824 * If 'leave_sugid_clear' is non-zero, then we passed the
6825 * VSUID and MACF checks, and successfully determined that
6826 * the previous cred was a member of the VSGID group, but
6827 * that it was not the default at the time of the execve,
6828 * and that the post-labelling credential was not disjoint.
6829 * So we don't set the P_SUGID or reset mach ports and fds
6830 * on the basis of simply running this code.
6831 */
6832 if (mac_reset_ipc || !leave_sugid_clear) {
6833 /*
6834 * Have mach reset the task and thread ports.
6835 * We don't want anyone who had the ports before
6836 * a setuid exec to be able to access/control the
6837 * task/thread after.
6838 */
6839 ipc_task_reset((imgp->ip_new_thread != NULL) ?
6840 get_threadtask(imgp->ip_new_thread) : proc_task(p));
6841 ipc_thread_reset((imgp->ip_new_thread != NULL) ?
6842 imgp->ip_new_thread : current_thread());
6843 }
6844
6845 if (!leave_sugid_clear) {
6846 /*
6847 * Flag the process as setuid.
6848 */
6849 OSBitOrAtomic(P_SUGID, &p->p_flag);
6850
6851 /*
6852 * Radar 2261856; setuid security hole fix
6853 * XXX For setuid processes, attempt to ensure that
6854 * stdin, stdout, and stderr are already allocated.
6855 * We do not want userland to accidentally allocate
6856 * descriptors in this range which has implied meaning
6857 * to libc.
6858 */
			for (i = 0; i < 3; i++) {
				if (fp_get_noref_locked(p, i) != NULL) {
					continue;
				}

				/*
				 * Do the kernel equivalent of
				 *
				 *      if i == 0
				 *              (void) open("/dev/null", O_RDONLY);
				 *      else
				 *              (void) open("/dev/null", O_WRONLY);
				 */

				struct fileproc *fp;
				int indx;
				int flag;
				struct nameidata *ndp = NULL;

				if (i == 0) {
					flag = FREAD;
				} else {
					flag = FWRITE;
				}

				if ((error = falloc_exec(p, imgp->ip_vfs_context,
				    &fp, &indx)) != 0) {
					continue;
				}

				ndp = kalloc_type(struct nameidata,
				    Z_WAITOK | Z_ZERO | Z_NOFAIL);

				NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
				    CAST_USER_ADDR_T("/dev/null"),
				    imgp->ip_vfs_context);

				if ((error = vn_open(ndp, flag, 0)) != 0) {
					fp_free(p, indx, fp);
					kfree_type(struct nameidata, ndp);
					break;
				}

				struct fileglob *fg = fp->fp_glob;

				fg->fg_flag = flag;
				fg->fg_ops = &vnops;
				fp_set_data(fp, ndp->ni_vp);

				vnode_put(ndp->ni_vp);

				proc_fdlock(p);
				procfdtbl_releasefd(p, indx, NULL);
				fp_drop(p, indx, fp, 1);
				proc_fdunlock(p);

				kfree_type(struct nameidata, ndp);
			}
		}
	}
#if CONFIG_MACF
	else {
		/*
		 * We are here because we were told that the MAC label will
		 * be transitioned, and the binary is not VSUID or VSGID; to
		 * deal with this case, we could either duplicate a lot of
		 * code, or we can indicate we want to default the P_SUGID
		 * bit clear and jump back up.
		 */
		if (mac_transition) {
			leave_sugid_clear = 1;
			goto handle_mac_transition;
		}
	}

#endif /* CONFIG_MACF */

	/* Update the process' identity version and set the security token */
	proc_setpidversion(p, OSIncrementAtomic(&nextpidversion));
	task_set_uniqueid(proc_task(p));

	/*
	 * Implement the semantic where the effective user and group become
	 * the saved user and group in exec'ed programs.
	 */
	kauth_cred_proc_update(p, PROC_SETTOKEN_ALWAYS,
	    ^bool (kauth_cred_t parent __unused, kauth_cred_t model) {
		posix_cred_t pcred = posix_cred_get(model);

		if (pcred->cr_svuid == pcred->cr_uid &&
		    pcred->cr_svgid == pcred->cr_gid) {
			return false;
		}

		pcred->cr_svuid = pcred->cr_uid;
		pcred->cr_svgid = pcred->cr_gid;
		return true;
	});
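
	/*
	 * Illustrative note (standard POSIX saved-ID semantics, not enforced
	 * here): after exec of a setuid-root image, cr_svuid == cr_uid == 0
	 * in the updated cred, so the process can later drop to seteuid(ruid)
	 * and still regain seteuid(0) by virtue of the saved user ID.
	 */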

	return error;
}


/*
 * create_unix_stack
 *
 * Description: Set the user stack address for the process to the provided
 *              address.  If a custom stack was not set as a result of the
 *              load process (i.e. as specified by the image file for the
 *              executable), then allocate the stack in the provided map and
 *              set up appropriate guard pages for enforcing administrative
 *              limits on stack growth, if they end up being needed.
 *
 * Parameters:  p               Process to set stack on
 *              load_result     Information from mach-o load commands
 *              map             Address map in which to allocate the new stack
 *
 * Returns:     KERN_SUCCESS    Stack successfully created
 *              !KERN_SUCCESS   Mach failure code
 */
__attribute__((noinline))
static kern_return_t
create_unix_stack(vm_map_t map, load_result_t *load_result,
    proc_t p)
{
	mach_vm_size_t size, prot_size;
	mach_vm_offset_t addr, prot_addr;
	kern_return_t kr;

	mach_vm_address_t user_stack = load_result->user_stack;

	proc_lock(p);
	p->user_stack = (uintptr_t)user_stack;
	if (load_result->custom_stack) {
		p->p_lflag |= P_LCUSTOM_STACK;
	}
	proc_unlock(p);
	if (vm_map_page_shift(map) < (int)PAGE_SHIFT) {
		DEBUG4K_LOAD("map %p user_stack 0x%llx custom %d user_stack_alloc_size 0x%llx\n", map, user_stack, load_result->custom_stack, load_result->user_stack_alloc_size);
	}

	if (load_result->user_stack_alloc_size > 0) {
		/*
		 * Allocate enough space for the maximum stack size we
		 * will ever authorize and an extra page to act as
		 * a guard page for stack overflows.  For default stacks,
		 * vm_initial_limit_stack takes care of the extra guard page.
		 * Otherwise we must allocate it ourselves.
		 */
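		/*
		 * Sketch of the resulting layout (illustrative, assuming the
		 * default fixed placement below succeeds):
		 *
		 *   addr                            addr + size == user_stack
		 *   | VM_PROT_NONE guard region | usable stack (grows down) |
		 *
		 * The guard region spans prot_size bytes and is what enforces
		 * the administrative stack limit.
		 */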
		if (mach_vm_round_page_overflow(load_result->user_stack_alloc_size, &size)) {
			return KERN_INVALID_ARGUMENT;
		}
		addr = vm_map_trunc_page(load_result->user_stack - size,
		    vm_map_page_mask(map));
		kr = mach_vm_allocate_kernel(map, &addr, size,
		    VM_MAP_KERNEL_FLAGS_FIXED(.vm_tag = VM_MEMORY_STACK));
		if (kr != KERN_SUCCESS) {
			// Can't allocate at default location, try anywhere
			addr = 0;
			kr = mach_vm_allocate_kernel(map, &addr, size,
			    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_MEMORY_STACK));
			if (kr != KERN_SUCCESS) {
				return kr;
			}

			user_stack = addr + size;
			load_result->user_stack = (user_addr_t)user_stack;

			proc_lock(p);
			p->user_stack = (uintptr_t)user_stack;
			proc_unlock(p);
		}

		load_result->user_stack_alloc = (user_addr_t)addr;

		/*
		 * And prevent access to what's above the current stack
		 * size limit for this process.
		 */
		if (load_result->user_stack_size == 0) {
			load_result->user_stack_size = proc_limitgetcur(p, RLIMIT_STACK);
			prot_size = vm_map_trunc_page(size - load_result->user_stack_size, vm_map_page_mask(map));
		} else {
			prot_size = PAGE_SIZE;
		}

		prot_addr = addr;
		kr = mach_vm_protect(map,
		    prot_addr,
		    prot_size,
		    FALSE,
		    VM_PROT_NONE);
		if (kr != KERN_SUCCESS) {
			(void)mach_vm_deallocate(map, addr, size);
			return kr;
		}
	}

	return KERN_SUCCESS;
}

#include <sys/reboot.h>

/*
 * load_init_program_at_path
 *
 * Description: Load the "init" program; in most cases, this will be "launchd"
 *
 * Parameters:  p               Process to call execve() to create
 *                              the "init" program
 *              scratch_addr    Page in p, scratch space
 *              path            NULL-terminated path
 *
 * Returns:     KERN_SUCCESS    Success
 *              !KERN_SUCCESS   See execve/mac_execve for error codes
 *
 * Notes:       The process that is passed in is the first manufactured
 *              process on the system, and gets here via bsd_ast() firing
 *              for the first time.  This is done to ensure that bsd_init()
 *              has run to completion.
 *
 *              The address map of the first manufactured process matches the
 *              word width of the kernel.  Once the self-exec completes, the
 *              initproc might be different.
 */
static int
load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char *path)
{
	int retval[2];
	int error;
	struct execve_args init_exec_args;
	user_addr_t argv0 = USER_ADDR_NULL, argv1 = USER_ADDR_NULL;

	/*
	 * Validate inputs and pre-conditions
	 */
	assert(p);
	assert(scratch_addr);
	assert(path);

	/*
	 * Copy out program name.
	 */
	size_t path_length = strlen(path) + 1;
	argv0 = scratch_addr;
	error = copyout(path, argv0, path_length);
	if (error) {
		return error;
	}

	scratch_addr = USER_ADDR_ALIGN(scratch_addr + path_length, sizeof(user_addr_t));

	/*
	 * Put out first (and only) argument, similarly.
	 * Assumes everything fits in a page as allocated above.
	 */
	if (boothowto & RB_SINGLE) {
		const char *init_args = "-s";
		size_t init_args_length = strlen(init_args) + 1;

		argv1 = scratch_addr;
		error = copyout(init_args, argv1, init_args_length);
		if (error) {
			return error;
		}

		scratch_addr = USER_ADDR_ALIGN(scratch_addr + init_args_length, sizeof(user_addr_t));
	}
7127
7128 if (proc_is64bit(p)) {
7129 user64_addr_t argv64bit[3] = {};
7130
7131 argv64bit[0] = argv0;
7132 argv64bit[1] = argv1;
7133 argv64bit[2] = USER_ADDR_NULL;
7134
7135 error = copyout(argv64bit, scratch_addr, sizeof(argv64bit));
7136 if (error) {
7137 return error;
7138 }
7139 } else {
7140 user32_addr_t argv32bit[3] = {};
7141
7142 argv32bit[0] = (user32_addr_t)argv0;
7143 argv32bit[1] = (user32_addr_t)argv1;
7144 argv32bit[2] = USER_ADDR_NULL;
7145
7146 error = copyout(argv32bit, scratch_addr, sizeof(argv32bit));
7147 if (error) {
7148 return error;
7149 }
7150 }
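
	/*
	 * At this point the scratch page looks roughly like this
	 * (sketch; exact offsets depend on path length and alignment):
	 *
	 *   argv0        -> "path\0"
	 *   argv1        -> "-s\0"        (only when booted RB_SINGLE)
	 *   scratch_addr -> { argv0, argv1, NULL }
	 *                   (pointer array, used as argp below)
	 */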

	/*
	 * Set up argument block for fake call to execve.
	 */
	init_exec_args.fname = argv0;
	init_exec_args.argp = scratch_addr;
	init_exec_args.envp = USER_ADDR_NULL;

	/*
	 * Ensure the init task is set up with a uid/gid-0 security token.
	 *
	 * The access to the cred is safe:
	 * the proc isn't running yet, it's stable.
	 */
	set_security_token(p, proc_ucred_unsafe(p));

	return execve(p, &init_exec_args, retval);
}

static const char * init_programs[] = {
#if DEBUG
	"/usr/appleinternal/sbin/launchd.debug",
#endif
#if DEVELOPMENT || DEBUG
	"/usr/appleinternal/sbin/launchd.development",
#endif
	"/sbin/launchd",
};

/*
 * load_init_program
 *
 * Description: Load the "init" program; in most cases, this will be "launchd"
 *
 * Parameters:  p               Process to call execve() to create
 *                              the "init" program
 *
 * Returns:     (void)
 *
 * Notes:       The process that is passed in is the first manufactured
 *              process on the system, and gets here via bsd_ast() firing
 *              for the first time.  This is done to ensure that bsd_init()
 *              has run to completion.
 *
 *              In DEBUG & DEVELOPMENT builds, the launchdsuffix boot-arg
 *              may be used to select a specific launchd executable.  As with
 *              the kcsuffix boot-arg, setting launchdsuffix to "" or "release"
 *              will force /sbin/launchd to be selected.
 *
 *              Search order by build:
 *
 *              DEBUG   DEVELOPMENT     RELEASE  PATH
 *              ----------------------------------------------------------------------------------
 *              1       1               NA       /usr/appleinternal/sbin/launchd.$LAUNCHDSUFFIX
 *              2       NA              NA       /usr/appleinternal/sbin/launchd.debug
 *              3       2               NA       /usr/appleinternal/sbin/launchd.development
 *              4       3               1        /sbin/launchd
 */
void
load_init_program(proc_t p)
{
	uint32_t i;
	int error;
	vm_map_t map = current_map();
	mach_vm_offset_t scratch_addr = 0;
	mach_vm_size_t map_page_size = vm_map_page_size(map);

	(void) mach_vm_allocate_kernel(map, &scratch_addr, map_page_size,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE());
#if CONFIG_MEMORYSTATUS
	(void) memorystatus_init_at_boot_snapshot();
#endif /* CONFIG_MEMORYSTATUS */

#if DEBUG || DEVELOPMENT
	/* Check for boot-arg suffix first */
	char launchd_suffix[64];
	if (PE_parse_boot_argn("launchdsuffix", launchd_suffix, sizeof(launchd_suffix))) {
		char launchd_path[128];
		boolean_t is_release_suffix = ((launchd_suffix[0] == 0) ||
		    (strcmp(launchd_suffix, "release") == 0));

		if (is_release_suffix) {
			printf("load_init_program: attempting to load /sbin/launchd\n");
			error = load_init_program_at_path(p, (user_addr_t)scratch_addr, "/sbin/launchd");
			if (!error) {
				return;
			}

			panic("Process 1 exec of launchd.release failed, errno %d", error);
		} else {
			strlcpy(launchd_path, "/usr/appleinternal/sbin/launchd.", sizeof(launchd_path));
			strlcat(launchd_path, launchd_suffix, sizeof(launchd_path));

			printf("load_init_program: attempting to load %s\n", launchd_path);
			error = load_init_program_at_path(p, (user_addr_t)scratch_addr, launchd_path);
			if (!error) {
				return;
			} else if (error != ENOENT) {
				printf("load_init_program: failed loading %s: errno %d\n", launchd_path, error);
			}
		}
	}
#endif

	error = ENOENT;
	for (i = 0; i < sizeof(init_programs) / sizeof(init_programs[0]); i++) {
		printf("load_init_program: attempting to load %s\n", init_programs[i]);
		error = load_init_program_at_path(p, (user_addr_t)scratch_addr, init_programs[i]);
		if (!error) {
			return;
		} else if (error != ENOENT) {
			printf("load_init_program: failed loading %s: errno %d\n", init_programs[i], error);
		}
	}

	panic("Process 1 exec of %s failed, errno %d", ((i == 0) ? "<null>" : init_programs[i - 1]), error);
}

/*
 * load_return_to_errno
 *
 * Description: Convert a load_return_t (Mach error) to an errno (BSD error)
 *
 * Parameters:  lrtn            Mach error number
 *
 * Returns:     (int)           BSD error number
 *              0               Success
 *              EBADARCH        Bad architecture
 *              EBADMACHO       Bad Mach object file
 *              ESHLIBVERS      Bad shared library version
 *              ENOMEM          Out of memory/resource shortage
 *              EACCES          Access denied
 *              ENOENT          Entry not found (usually "file
 *                              does not exist")
 *              EIO             An I/O error occurred
 *              EBADEXEC        The executable is corrupt/unknown
 */
static int
load_return_to_errno(load_return_t lrtn)
{
	switch (lrtn) {
	case LOAD_SUCCESS:
		return 0;
	case LOAD_BADARCH:
		return EBADARCH;
	case LOAD_BADMACHO:
	case LOAD_BADMACHO_UPX:
		return EBADMACHO;
	case LOAD_SHLIB:
		return ESHLIBVERS;
	case LOAD_NOSPACE:
	case LOAD_RESOURCE:
		return ENOMEM;
	case LOAD_PROTECT:
		return EACCES;
	case LOAD_ENOENT:
		return ENOENT;
	case LOAD_IOERROR:
		return EIO;
	case LOAD_DECRYPTFAIL:
		return EAUTH;
	case LOAD_FAILURE:
	default:
		return EBADEXEC;
	}
}

#include <mach/mach_types.h>
#include <mach/vm_prot.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <kern/clock.h>
#include <mach/kern_return.h>

/*
 * execargs_alloc
 *
 * Description: Allocate the block of memory used by the execve arguments.
 *              At the same time, we allocate a page so that we can read in
 *              the first page of the image.
 *
 * Parameters:  struct image_params *   the image parameter block
 *
 * Returns:     0               Success
 *              EINVAL          Invalid argument
 *              EACCES          Permission denied
 *              EINTR           Interrupted function
 *              ENOMEM          Not enough space
 *
 * Notes:       This is a temporary allocation into the kernel address space
 *              to enable us to copy arguments in from user space.  This is
 *              necessitated by not mapping the process calling execve() into
 *              the kernel address space during the execve() system call.
 *
 *              We assemble the argument and environment, etc., into this
 *              region before copying it as a single block into the child
 *              process address space (at the top or bottom of the stack,
 *              depending on which way the stack grows; see the function
 *              exec_copyout_strings() for details).
 *
 *              This ends up with a second (possibly unnecessary) copy
 *              compared with assembling the data directly into the child
 *              address space; but since we cannot guarantee that the parent
 *              has not modified its environment, we cannot assume the data
 *              is laid out as a single contiguous block there anyway.
 */


static int execargs_waiters = 0;
static LCK_MTX_DECLARE_ATTR(execargs_cache_lock, &proc_lck_grp, &proc_lck_attr);
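
/*
 * Sketch of the caching scheme implemented below (descriptive only):
 * up to execargs_cache_size purgeable regions are parked in
 * execargs_cache, and execargs_free_count acts as a counting semaphore
 * bounding concurrent execs.  On alloc, a cached region is made
 * nonvolatile again (the VM may have reclaimed its contents while
 * volatile, which is fine for scratch space); on free, the region is
 * volatilized and returned to the cache.  Threads that find the count
 * at zero sleep on &execargs_free_count.
 */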

static void
execargs_lock_lock(void)
{
	lck_mtx_lock_spin(&execargs_cache_lock);
}

static void
execargs_lock_unlock(void)
{
	lck_mtx_unlock(&execargs_cache_lock);
}

static wait_result_t
execargs_lock_sleep(void)
{
	return lck_mtx_sleep(&execargs_cache_lock, LCK_SLEEP_DEFAULT, &execargs_free_count, THREAD_INTERRUPTIBLE);
}

static kern_return_t
execargs_purgeable_allocate(char **execarg_address)
{
	mach_vm_offset_t addr = 0;
	kern_return_t kr = mach_vm_allocate_kernel(bsd_pageable_map, &addr,
	    BSD_PAGEABLE_SIZE_PER_EXEC,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmf_purgeable = true));
	*execarg_address = (char *)addr;
	assert(kr == KERN_SUCCESS);
	return kr;
}

static kern_return_t
execargs_purgeable_reference(void *execarg_address)
{
	int state = VM_PURGABLE_NONVOLATILE;
	kern_return_t kr = vm_map_purgable_control(bsd_pageable_map,
	    (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);

	assert(kr == KERN_SUCCESS);
	return kr;
}

static kern_return_t
execargs_purgeable_volatilize(void *execarg_address)
{
	int state = VM_PURGABLE_VOLATILE | VM_PURGABLE_ORDERING_OBSOLETE;
	kern_return_t kr;
	kr = vm_map_purgable_control(bsd_pageable_map,
	    (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state);

	assert(kr == KERN_SUCCESS);

	return kr;
}

static void
execargs_wakeup_waiters(void)
{
	thread_wakeup(&execargs_free_count);
}

static int
execargs_alloc(struct image_params *imgp)
{
	kern_return_t kret;
	wait_result_t res;
	int i, cache_index = -1;

	execargs_lock_lock();

	while (execargs_free_count == 0) {
		execargs_waiters++;
		res = execargs_lock_sleep();
		execargs_waiters--;
		if (res != THREAD_AWAKENED) {
			execargs_lock_unlock();
			return EINTR;
		}
	}

	execargs_free_count--;

	for (i = 0; i < execargs_cache_size; i++) {
		vm_offset_t element = execargs_cache[i];
		if (element) {
			cache_index = i;
			imgp->ip_strings = (char *)(execargs_cache[i]);
			execargs_cache[i] = 0;
			break;
		}
	}

	assert(execargs_free_count >= 0);

	execargs_lock_unlock();

	if (cache_index == -1) {
		kret = execargs_purgeable_allocate(&imgp->ip_strings);
	} else {
		kret = execargs_purgeable_reference(imgp->ip_strings);
	}

	assert(kret == KERN_SUCCESS);
	if (kret != KERN_SUCCESS) {
		return ENOMEM;
	}

	/* last page used to read in file headers */
	imgp->ip_vdata = imgp->ip_strings + (NCARGS + PAGE_SIZE);
	imgp->ip_strendp = imgp->ip_strings;
	imgp->ip_argspace = NCARGS;
	imgp->ip_strspace = (NCARGS + PAGE_SIZE);

	return 0;
}

/*
 * execargs_free
 *
 * Description: Free the block of memory used by the execve arguments and the
 *              first page of the executable by a previous call to the function
 *              execargs_alloc().
 *
 * Parameters:  struct image_params *   the image parameter block
 *
 * Returns:     0               Success
 *              EINVAL          Invalid argument
 *              EINTR           Operation interrupted
 */
static int
execargs_free(struct image_params *imgp)
{
	kern_return_t kret;
	int i;
	boolean_t needs_wakeup = FALSE;

	kret = execargs_purgeable_volatilize(imgp->ip_strings);

	execargs_lock_lock();
	execargs_free_count++;

	for (i = 0; i < execargs_cache_size; i++) {
		vm_offset_t element = execargs_cache[i];
		if (element == 0) {
			execargs_cache[i] = (vm_offset_t) imgp->ip_strings;
			imgp->ip_strings = NULL;
			break;
		}
	}

	assert(imgp->ip_strings == NULL);

	if (execargs_waiters > 0) {
		needs_wakeup = TRUE;
	}

	execargs_lock_unlock();

	if (needs_wakeup == TRUE) {
		execargs_wakeup_waiters();
	}

	return kret == KERN_SUCCESS ? 0 : EINVAL;
}

void
uthread_set_exec_data(struct uthread *uth, struct image_params *imgp)
{
	uth->uu_save.uus_exec_data.imgp = imgp;
}

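/*
 * Note (descriptive, not enforced here): ip_strings is expected to begin
 * with the EXECUTABLE_KEY-prefixed executable path, so skipping
 * strlen(EXECUTABLE_KEY) bytes below yields the raw path that is copied
 * out to the caller.
 */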
size_t
thread_get_current_exec_path(char *path, size_t size)
{
	struct uthread *uth = current_uthread();
	struct image_params *imgp = uth->uu_save.uus_exec_data.imgp;
	size_t string_size = 0;
	char *exec_path;

	if (path == NULL || imgp == NULL || imgp->ip_strings == NULL) {
		return 0;
	}

	exec_path = imgp->ip_strings + strlen(EXECUTABLE_KEY);
	string_size = imgp->ip_strendp - exec_path;
	string_size = MIN(MAXPATHLEN, string_size);
	string_size = MIN(size, string_size);

	string_size = strlcpy(path, exec_path, string_size);
	return string_size;
}

static void
exec_resettextvp(proc_t p, struct image_params *imgp)
{
	vnode_t vp;
	off_t offset;
	vnode_t tvp = p->p_textvp;
	int ret;

	vp = imgp->ip_vp;
	offset = imgp->ip_arch_offset;

	if (vp == NULLVP) {
		panic("exec_resettextvp: expected valid vp");
	}

	ret = vnode_ref(vp);
	proc_lock(p);
	if (ret == 0) {
		p->p_textvp = vp;
		p->p_textoff = offset;
	} else {
		p->p_textvp = NULLVP;   /* this is paranoia */
		p->p_textoff = 0;
	}
	proc_unlock(p);

	if (tvp != NULLVP) {
		if (vnode_getwithref(tvp) == 0) {
			vnode_rele(tvp);
			vnode_put(tvp);
		}
	}
}

// Includes the 0-byte (therefore "SIZE" instead of "LEN").
static const size_t CS_CDHASH_STRING_SIZE = CS_CDHASH_LEN * 2 + 1;

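/*
 * Illustrative example: a cdhash whose first bytes are
 * { 0xde, 0xad, 0xbe, 0xef, ... } renders as "deadbeef..."; two hex
 * digits per byte plus the trailing NUL account for the buffer size
 * above.
 */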
static void
cdhash_to_string(char str[CS_CDHASH_STRING_SIZE], uint8_t const * const cdhash)
{
	static char const nibble[] = "0123456789abcdef";

	/* Apparently still the safest way to get a hex representation
	 * of binary data.
	 * xnu's printf routines have %*D/%20D in theory, but "not really", see:
	 * <rdar://problem/33328859> confusion around %*D/%nD in printf
	 */
	for (int i = 0; i < CS_CDHASH_LEN; ++i) {
		str[i * 2] = nibble[(cdhash[i] & 0xf0) >> 4];
		str[i * 2 + 1] = nibble[cdhash[i] & 0x0f];
	}
	str[CS_CDHASH_STRING_SIZE - 1] = 0;
}

/*
 * __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__
 *
 * Description: Waits for the userspace daemon to respond to the request
 *              we made.  The function is declared non-inline so that it is
 *              visible in stackshots and spindumps, and to aid debugging.
 */
__attribute__((noinline)) int
__EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid)
{
	return find_code_signature(task_access_port, new_pid);
}

/*
 * Update signature dependent process state, called by
 * process_signature.
 */
static int
proc_process_signature(proc_t p, os_reason_t *signature_failure_reason)
{
	int error = 0;
	char const *error_msg = NULL;

	kern_return_t kr = machine_task_process_signature(proc_get_task_raw(p), proc_platform(p), proc_sdk(p), &error_msg);

	if (kr != KERN_SUCCESS) {
		error = EINVAL;

		if (error_msg != NULL) {
			uint32_t error_msg_len = (uint32_t)strlen(error_msg) + 1;
			mach_vm_address_t data_addr = 0;
			int reason_error = 0;
			int kcdata_error = 0;

			os_reason_t reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY);
			reason->osr_flags = OS_REASON_FLAG_GENERATE_CRASH_REPORT | OS_REASON_FLAG_CONSISTENT_FAILURE;

			if ((reason_error = os_reason_alloc_buffer_noblock(reason,
			    kcdata_estimate_required_buffer_size(1, error_msg_len))) == 0 &&
			    (kcdata_error = kcdata_get_memory_addr(&reason->osr_kcd_descriptor,
			    EXIT_REASON_USER_DESC, error_msg_len,
			    &data_addr)) == KERN_SUCCESS) {
				kern_return_t mc_error = kcdata_memcpy(&reason->osr_kcd_descriptor, (mach_vm_address_t)data_addr,
				    error_msg, error_msg_len);

				if (mc_error != KERN_SUCCESS) {
					printf("process_signature: failed to copy reason string (kcdata_memcpy error: %d)\n",
					    mc_error);
				}
			} else {
				printf("failed to allocate space for reason string (os_reason_alloc_buffer error: %d, kcdata error: %d, length: %u)\n",
				    reason_error, kcdata_error, error_msg_len);
			}

			assert(*signature_failure_reason == NULL); // shouldn't have gotten so far
			*signature_failure_reason = reason;
		}
	}
	return error;
}

static int
process_signature(proc_t p, struct image_params *imgp)
{
	mach_port_t port = IPC_PORT_NULL;
	kern_return_t kr = KERN_FAILURE;
	int error = EACCES;
	boolean_t unexpected_failure = FALSE;
	struct cs_blob *csb;
	boolean_t require_success = FALSE;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	const int vfexec = 0;
	os_reason_t signature_failure_reason = OS_REASON_NULL;

	/*
	 * Override inherited code signing flags with the
	 * ones for the process that is being successfully
	 * loaded
	 */
	proc_lock(p);
	proc_csflags_update(p, imgp->ip_csflags);
	proc_unlock(p);

	/* Set the switch_protect flag on the map */
	if (proc_getcsflags(p) & (CS_HARD | CS_KILL)) {
		vm_map_switch_protect(get_task_map(proc_task(p)), TRUE);
	}
	/* set the cs_enforced flags in the map */
	if (proc_getcsflags(p) & CS_ENFORCEMENT) {
		vm_map_cs_enforcement_set(get_task_map(proc_task(p)), TRUE);
	} else {
		vm_map_cs_enforcement_set(get_task_map(proc_task(p)), FALSE);
	}

	/*
	 * Image activation may fail due to policy.  This is unexpected,
	 * but if the security framework did not approve of the exec,
	 * kill the process and return immediately.
	 */
	if (imgp->ip_mac_return != 0) {
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
		    proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY, 0, 0);
		signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY);
		error = imgp->ip_mac_return;
		unexpected_failure = TRUE;
		goto done;
	}

	if (imgp->ip_cs_error != OS_REASON_NULL) {
		signature_failure_reason = imgp->ip_cs_error;
		imgp->ip_cs_error = OS_REASON_NULL;
		error = EACCES;
		goto done;
	}

	/* call the launch constraints hook */
	os_reason_t launch_constraint_reason;
	if ((error = mac_proc_check_launch_constraints(p, imgp, &launch_constraint_reason)) != 0) {
		signature_failure_reason = launch_constraint_reason;
		goto done;
	}

#if XNU_TARGET_OS_OSX
	/* Check for platform passed in spawn attr if iOS binary is being spawned */
	if (proc_platform(p) == PLATFORM_IOS) {
		struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
		if (psa == NULL || psa->psa_platform == 0) {
			boolean_t no_sandbox_entitled = FALSE;
#if DEBUG || DEVELOPMENT
			/*
			 * Allow iOS binaries to spawn on internal systems
			 * if the no-sandbox entitlement is present or the
			 * unentitled_ios_sim_launch boot-arg is set to true.
			 */
			if (unentitled_ios_sim_launch) {
				no_sandbox_entitled = TRUE;
			} else {
				no_sandbox_entitled = IOVnodeHasEntitlement(imgp->ip_vp,
				    (int64_t)imgp->ip_arch_offset, "com.apple.private.security.no-sandbox");
			}
#endif /* DEBUG || DEVELOPMENT */
			if (!no_sandbox_entitled) {
				signature_failure_reason = os_reason_create(OS_REASON_EXEC,
				    EXEC_EXIT_REASON_WRONG_PLATFORM);
				error = EACCES;
				goto done;
			}
			printf("Allowing spawn of iOS binary %s since it has "
			    "com.apple.private.security.no-sandbox entitlement or unentitled_ios_sim_launch "
			    "boot-arg set to true\n", p->p_name);
		} else if (psa->psa_platform != PLATFORM_IOS) {
			/* Simulator binary spawned with wrong platform */
			signature_failure_reason = os_reason_create(OS_REASON_EXEC,
			    EXEC_EXIT_REASON_WRONG_PLATFORM);
			error = EACCES;
			goto done;
		} else {
			printf("Allowing spawn of iOS binary %s since correct platform was passed in spawn\n",
			    p->p_name);
		}
	}
#endif /* XNU_TARGET_OS_OSX */

	/* If the code signature came through the image activation path, we skip the
	 * taskgated / externally attached path. */
	if (imgp->ip_csflags & CS_SIGNED) {
		error = 0;
		goto done;
	}

	/* The rest of the code is for signatures that either already have been externally
	 * attached (likely, but not necessarily, by a previous run through the taskgated
	 * path), or that will now be attached by taskgated. */

	kr = task_get_task_access_port(proc_task(p), &port);
	if (KERN_SUCCESS != kr || !IPC_PORT_VALID(port)) {
		error = 0;
		if (require_success) {
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
			    proc_getpid(p), OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT, 0, 0);
			signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT);
			error = EACCES;
		}
		goto done;
	}

	/*
	 * taskgated returns KERN_SUCCESS if it has completed its work
	 * and the exec should continue, KERN_FAILURE if the exec should
	 * fail, or it may error out with a different error code in the
	 * event of a mig failure (e.g. process was signalled during the
	 * rpc call, taskgated died, mig server died etc.).
	 */

	kr = __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(port, proc_getpid(p));
	switch (kr) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_FAILURE:
		error = EACCES;

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
		    proc_getpid(p), OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG, 0, 0);
		signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG);
		goto done;
	default:
		error = EACCES;

		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
		    proc_getpid(p), OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER, 0, 0);
		signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER);
		unexpected_failure = TRUE;
		goto done;
	}

	/* Only do this if exec_resettextvp() did not fail */
	if (p->p_textvp != NULLVP) {
		csb = ubc_cs_blob_get(p->p_textvp, -1, -1, p->p_textoff);

		if (csb != NULL) {
			/* The enforcement we can do here is very limited, so we only allow
			 * the one case that is the reason this code path still exists:
			 * adhoc-signed non-platform binaries without special cs_flags and
			 * without any entitlements (unrestricted ones still pass AMFI). */
			if (
				/* Revalidate the blob if necessary through bumped generation count. */
				(ubc_cs_generation_check(p->p_textvp) == 0 ||
				ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0, proc_platform(p)) == 0) &&
				/* Only CS_ADHOC, no CS_KILL, CS_HARD etc. */
				(csb->csb_flags & CS_ALLOWED_MACHO) == CS_ADHOC &&
				/* If it has a CMS blob, it's not adhoc. The CS_ADHOC flag can lie. */
				csblob_find_blob_bytes((const uint8_t *)csb->csb_mem_kaddr, csb->csb_mem_size,
				    CSSLOT_SIGNATURESLOT,
				    CSMAGIC_BLOBWRAPPER) == NULL &&
				/* It could still be in a trust cache (unlikely with CS_ADHOC), or a magic path. */
				csb->csb_platform_binary == 0 &&
				/* No entitlements, not even unrestricted ones. */
				csb->csb_entitlements_blob == NULL &&
				csb->csb_der_entitlements_blob == NULL) {
				proc_lock(p);
				proc_csflags_set(p, CS_SIGNED | CS_VALID);
				proc_unlock(p);
			} else {
				uint8_t cdhash[CS_CDHASH_LEN];
				char cdhash_string[CS_CDHASH_STRING_SIZE];
				proc_getcdhash(p, cdhash);
				cdhash_to_string(cdhash_string, cdhash);
				printf("ignoring detached code signature on '%s' with cdhash '%s' "
				    "because it is invalid, or not a simple adhoc signature.\n",
				    p->p_name, cdhash_string);
			}
		}
	}

done:
	if (0 == error) {
		/*
		 * Update the new process's signature-dependent process
		 * state.
		 */

		error = proc_process_signature(p, &signature_failure_reason);
	}

	if (0 == error) {
		/*
		 * Update the new main thread's signature-dependent thread
		 * state.  This was also called when the thread was created,
		 * but for the main thread the signature was not yet attached
		 * at that time.
		 */
		kr = thread_process_signature(imgp->ip_new_thread, proc_get_task_raw(p));

		if (kr != KERN_SUCCESS) {
			error = EINVAL;
			signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_MACHINE_THREAD);
		}
	}

	if (0 == error) {
		/* The process's code-signature-related properties are
		 * fully set up, so this is an opportune moment to log
		 * platform binary execution, if desired. */
		if (platform_exec_logging != 0 && csproc_get_platform_binary(p)) {
			uint8_t cdhash[CS_CDHASH_LEN];
			char cdhash_string[CS_CDHASH_STRING_SIZE];
			proc_getcdhash(p, cdhash);
			cdhash_to_string(cdhash_string, cdhash);

			os_log(peLog, "CS Platform Exec Logging: Executing platform signed binary "
			    "'%s' with cdhash %s\n", p->p_name, cdhash_string);
		}
	} else {
		if (!unexpected_failure) {
			proc_csflags_set(p, CS_KILLED);
		}
		/* make very sure execution fails */
		if (vfexec || spawn) {
			assert(signature_failure_reason != OS_REASON_NULL);
			psignal_vfork_with_reason(p, proc_task(p), imgp->ip_new_thread,
			    SIGKILL, signature_failure_reason);
			signature_failure_reason = OS_REASON_NULL;
			error = 0;
		} else {
			assert(signature_failure_reason != OS_REASON_NULL);
			psignal_with_reason(p, SIGKILL, signature_failure_reason);
			signature_failure_reason = OS_REASON_NULL;
		}
	}

	if (port != IPC_PORT_NULL) {
		ipc_port_release_send(port);
	}

	/* If we hit this, we likely would have leaked an exit reason */
	assert(signature_failure_reason == OS_REASON_NULL);
	return error;
}

/*
 * Typically as soon as we start executing this process, the
 * first instruction will trigger a VM fault to bring the text
 * pages (as executable) into the address space, followed soon
 * thereafter by dyld data structures (for dynamic executable).
 * To optimize this, as well as improve support for hardware
 * debuggers that can only access resident pages present
 * in the process' page tables, we prefault some pages if
 * possible.  Errors are non-fatal.
 */
#ifndef PREVENT_CALLER_STACK_USE
#define PREVENT_CALLER_STACK_USE __attribute__((noinline))
#endif

/*
 * Prefaulting dyld data does not work (rdar://76621401)
 */
#define FIXED_76621401 0
static void PREVENT_CALLER_STACK_USE
exec_prefault_data(
	__unused proc_t p,
	__unused struct image_params *imgp,
	__unused load_result_t *load_result)
{
#if FIXED_76621401
	int ret;
	size_t expected_all_image_infos_size;
#endif /* FIXED_76621401 */
	kern_return_t kr;

	/*
	 * Prefault executable or dyld entry point.
	 */
	if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
		DEBUG4K_LOAD("entry_point 0x%llx\n", (uint64_t)load_result->entry_point);
	}
	kr = vm_fault(current_map(),
	    vm_map_trunc_page(load_result->entry_point,
	    vm_map_page_mask(current_map())),
	    VM_PROT_READ | VM_PROT_EXECUTE,
	    FALSE, VM_KERN_MEMORY_NONE,
	    THREAD_UNINT, NULL, 0);
	if (kr != KERN_SUCCESS) {
		DEBUG4K_ERROR("map %p va 0x%llx -> 0x%x\n", current_map(), (uint64_t)vm_map_trunc_page(load_result->entry_point, vm_map_page_mask(current_map())), kr);
	}

#if FIXED_76621401
	if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) {
		expected_all_image_infos_size = sizeof(struct user64_dyld_all_image_infos);
	} else {
		expected_all_image_infos_size = sizeof(struct user32_dyld_all_image_infos);
	}

	/* Decode dyld anchor structure from <mach-o/dyld_images.h> */
	if (load_result->dynlinker &&
	    load_result->all_image_info_addr &&
	    load_result->all_image_info_size >= expected_all_image_infos_size) {
		union {
			struct user64_dyld_all_image_infos infos64;
			struct user32_dyld_all_image_infos infos32;
		} all_image_infos;

		/*
		 * Pre-fault to avoid copyin() going through the trap handler
		 * and recovery path.
		 */
		if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
			DEBUG4K_LOAD("all_image_info_addr 0x%llx\n", load_result->all_image_info_addr);
		}
		kr = vm_fault(current_map(),
		    vm_map_trunc_page(load_result->all_image_info_addr,
		    vm_map_page_mask(current_map())),
		    VM_PROT_READ | VM_PROT_WRITE,
		    FALSE, VM_KERN_MEMORY_NONE,
		    THREAD_UNINT, NULL, 0);
		if (kr != KERN_SUCCESS) {
			// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(load_result->all_image_info_addr, vm_map_page_mask(current_map())), kr);
		}
		if ((load_result->all_image_info_addr & PAGE_MASK) + expected_all_image_infos_size > PAGE_SIZE) {
			/* all_image_infos straddles a page */
			kr = vm_fault(current_map(),
			    vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1,
			    vm_map_page_mask(current_map())),
			    VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL, 0);
			if (kr != KERN_SUCCESS) {
				// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size -1, vm_map_page_mask(current_map())), kr);
			}
		}

		if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
			DEBUG4K_LOAD("copyin(0x%llx, 0x%lx)\n", load_result->all_image_info_addr, expected_all_image_infos_size);
		}
		ret = copyin((user_addr_t)load_result->all_image_info_addr,
		    &all_image_infos,
		    expected_all_image_infos_size);
		if (ret == 0 && all_image_infos.infos32.version >= DYLD_ALL_IMAGE_INFOS_ADDRESS_MINIMUM_VERSION) {
			user_addr_t notification_address;
			user_addr_t dyld_image_address;
			user_addr_t dyld_version_address;
			user_addr_t dyld_all_image_infos_address;
			user_addr_t dyld_slide_amount;

			if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) {
				notification_address = (user_addr_t)all_image_infos.infos64.notification;
				dyld_image_address = (user_addr_t)all_image_infos.infos64.dyldImageLoadAddress;
				dyld_version_address = (user_addr_t)all_image_infos.infos64.dyldVersion;
				dyld_all_image_infos_address = (user_addr_t)all_image_infos.infos64.dyldAllImageInfosAddress;
			} else {
				notification_address = all_image_infos.infos32.notification;
				dyld_image_address = all_image_infos.infos32.dyldImageLoadAddress;
				dyld_version_address = all_image_infos.infos32.dyldVersion;
				dyld_all_image_infos_address = all_image_infos.infos32.dyldAllImageInfosAddress;
			}

			/*
			 * dyld statically sets up the all_image_infos in its Mach-O
			 * binary at static link time, with pointers relative to its default
			 * load address.  Since ASLR might slide dyld before its first
			 * instruction is executed, "dyld_slide_amount" tells us how far
			 * dyld was loaded compared to its default expected load address.
			 * All other pointers into dyld's image should be adjusted by this
			 * amount.  At some point later, dyld will fix up pointers to take
			 * into account the slide, at which point the all_image_infos_address
			 * field in the structure will match the runtime load address, and
			 * "dyld_slide_amount" will be 0, if we were to consult it again.
			 */

			dyld_slide_amount = (user_addr_t)load_result->all_image_info_addr - dyld_all_image_infos_address;

#if 0
			kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
			    (uint64_t)load_result->all_image_info_addr,
			    all_image_infos.infos32.version,
			    (uint64_t)notification_address,
			    (uint64_t)dyld_image_address,
			    (uint64_t)dyld_version_address,
			    (uint64_t)dyld_all_image_infos_address);
#endif

			if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
				DEBUG4K_LOAD("notification_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)notification_address, (uint64_t)dyld_slide_amount);
			}
			kr = vm_fault(current_map(),
			    vm_map_trunc_page(notification_address + dyld_slide_amount,
			    vm_map_page_mask(current_map())),
			    VM_PROT_READ | VM_PROT_EXECUTE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL, 0);
			if (kr != KERN_SUCCESS) {
				// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(notification_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr);
			}
			if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
				DEBUG4K_LOAD("dyld_image_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_image_address, (uint64_t)dyld_slide_amount);
			}
			kr = vm_fault(current_map(),
			    vm_map_trunc_page(dyld_image_address + dyld_slide_amount,
			    vm_map_page_mask(current_map())),
			    VM_PROT_READ | VM_PROT_EXECUTE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL, 0);
			if (kr != KERN_SUCCESS) {
				// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_image_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr);
			}
			if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
				DEBUG4K_LOAD("dyld_version_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_version_address, (uint64_t)dyld_slide_amount);
			}
			kr = vm_fault(current_map(),
			    vm_map_trunc_page(dyld_version_address + dyld_slide_amount,
			    vm_map_page_mask(current_map())),
			    VM_PROT_READ,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL, 0);
			if (kr != KERN_SUCCESS) {
				// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_version_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr);
			}
			if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) {
				/* fixed copy-paste: log the all_image_infos address, not the version address */
				DEBUG4K_LOAD("dyld_all_image_infos_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_all_image_infos_address, (uint64_t)dyld_slide_amount);
			}
			kr = vm_fault(current_map(),
			    vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount,
			    vm_map_page_mask(current_map())),
			    VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL, 0);
			if (kr != KERN_SUCCESS) {
				// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr);
			}
		}
	}
#endif /* FIXED_76621401 */
}

static int
sysctl_libmalloc_experiments SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2, req)
	int changed;
	errno_t error;
	uint64_t value = os_atomic_load_wide(&libmalloc_experiment_factors, relaxed);

	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
	if (error) {
		return error;
	}

	if (changed) {
		os_atomic_store_wide(&libmalloc_experiment_factors, value, relaxed);
	}

	return 0;
}
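
/*
 * Illustrative usage (from userspace, assuming the OID registered below
 * is published as kern.libmalloc_experiments):
 *
 *   sysctl -w kern.libmalloc_experiments=<value>
 *
 * reads or atomically updates the 64-bit experiment-factor word handled
 * above.
 */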

EXPERIMENT_FACTOR_PROC(_kern, libmalloc_experiments, CTLTYPE_QUAD | CTLFLAG_RW, 0, 0, &sysctl_libmalloc_experiments, "A", "");

SYSCTL_NODE(_kern, OID_AUTO, sec_transition,
    CTLFLAG_RD | CTLFLAG_LOCKED, 0, "sec_transition");


SYSCTL_INT(_kern_sec_transition, OID_AUTO, available,
    CTLFLAG_RD | CTLFLAG_LOCKED, (int *)NULL, 0, "");
