xref: /xnu-11215/bsd/kern/kern_exit.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <machine/reg.h>
76 #include <machine/psl.h>
77 #include <stdatomic.h>
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/ioctl.h>
82 #include <sys/proc_internal.h>
83 #include <sys/proc.h>
84 #include <sys/kauth.h>
85 #include <sys/tty.h>
86 #include <sys/time.h>
87 #include <sys/resource.h>
88 #include <sys/kernel.h>
89 #include <sys/wait.h>
90 #include <sys/file_internal.h>
91 #include <sys/vnode_internal.h>
92 #include <sys/syslog.h>
93 #include <sys/malloc.h>
94 #include <sys/resourcevar.h>
95 #include <sys/ptrace.h>
96 #include <sys/proc_info.h>
97 #include <sys/reason.h>
98 #include <sys/_types/_timeval64.h>
99 #include <sys/user.h>
100 #include <sys/aio_kern.h>
101 #include <sys/sysproto.h>
102 #include <sys/signalvar.h>
103 #include <sys/kdebug.h>
104 #include <sys/kdebug_triage.h>
105 #include <sys/acct.h> /* acct_process */
106 #include <sys/codesign.h>
107 #include <sys/event.h> /* kevent_proc_copy_uptrs */
108 #include <sys/sdt.h>
109 #include <sys/bsdtask_info.h> /* bsd_getthreadname */
110 #include <sys/spawn.h>
111 #include <sys/ubc.h>
112 #include <sys/code_signing.h>
113 
114 #include <security/audit/audit.h>
115 #include <bsm/audit_kevents.h>
116 
117 #include <mach/mach_types.h>
118 #include <mach/task.h>
119 #include <mach/thread_act.h>
120 
121 #include <kern/exc_resource.h>
122 #include <kern/kern_types.h>
123 #include <kern/kalloc.h>
124 #include <kern/task.h>
125 #include <corpses/task_corpse.h>
126 #include <kern/thread.h>
127 #include <kern/thread_call.h>
128 #include <kern/sched_prim.h>
129 #include <kern/assert.h>
130 #include <kern/locks.h>
131 #include <kern/policy_internal.h>
132 #include <kern/exc_guard.h>
133 #include <kern/backtrace.h>
134 #include <vm/vm_map_xnu.h>
135 
136 #include <vm/vm_protos.h>
137 #include <os/log.h>
138 #include <os/system_event_log.h>
139 
140 #include <pexpert/pexpert.h>
141 
142 #include <kdp/kdp_dyld.h>
143 
144 #if SYSV_SHM
145 #include <sys/shm_internal.h>   /* shmexit */
146 #endif /* SYSV_SHM */
147 #if CONFIG_PERSONAS
148 #include <sys/persona.h>
149 #endif /* CONFIG_PERSONAS */
150 #if CONFIG_MEMORYSTATUS
151 #include <sys/kern_memorystatus.h>
152 #endif /* CONFIG_MEMORYSTATUS */
153 #if CONFIG_DTRACE
154 /* Do not include dtrace.h, it redefines kmem_[alloc/free] */
155 void dtrace_proc_exit(proc_t p);
156 #include <sys/dtrace_ptss.h>
157 #endif /* CONFIG_DTRACE */
158 #if CONFIG_MACF
159 #include <security/mac_framework.h>
160 #include <security/mac_mach_internal.h>
161 #include <sys/syscall.h>
162 #endif /* CONFIG_MACF */
163 
164 #ifdef CONFIG_EXCLAVES
165 void
166 task_add_conclave_crash_info(task_t task, void *crash_info_ptr);
167 #endif /* CONFIG_EXCLAVES */
168 
169 #if CONFIG_MEMORYSTATUS
170 static void proc_memorystatus_remove(proc_t p);
171 #endif /* CONFIG_MEMORYSTATUS */
172 void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify);
173 void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
174     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
175     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
176 mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p);
177 exception_type_t get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info);
178 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
179 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
180 static void populate_corpse_crashinfo(proc_t p, task_t corpse_task,
181     struct rusage_superset *rup, mach_exception_data_type_t code,
182     mach_exception_data_type_t subcode, uint64_t *udata_buffer,
183     int num_udata, os_reason_t reason, exception_type_t etype);
184 static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode);
185 extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval);
186 extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo);
187 extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]);
188 extern uint64_t get_task_phys_footprint_limit(task_t);
189 int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
190 extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task);
191 
192 extern unsigned int exception_log_max_pid;
193 
194 extern void IOUserServerRecordExitReason(task_t task, os_reason_t reason);
195 
196 /*
197  * Flags for `reap_child_locked`.
198  */
199 __options_decl(reap_flags_t, uint32_t, {
200 	/*
201 	 * Parent is exiting, so the kernel is responsible for reaping children.
202 	 */
203 	REAP_DEAD_PARENT = 0x01,
204 	/*
205 	 * Child process was re-parented to initproc.
206 	 */
207 	REAP_REPARENTED_TO_INIT = 0x02,
208 	/*
209 	 * `proc_list_lock` is held on entry.
210 	 */
211 	REAP_LOCKED = 0x04,
212 	/*
213 	 * Drop the `proc_list_lock` on return.  Note that the `proc_list_lock` will
214 	 * be dropped internally by the function regardless.
215 	 */
216 	REAP_DROP_LOCK = 0x08,
217 });
218 static void reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags);
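
/*
 * Editor's note: a minimal, hypothetical call sketch combining the flags
 * above. A parent exiting while already holding the proc_list_lock might
 * reap a child that had been re-parented to initproc like so:
 *
 *	reap_child_locked(parent, child,
 *	    REAP_DEAD_PARENT | REAP_REPARENTED_TO_INIT |
 *	    REAP_LOCKED | REAP_DROP_LOCK);
 *
 * REAP_LOCKED declares that the lock is held on entry; REAP_DROP_LOCK asks
 * the function to return with the lock dropped rather than re-acquired.
 */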
219 
220 static KALLOC_TYPE_DEFINE(zombie_zone, struct rusage_superset, KT_DEFAULT);
221 
222 /*
223  * Things which should have prototypes in headers, but don't
224  */
225 void    proc_exit(proc_t p);
226 int     wait1continue(int result);
227 int     waitidcontinue(int result);
228 kern_return_t sys_perf_notify(thread_t thread, int pid);
229 kern_return_t task_exception_notify(exception_type_t exception,
230     mach_exception_data_type_t code, mach_exception_data_type_t subcode, bool fatal);
231 void    delay(int);
232 
233 #if DEVELOPMENT || DEBUG
234 static LCK_GRP_DECLARE(proc_exit_lpexit_spin_lock_grp, "proc_exit_lpexit_spin");
235 static LCK_MTX_DECLARE(proc_exit_lpexit_spin_lock, &proc_exit_lpexit_spin_lock_grp);
236 static pid_t proc_exit_lpexit_spin_pid = -1;            /* wakeup point */
237 static int proc_exit_lpexit_spin_pos = -1;              /* point to block */
238 static int proc_exit_lpexit_spinning = 0;
239 enum {
240 	PELS_POS_START = 0,             /* beginning of proc_exit */
241 	PELS_POS_PRE_TASK_DETACH,       /* before task/proc detach */
242 	PELS_POS_POST_TASK_DETACH,      /* after task/proc detach */
243 	PELS_POS_END,                   /* end of proc_exit */
244 	PELS_NPOS                       /* # valid values */
245 };
246 
247 /* Panic if matching processes (delimited by ',') exit on error. */
248 static TUNABLE_STR(panic_on_eexit_pcomms, 128, "panic_on_error_exit", "");
249 
250 static int
251 proc_exit_lpexit_spin_pid_sysctl SYSCTL_HANDLER_ARGS
252 {
253 #pragma unused(oidp, arg1, arg2)
254 	pid_t new_value;
255 	int changed;
256 	int error;
257 
258 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
259 		return ENOENT;
260 	}
261 
262 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pid,
263 	    sizeof(proc_exit_lpexit_spin_pid), &new_value, &changed);
264 	if (error == 0 && changed != 0) {
265 		if (new_value < -1) {
266 			return EINVAL;
267 		}
268 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
269 		proc_exit_lpexit_spin_pid = new_value;
270 		wakeup(&proc_exit_lpexit_spin_pid);
271 		proc_exit_lpexit_spinning = 0;
272 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
273 	}
274 	return error;
275 }
276 
277 static int
278 proc_exit_lpexit_spin_pos_sysctl SYSCTL_HANDLER_ARGS
279 {
280 #pragma unused(oidp, arg1, arg2)
281 	int new_value;
282 	int changed;
283 	int error;
284 
285 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
286 		return ENOENT;
287 	}
288 
289 	error = sysctl_io_number(req, proc_exit_lpexit_spin_pos,
290 	    sizeof(proc_exit_lpexit_spin_pos), &new_value, &changed);
291 	if (error == 0 && changed != 0) {
292 		if (new_value < -1 || new_value >= PELS_NPOS) {
293 			return EINVAL;
294 		}
295 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
296 		proc_exit_lpexit_spin_pos = new_value;
297 		wakeup(&proc_exit_lpexit_spin_pid);
298 		proc_exit_lpexit_spinning = 0;
299 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
300 	}
301 	return error;
302 }
303 
304 static int
305 proc_exit_lpexit_spinning_sysctl SYSCTL_HANDLER_ARGS
306 {
307 #pragma unused(oidp, arg1, arg2)
308 	int new_value;
309 	int changed;
310 	int error;
311 
312 	if (!PE_parse_boot_argn("enable_proc_exit_lpexit_spin", NULL, 0)) {
313 		return ENOENT;
314 	}
315 
316 	error = sysctl_io_number(req, proc_exit_lpexit_spinning,
317 	    sizeof(proc_exit_lpexit_spinning), &new_value, &changed);
318 	if (error == 0 && changed != 0) {
319 		return EINVAL;
320 	}
321 	return error;
322 }
323 
324 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pid,
325     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
326     NULL, sizeof(pid_t),
327     proc_exit_lpexit_spin_pid_sysctl, "I", "PID to hold in proc_exit");
328 
329 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spin_pos,
330     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
331     NULL, sizeof(int),
332     proc_exit_lpexit_spin_pos_sysctl, "I", "position to hold in proc_exit");
333 
334 SYSCTL_PROC(_debug, OID_AUTO, proc_exit_lpexit_spinning,
335     CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
336     NULL, sizeof(int),
337     proc_exit_lpexit_spinning_sysctl, "I", "is a thread at requested pid/pos");
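
/*
 * Editor's note: a sketch of how these debug-only sysctls might be driven
 * from userspace, assuming the enable_proc_exit_lpexit_spin boot-arg is set
 * (the handlers above return ENOENT otherwise). The OID names mirror the
 * declarations above:
 *
 *	sysctl debug.proc_exit_lpexit_spin_pid=1234   # PID to hold
 *	sysctl debug.proc_exit_lpexit_spin_pos=0      # PELS_POS_START
 *	sysctl debug.proc_exit_lpexit_spinning        # poll until it reads 1
 *	sysctl debug.proc_exit_lpexit_spin_pid=-1     # release the process
 */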
338 
339 static inline void
340 proc_exit_lpexit_check(pid_t pid, int pos)
341 {
342 	if (proc_exit_lpexit_spin_pid == pid) {
343 		bool slept = false;
344 		lck_mtx_lock(&proc_exit_lpexit_spin_lock);
345 		while (proc_exit_lpexit_spin_pid == pid &&
346 		    proc_exit_lpexit_spin_pos == pos) {
347 			if (!slept) {
348 				os_log(OS_LOG_DEFAULT,
349 				    "proc_exit_lpexit_check: Process[%d] waiting during proc_exit at pos %d as requested", pid, pos);
350 				slept = true;
351 			}
352 			proc_exit_lpexit_spinning = 1;
353 			msleep(&proc_exit_lpexit_spin_pid, &proc_exit_lpexit_spin_lock,
354 			    PWAIT, "proc_exit_lpexit_check", NULL);
355 			proc_exit_lpexit_spinning = 0;
356 		}
357 		lck_mtx_unlock(&proc_exit_lpexit_spin_lock);
358 		if (slept) {
359 			os_log(OS_LOG_DEFAULT,
360 			    "proc_exit_lpexit_check: Process[%d] driving on from pos %d", pid, pos);
361 		}
362 	}
363 }
364 #endif /* DEVELOPMENT || DEBUG */
365 
366 /*
367  * NOTE: Source and target may *NOT* overlap!
368  * XXX Should share code with bsd/dev/ppc/unix_signal.c
369  */
370 void
371 siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out)
372 {
373 	out->si_signo   = in->si_signo;
374 	out->si_errno   = in->si_errno;
375 	out->si_code    = in->si_code;
376 	out->si_pid     = in->si_pid;
377 	out->si_uid     = in->si_uid;
378 	out->si_status  = in->si_status;
379 	out->si_addr    = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr);
380 	/* following cast works for sival_int because of padding */
381 	out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr);
382 	out->si_band    = (user32_long_t)in->si_band;                  /* range reduction */
383 }
384 
385 void
386 siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out)
387 {
388 	out->si_signo   = in->si_signo;
389 	out->si_errno   = in->si_errno;
390 	out->si_code    = in->si_code;
391 	out->si_pid     = in->si_pid;
392 	out->si_uid     = in->si_uid;
393 	out->si_status  = in->si_status;
394 	out->si_addr    = in->si_addr;
395 	/* following cast works for sival_int because of padding */
396 	out->si_value.sival_ptr = in->si_value.sival_ptr;
397 	out->si_band    = in->si_band;                  /* no range reduction needed */
398 }
399 
400 static int
401 copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr)
402 {
403 	if (is64) {
404 		user64_siginfo_t sinfo64;
405 
406 		bzero(&sinfo64, sizeof(sinfo64));
407 		siginfo_user_to_user64(native, &sinfo64);
408 		return copyout(&sinfo64, uaddr, sizeof(sinfo64));
409 	} else {
410 		user32_siginfo_t sinfo32;
411 
412 		bzero(&sinfo32, sizeof(sinfo32));
413 		siginfo_user_to_user32(native, &sinfo32);
414 		return copyout(&sinfo32, uaddr, sizeof(sinfo32));
415 	}
416 }
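
/*
 * Editor's note: a hypothetical caller sketch. The is64 flag is typically
 * derived from the target process so that the kernel-native siginfo is
 * narrowed (or copied 1:1) to match the user process's ABI:
 *
 *	error = copyoutsiginfo(&native, IS_64BIT_PROCESS(p), uaddr);
 */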
417 
418 void
419 gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
420     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
421     uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype)
422 {
423 	struct rusage_superset rup;
424 
425 	gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
426 	rup.ri.ri_phys_footprint = 0;
427 	populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
428 	    udata_buffer, num_udata, reason, etype);
429 }
430 
431 static void
432 proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
433 {
434 	mach_exception_data_type_t code_update = *code;
435 	mach_exception_data_type_t subcode_update = *subcode;
436 	if (p->p_exit_reason == OS_REASON_NULL) {
437 		return;
438 	}
439 
440 	switch (p->p_exit_reason->osr_namespace) {
441 	case OS_REASON_JETSAM:
442 		if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
443 			/* Update the code with EXC_RESOURCE code for high memory watermark */
444 			EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
445 			EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
446 			EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(proc_task(p))) >> 20));
447 			subcode_update = 0;
448 			break;
449 		}
450 
451 		break;
452 	default:
453 		break;
454 	}
455 
456 	*code = code_update;
457 	*subcode = subcode_update;
458 	return;
459 }
460 
461 mach_exception_data_type_t
462 proc_encode_exit_exception_code(proc_t p)
463 {
464 	uint64_t subcode = 0;
465 
466 	if (p->p_exit_reason == OS_REASON_NULL) {
467 		return 0;
468 	}
469 
470 	/* Embed first 32 bits of osr_namespace and osr_code in exception code */
471 	ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
472 	ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
473 	return (mach_exception_data_type_t)subcode;
474 }
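
/*
 * Editor's note: a sketch of the inverse mapping, under the assumption that
 * the ENCODE_* macros above place the low 32 bits of osr_namespace in the
 * upper half of the subcode and the low 32 bits of osr_code in the lower
 * half (see sys/reason.h for the authoritative encoding):
 *
 *	uint32_t ns = (uint32_t)((uint64_t)subcode >> 32);
 *	uint32_t rc = (uint32_t)((uint64_t)subcode & UINT32_MAX);
 */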
475 
476 static void
477 populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
478     mach_exception_data_type_t code, mach_exception_data_type_t subcode,
479     uint64_t *udata_buffer, int num_udata, os_reason_t reason, exception_type_t etype)
480 {
481 	mach_vm_address_t uaddr = 0;
482 	mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
483 	exc_codes[0] = code;
484 	exc_codes[1] = subcode;
485 	cpu_type_t cputype;
486 	struct proc_uniqidentifierinfo p_uniqidinfo;
487 	struct proc_workqueueinfo pwqinfo;
488 	int retval = 0;
489 	uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
490 	boolean_t is_corpse_fork;
491 	uint32_t csflags;
492 	unsigned int pflags = 0;
493 	uint64_t max_footprint_mb;
494 	uint64_t max_footprint;
495 
496 	uint64_t ledger_internal;
497 	uint64_t ledger_internal_compressed;
498 	uint64_t ledger_iokit_mapped;
499 	uint64_t ledger_alternate_accounting;
500 	uint64_t ledger_alternate_accounting_compressed;
501 	uint64_t ledger_purgeable_nonvolatile;
502 	uint64_t ledger_purgeable_nonvolatile_compressed;
503 	uint64_t ledger_page_table;
504 	uint64_t ledger_phys_footprint;
505 	uint64_t ledger_phys_footprint_lifetime_max;
506 	uint64_t ledger_network_nonvolatile;
507 	uint64_t ledger_network_nonvolatile_compressed;
508 	uint64_t ledger_wired_mem;
509 	uint64_t ledger_tagged_footprint;
510 	uint64_t ledger_tagged_footprint_compressed;
511 	uint64_t ledger_media_footprint;
512 	uint64_t ledger_media_footprint_compressed;
513 	uint64_t ledger_graphics_footprint;
514 	uint64_t ledger_graphics_footprint_compressed;
515 	uint64_t ledger_neural_footprint;
516 	uint64_t ledger_neural_footprint_compressed;
517 
518 	void *crash_info_ptr = task_get_corpseinfo(corpse_task);
519 
520 #if CONFIG_MEMORYSTATUS
521 	int memstat_dirty_flags = 0;
522 #endif
523 
524 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
525 		kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
526 	}
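
	/*
	 * Editor's note: the reserve-then-copy pattern above repeats for every
	 * field below. kcdata_get_memory_addr() reserves a typed slot in the
	 * corpse's kcdata buffer (and fails, e.g., when the buffer is full);
	 * kcdata_memcpy() then fills the reserved slot. A failed reservation
	 * simply omits that field from the crash info instead of aborting.
	 */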
527 
528 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(pid_t), &uaddr)) {
529 		pid_t pid = proc_getpid(p);
530 		kcdata_memcpy(crash_info_ptr, uaddr, &pid, sizeof(pid));
531 	}
532 
533 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
534 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
535 	}
536 
537 	/* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
538 	if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
539 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
540 			kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
541 		}
542 	}
543 
544 	static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo));
545 	if (KERN_SUCCESS ==
546 	    kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
547 		proc_piduniqidentifierinfo(p, &p_uniqidinfo);
548 		kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
549 	}
550 
551 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
552 		kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
553 	}
554 
555 	csflags = (uint32_t)proc_getcsflags(p);
556 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(csflags), &uaddr)) {
557 		kcdata_memcpy(crash_info_ptr, uaddr, &csflags, sizeof(csflags));
558 	}
559 
560 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
561 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
562 	}
563 
564 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
565 		struct timeval64 t64;
566 		t64.tv_sec = (int64_t)p->p_start.tv_sec;
567 		t64.tv_usec = (int64_t)p->p_start.tv_usec;
568 		kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
569 	}
570 
571 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
572 		kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
573 	}
574 
575 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
576 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
577 	}
578 
579 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
580 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
581 	}
582 
583 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
584 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
585 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
586 		kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
587 		zfree(ZV_NAMEI, buf);
588 	}
589 
590 	pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
591 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
592 		kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
593 	}
594 
595 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
596 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
597 	}
598 
599 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
600 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
601 	}
602 
603 	cputype = cpu_type() & ~CPU_ARCH_MASK;
604 	if (IS_64BIT_PROCESS(p)) {
605 		cputype |= CPU_ARCH_ABI64;
606 	} else if (proc_is64bit_data(p)) {
607 		cputype |= CPU_ARCH_ABI64_32;
608 	}
609 
610 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
611 		kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
612 	}
613 
614 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
615 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_cputype, sizeof(cpu_type_t));
616 	}
617 
618 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
619 		max_footprint = get_task_phys_footprint_limit(proc_task(p));
620 		max_footprint_mb = max_footprint >> 20;
621 		kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
622 	}
623 
624 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
625 		ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(proc_task(p));
626 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
627 	}
628 
629 	// In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
630 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
631 		ledger_internal = get_task_internal(corpse_task);
632 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
633 	}
634 
635 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
636 		ledger_internal_compressed = get_task_internal_compressed(corpse_task);
637 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
638 	}
639 
640 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
641 		ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
642 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
643 	}
644 
645 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
646 		ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
647 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
648 	}
649 
650 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
651 		ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
652 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
653 	}
654 
655 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
656 		ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
657 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
658 	}
659 
660 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
661 		ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
662 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
663 	}
664 
665 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
666 		ledger_page_table = get_task_page_table(corpse_task);
667 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
668 	}
669 
670 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
671 		ledger_phys_footprint = get_task_phys_footprint(corpse_task);
672 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
673 	}
674 
675 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
676 		ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
677 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
678 	}
679 
680 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
681 		ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
682 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
683 	}
684 
685 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
686 		ledger_wired_mem = get_task_wired_mem(corpse_task);
687 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
688 	}
689 
690 	bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
691 	retval = fill_procworkqueue(p, &pwqinfo);
692 	if (retval == 0) {
693 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
694 			kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
695 		}
696 	}
697 
698 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
699 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
700 	}
701 
702 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
703 		uid_t persona_id = proc_persona_id(p);
704 		kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
705 	}
706 
707 #if CONFIG_COALITIONS
708 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
709 		uint64_t coalition_ids[COALITION_NUM_TYPES];
710 		task_coalition_ids(proc_task(p), coalition_ids);
711 		kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
712 	}
713 #endif /* CONFIG_COALITIONS */
714 
715 #if CONFIG_MEMORYSTATUS
716 	memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
717 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
718 		kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
719 	}
720 #endif
721 
722 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
723 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
724 	}
725 
726 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
727 		ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
728 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
729 	}
730 
731 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
732 		ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
733 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
734 	}
735 
736 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
737 		ledger_media_footprint = get_task_media_footprint(corpse_task);
738 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
739 	}
740 
741 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
742 		ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
743 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
744 	}
745 
746 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
747 		ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
748 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
749 	}
750 
751 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
752 		ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
753 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
754 	}
755 
756 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
757 		ledger_neural_footprint = get_task_neural_footprint(corpse_task);
758 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
759 	}
760 
761 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
762 		ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
763 		kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
764 	}
765 
766 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
767 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
768 	}
769 
770 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_KERNEL_TRIAGE_INFO_V1, sizeof(struct kernel_triage_info_v1), &uaddr)) {
771 		char triage_strings[KDBG_TRIAGE_MAX_STRINGS][KDBG_TRIAGE_MAX_STRLEN];
772 		ktriage_extract(thread_tid(current_thread()), triage_strings, KDBG_TRIAGE_MAX_STRINGS * KDBG_TRIAGE_MAX_STRLEN);
773 		kcdata_memcpy(crash_info_ptr, uaddr, (void*) triage_strings, sizeof(struct kernel_triage_info_v1));
774 	}
775 
776 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_TASK_IS_CORPSE_FORK, sizeof(is_corpse_fork), &uaddr)) {
777 		is_corpse_fork = is_corpsefork(corpse_task);
778 		kcdata_memcpy(crash_info_ptr, uaddr, &is_corpse_fork, sizeof(is_corpse_fork));
779 	}
780 
781 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_TYPE, sizeof(etype), &uaddr)) {
782 		kcdata_memcpy(crash_info_ptr, uaddr, &etype, sizeof(etype));
783 	}
784 
785 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASH_COUNT, sizeof(int), &uaddr)) {
786 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_crash_count, sizeof(int));
787 	}
788 
789 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_THROTTLE_TIMEOUT, sizeof(int), &uaddr)) {
790 		kcdata_memcpy(crash_info_ptr, uaddr, &p->p_throttle_timeout, sizeof(int));
791 	}
792 
793 	char signing_id[MAX_CRASHINFO_SIGNING_ID_LEN] = {};
794 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_SIGNING_ID, sizeof(signing_id), &uaddr)) {
795 		const char * id = cs_identity_get(p);
796 		if (id) {
797 			strlcpy(signing_id, id, sizeof(signing_id));
798 		}
799 		kcdata_memcpy(crash_info_ptr, uaddr, &signing_id, sizeof(signing_id));
800 	}
801 	char team_id[MAX_CRASHINFO_TEAM_ID_LEN] = {};
802 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TEAM_ID, sizeof(team_id), &uaddr)) {
803 		const char * id = csproc_get_teamid(p);
804 		if (id) {
805 			strlcpy(team_id, id, sizeof(team_id));
806 		}
807 		kcdata_memcpy(crash_info_ptr, uaddr, &team_id, sizeof(team_id));
808 	}
809 
810 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_VALIDATION_CATEGORY, sizeof(uint32_t), &uaddr)) {
811 		uint32_t category = 0;
812 		if (csproc_get_validation_category(p, &category) != KERN_SUCCESS) {
813 			category = CS_VALIDATION_CATEGORY_INVALID;
814 		}
815 		kcdata_memcpy(crash_info_ptr, uaddr, &category, sizeof(category));
816 	}
817 
818 	if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CS_TRUST_LEVEL, sizeof(uint32_t), &uaddr)) {
819 		uint32_t trust = 0;
820 		kern_return_t ret = get_trust_level_kdp(get_task_pmap(corpse_task), &trust);
821 		if (ret != KERN_SUCCESS) {
822 			trust = KCDATA_INVALID_CS_TRUST_LEVEL;
823 		}
824 		kcdata_memcpy(crash_info_ptr, uaddr, &trust, sizeof(trust));
825 	}
826 
827 	uint64_t jit_start_addr = 0;
828 	uint64_t jit_end_addr = 0;
829 	kern_return_t ret = get_jit_address_range_kdp(get_task_pmap(corpse_task), (uintptr_t*)&jit_start_addr, (uintptr_t*)&jit_end_addr);
830 	if (KERN_SUCCESS == ret) {
831 		struct crashinfo_jit_address_range range = {};
832 		range.start_address = jit_start_addr;
833 		range.end_address = jit_end_addr;
834 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_JIT_ADDRESS_RANGE, sizeof(struct crashinfo_jit_address_range), &uaddr)) {
835 			kcdata_memcpy(crash_info_ptr, uaddr, &range, sizeof(range));
836 		}
837 	}
838 
839 	if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
840 		reason = p->p_exit_reason;
841 	}
842 	if (reason != OS_REASON_NULL) {
843 		if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
844 			struct exit_reason_snapshot ers = {
845 				.ers_namespace = reason->osr_namespace,
846 				.ers_code = reason->osr_code,
847 				.ers_flags = reason->osr_flags
848 			};
849 
850 			kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
851 		}
852 
853 		if (reason->osr_kcd_buf != 0) {
854 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
855 			assert(reason_buf_size != 0);
856 
857 			if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
858 				kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
859 			}
860 		}
861 	}
862 
863 	if (num_udata > 0) {
864 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
865 		    sizeof(uint64_t), num_udata, &uaddr)) {
866 			kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
867 		}
868 	}
869 
870 #if CONFIG_EXCLAVES
871 	task_add_conclave_crash_info(corpse_task, crash_info_ptr);
872 #endif /* CONFIG_EXCLAVES */
873 }
874 
875 exception_type_t
876 get_exception_from_corpse_crashinfo(kcdata_descriptor_t corpse_info)
877 {
878 	kcdata_iter_t iter = kcdata_iter((void *)corpse_info->kcd_addr_begin,
879 	    corpse_info->kcd_length);
880 	__assert_only uint32_t type = kcdata_iter_type(iter);
881 	assert(type == KCDATA_BUFFER_BEGIN_CRASHINFO);
882 
883 	iter = kcdata_iter_find_type(iter, TASK_CRASHINFO_EXCEPTION_TYPE);
884 	exception_type_t *etype = kcdata_iter_payload(iter);
885 	return *etype;
886 }
887 
888 /*
889  * Collect the information required to generate a lightweight corpse for the
890  * current task, which may be terminating.
891  */
892 kern_return_t
893 current_thread_collect_backtrace_info(
894 	kcdata_descriptor_t *new_desc,
895 	exception_type_t etype,
896 	mach_exception_data_t code,
897 	mach_msg_type_number_t codeCnt,
898 	void *reasonp)
899 {
900 	kcdata_descriptor_t kcdata;
901 	kern_return_t kr;
902 	int frame_count = 0, max_frames = 100;
903 	mach_vm_address_t uuid_info_addr = 0;
904 	uint32_t uuid_info_count         = 0;
905 	uint32_t btinfo_flag             = 0;
906 	mach_vm_address_t btinfo_flag_addr = 0, kaddr = 0;
907 	natural_t alloc_size = BTINFO_ALLOCATION_SIZE;
908 	mach_msg_type_number_t th_info_count = THREAD_IDENTIFIER_INFO_COUNT;
909 	thread_identifier_info_data_t th_info;
910 	char threadname[MAXTHREADNAMESIZE];
911 	void *btdata_kernel = NULL;
912 	typedef uintptr_t user_btframe_t __kernel_data_semantics;
913 	user_btframe_t *btframes = NULL;
914 	os_reason_t reason = (os_reason_t)reasonp;
915 	struct backtrace_user_info info = BTUINFO_INIT;
916 	struct rusage_superset rup;
917 	uint32_t platform;
918 
919 	task_t task = current_task();
920 	proc_t p = current_proc();
921 
922 	bool has_64bit_addr = task_get_64bit_addr(current_task());
923 	bool has_64bit_data = task_get_64bit_data(current_task());
924 
925 	if (new_desc == NULL) {
926 		return KERN_INVALID_ARGUMENT;
927 	}
928 
929 	/* First, collect backtrace frames */
930 	btframes = kalloc_data(max_frames * sizeof(btframes[0]), Z_WAITOK | Z_ZERO);
931 	if (!btframes) {
932 		return KERN_RESOURCE_SHORTAGE;
933 	}
934 
935 	frame_count = backtrace_user(btframes, max_frames, NULL, &info);
936 	if (info.btui_error || frame_count == 0) {
937 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
938 		return KERN_FAILURE;
939 	}
940 
941 	if ((info.btui_info & BTI_TRUNCATED) != 0) {
942 		btinfo_flag |= TASK_BTINFO_FLAG_BT_TRUNCATED;
943 	}
944 
945 	/* Captured in kcdata descriptor below */
946 	btdata_kernel = kalloc_data(alloc_size, Z_WAITOK | Z_ZERO);
947 	if (!btdata_kernel) {
948 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
949 		return KERN_RESOURCE_SHORTAGE;
950 	}
951 
952 	kcdata = task_btinfo_alloc_init((mach_vm_address_t)btdata_kernel, alloc_size);
953 	if (!kcdata) {
954 		kfree_data(btdata_kernel, alloc_size);
955 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
956 		return KERN_RESOURCE_SHORTAGE;
957 	}
958 
959 	/* First reserve space in kcdata blob for the btinfo flag fields */
960 	if (KERN_SUCCESS != kcdata_get_memory_addr(kcdata, TASK_BTINFO_FLAGS,
961 	    sizeof(uint32_t), &btinfo_flag_addr)) {
962 		kfree_data(btdata_kernel, alloc_size);
963 		kfree_data(btframes, max_frames * sizeof(btframes[0]));
964 		kcdata_memory_destroy(kcdata);
965 		return KERN_RESOURCE_SHORTAGE;
966 	}
967 
968 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
969 	    (has_64bit_addr ? TASK_BTINFO_BACKTRACE64 : TASK_BTINFO_BACKTRACE),
970 	    sizeof(uintptr_t), frame_count, &kaddr)) {
971 		kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
972 	}
973 
974 #if __LP64__
975 	/* We only support async stacks on 64-bit kernels */
976 	frame_count = 0;
977 
978 	if (info.btui_async_frame_addr != 0) {
979 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_ASYNC_START_INDEX,
980 		    sizeof(uint32_t), &kaddr)) {
981 			uint32_t idx = info.btui_async_start_index;
982 			kcdata_memcpy(kcdata, kaddr, &idx, sizeof(uint32_t));
983 		}
984 		struct backtrace_control ctl = {
985 			.btc_frame_addr = info.btui_async_frame_addr,
986 			.btc_addr_offset = BTCTL_ASYNC_ADDR_OFFSET,
987 		};
988 
989 		info = BTUINFO_INIT;
990 		frame_count = backtrace_user(btframes, max_frames, &ctl, &info);
991 		if (info.btui_error == 0 && frame_count > 0) {
992 			if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
993 			    TASK_BTINFO_ASYNC_BACKTRACE64,
994 			    sizeof(uintptr_t), frame_count, &kaddr)) {
995 				kcdata_memcpy(kcdata, kaddr, btframes, sizeof(uintptr_t) * frame_count);
996 			}
997 		}
998 
999 		if ((info.btui_info & BTI_TRUNCATED) != 0) {
1000 			btinfo_flag |= TASK_BTINFO_FLAG_ASYNC_BT_TRUNCATED;
1001 		}
1002 	}
1003 #endif
1004 
1005 	/* Backtrace collection done, free the frames buffer */
1006 	kfree_data(btframes, max_frames * sizeof(btframes[0]));
1007 	btframes = NULL;
1008 
1009 	thread_set_exec_promotion(current_thread());
1010 	/* Next, suspend the task briefly and collect image load infos */
1011 	task_suspend_internal(task);
1012 
1013 	/* all_image_info struct is ABI, in agreement with address width */
1014 	if (has_64bit_addr) {
1015 		struct user64_dyld_all_image_infos task_image_infos = {};
1016 		struct btinfo_sc_load_info64 sc_info;
1017 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1018 		    sizeof(struct user64_dyld_all_image_infos));
1019 		uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1020 		uuid_info_addr = task_image_infos.uuidArray;
1021 
1022 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1023 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1024 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1025 		    sizeof(task_image_infos.sharedCacheUUID));
1026 
1027 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1028 		    TASK_BTINFO_SC_LOADINFO64, sizeof(sc_info), &kaddr)) {
1029 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1030 		}
1031 	} else {
1032 		struct user32_dyld_all_image_infos task_image_infos = {};
1033 		struct btinfo_sc_load_info sc_info;
1034 		(void)copyin((user_addr_t)task_get_all_image_info_addr(task), &task_image_infos,
1035 		    sizeof(struct user32_dyld_all_image_infos));
1036 		uuid_info_count = task_image_infos.uuidArrayCount;
1037 		uuid_info_addr = task_image_infos.uuidArray;
1038 
1039 		sc_info.sharedCacheSlide = task_image_infos.sharedCacheSlide;
1040 		sc_info.sharedCacheBaseAddress = task_image_infos.sharedCacheBaseAddress;
1041 		memcpy(&sc_info.sharedCacheUUID, &task_image_infos.sharedCacheUUID,
1042 		    sizeof(task_image_infos.sharedCacheUUID));
1043 
1044 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata,
1045 		    TASK_BTINFO_SC_LOADINFO, sizeof(sc_info), &kaddr)) {
1046 			kcdata_memcpy(kcdata, kaddr, &sc_info, sizeof(sc_info));
1047 		}
1048 	}
1049 
1050 	if (!uuid_info_addr) {
1051 		/*
1052 		 * Can happen when we catch dyld in the middle of updating
1053 		 * this data structure, or when copyin of the all_image_info struct failed.
1054 		 */
1055 		task_resume_internal(task);
1056 		thread_clear_exec_promotion(current_thread());
1057 		kfree_data(btdata_kernel, alloc_size);
1058 		kcdata_memory_destroy(kcdata);
1059 		return KERN_MEMORY_ERROR;
1060 	}
1061 
1062 	if (uuid_info_count > 0) {
1063 		uint32_t uuid_info_size = (uint32_t)(has_64bit_addr ?
1064 		    sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1065 
1066 		if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata,
1067 		    (has_64bit_addr ? TASK_BTINFO_DYLD_LOADINFO64 : TASK_BTINFO_DYLD_LOADINFO),
1068 		    uuid_info_size, uuid_info_count, &kaddr)) {
1069 			if (copyin((user_addr_t)uuid_info_addr, (void *)kaddr, uuid_info_size * uuid_info_count)) {
1070 				task_resume_internal(task);
1071 				thread_clear_exec_promotion(current_thread());
1072 				kfree_data(btdata_kernel, alloc_size);
1073 				kcdata_memory_destroy(kcdata);
1074 				return KERN_MEMORY_ERROR;
1075 			}
1076 		}
1077 	}
1078 
1079 	task_resume_internal(task);
1080 	thread_clear_exec_promotion(current_thread());
1081 
1082 	/* Next, collect all other information */
1083 	thread_flavor_t tsflavor;
1084 	mach_msg_type_number_t tscount;
1085 
1086 #if defined(__x86_64__) || defined(__i386__)
1087 	tsflavor = x86_THREAD_STATE;      /* unified */
1088 	tscount  = x86_THREAD_STATE_COUNT;
1089 #else
1090 	tsflavor = ARM_THREAD_STATE;      /* unified */
1091 	tscount  = ARM_UNIFIED_THREAD_STATE_COUNT;
1092 #endif
1093 
1094 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_STATE,
1095 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1096 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1097 		bt_thread_state->flavor = tsflavor;
1098 		bt_thread_state->count = tscount;
1099 		/* variable-sized tstate array follows */
1100 
1101 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1102 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1103 		if (kr != KERN_SUCCESS) {
1104 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1105 			if (kr == KERN_TERMINATED) {
1106 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1107 			}
1108 		}
1109 	}
1110 
1111 #if defined(__x86_64__) || defined(__i386__)
1112 	tsflavor = x86_EXCEPTION_STATE;       /* unified */
1113 	tscount  = x86_EXCEPTION_STATE_COUNT;
1114 #else
1115 #if defined(__arm64__)
1116 	if (has_64bit_data) {
1117 		tsflavor = ARM_EXCEPTION_STATE64;
1118 		tscount  = ARM_EXCEPTION_STATE64_COUNT;
1119 	} else
1120 #endif /* defined(__arm64__) */
1121 	{
1122 		tsflavor = ARM_EXCEPTION_STATE;
1123 		tscount  = ARM_EXCEPTION_STATE_COUNT;
1124 	}
1125 #endif /* defined(__x86_64__) || defined(__i386__) */
1126 
1127 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_EXCEPTION_STATE,
1128 	    sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount, &kaddr)) {
1129 		struct btinfo_thread_state_data_t *bt_thread_state = (struct btinfo_thread_state_data_t *)kaddr;
1130 		bt_thread_state->flavor = tsflavor;
1131 		bt_thread_state->count = tscount;
1132 		/* variable-sized tstate array follows */
1133 
1134 		kr = thread_getstatus_to_user(current_thread(), bt_thread_state->flavor,
1135 		    (thread_state_t)&bt_thread_state->tstate, &bt_thread_state->count, TSSF_FLAGS_NONE);
1136 		if (kr != KERN_SUCCESS) {
1137 			bzero((void *)kaddr, sizeof(struct btinfo_thread_state_data_t) + sizeof(int) * tscount);
1138 			if (kr == KERN_TERMINATED) {
1139 				btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1140 			}
1141 		}
1142 	}
1143 
1144 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PID, sizeof(pid_t), &kaddr)) {
1145 		pid_t pid = proc_getpid(p);
1146 		kcdata_memcpy(kcdata, kaddr, &pid, sizeof(pid));
1147 	}
1148 
1149 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PPID, sizeof(p->p_ppid), &kaddr)) {
1150 		kcdata_memcpy(kcdata, kaddr, &p->p_ppid, sizeof(p->p_ppid));
1151 	}
1152 
1153 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_NAME, sizeof(p->p_comm), &kaddr)) {
1154 		kcdata_memcpy(kcdata, kaddr, &p->p_comm, sizeof(p->p_comm));
1155 	}
1156 
1157 #if CONFIG_COALITIONS
1158 	if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcdata, TASK_BTINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &kaddr)) {
1159 		uint64_t coalition_ids[COALITION_NUM_TYPES];
1160 		task_coalition_ids(proc_task(p), coalition_ids);
1161 		kcdata_memcpy(kcdata, kaddr, coalition_ids, sizeof(coalition_ids));
1162 	}
1163 #endif /* CONFIG_COALITIONS */
1164 
1165 	/* V0 is sufficient for ReportCrash */
1166 	gather_rusage_info(current_proc(), &rup.ri, RUSAGE_INFO_V0);
1167 	rup.ri.ri_phys_footprint = 0;
1168 	/* Soft crash, proc did not exit */
1169 	rup.ri.ri_proc_exit_abstime = 0;
1170 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_RUSAGE_INFO, sizeof(struct rusage_info_v0), &kaddr)) {
1171 		kcdata_memcpy(kcdata, kaddr, &rup.ri, sizeof(struct rusage_info_v0));
1172 	}
1173 
1174 	platform = proc_platform(current_proc());
1175 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PLATFORM, sizeof(platform), &kaddr)) {
1176 		kcdata_memcpy(kcdata, kaddr, &platform, sizeof(platform));
1177 	}
1178 
1179 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_PATH, MAXPATHLEN, &kaddr)) {
1180 		char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
1181 		proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, NULL);
1182 		kcdata_memcpy(kcdata, kaddr, buf, MAXPATHLEN);
1183 		zfree(ZV_NAMEI, buf);
1184 	}
1185 
1186 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_UID, sizeof(p->p_uid), &kaddr)) {
1187 		kcdata_memcpy(kcdata, kaddr, &p->p_uid, sizeof(p->p_uid));
1188 	}
1189 
1190 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_GID, sizeof(p->p_gid), &kaddr)) {
1191 		kcdata_memcpy(kcdata, kaddr, &p->p_gid, sizeof(p->p_gid));
1192 	}
1193 
1194 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_PROC_FLAGS, sizeof(unsigned int), &kaddr)) {
1195 		unsigned int pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
1196 		kcdata_memcpy(kcdata, kaddr, &pflags, sizeof(pflags));
1197 	}
1198 
1199 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CPUTYPE, sizeof(cpu_type_t), &kaddr)) {
1200 		cpu_type_t cputype = cpu_type() & ~CPU_ARCH_MASK;
1201 		if (has_64bit_addr) {
1202 			cputype |= CPU_ARCH_ABI64;
1203 		} else if (has_64bit_data) {
1204 			cputype |= CPU_ARCH_ABI64_32;
1205 		}
1206 		kcdata_memcpy(kcdata, kaddr, &cputype, sizeof(cpu_type_t));
1207 	}
1208 
1209 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_TYPE, sizeof(etype), &kaddr)) {
1210 		kcdata_memcpy(kcdata, kaddr, &etype, sizeof(etype));
1211 	}
1212 
1213 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_CRASH_COUNT, sizeof(int), &kaddr)) {
1214 		kcdata_memcpy(kcdata, kaddr, &p->p_crash_count, sizeof(int));
1215 	}
1216 
1217 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THROTTLE_TIMEOUT, sizeof(int), &kaddr)) {
1218 		kcdata_memcpy(kcdata, kaddr, &p->p_throttle_timeout, sizeof(int));
1219 	}
1220 
1221 	assert(codeCnt <= EXCEPTION_CODE_MAX);
1222 
1223 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_EXCEPTION_CODES,
1224 	    sizeof(mach_exception_code_t) * codeCnt, &kaddr)) {
1225 		kcdata_memcpy(kcdata, kaddr, code, sizeof(mach_exception_code_t) * codeCnt);
1226 	}
1227 
1228 	if (reason != OS_REASON_NULL) {
1229 		if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &kaddr)) {
1230 			struct exit_reason_snapshot ers = {
1231 				.ers_namespace = reason->osr_namespace,
1232 				.ers_code = reason->osr_code,
1233 				.ers_flags = reason->osr_flags
1234 			};
1235 
1236 			kcdata_memcpy(kcdata, kaddr, &ers, sizeof(ers));
1237 		}
1238 
1239 		if (reason->osr_kcd_buf != 0) {
1240 			uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
1241 			assert(reason_buf_size != 0);
1242 
1243 			if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &kaddr)) {
1244 				kcdata_memcpy(kcdata, kaddr, reason->osr_kcd_buf, reason_buf_size);
1245 			}
1246 		}
1247 	}
1248 
1249 	threadname[0] = '\0';
1250 	if (KERN_SUCCESS == kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_NAME,
1251 	    sizeof(threadname), &kaddr)) {
1252 		bsd_getthreadname(get_bsdthread_info(current_thread()), threadname);
1253 		kcdata_memcpy(kcdata, kaddr, threadname, sizeof(threadname));
1254 	}
1255 
1256 	kr = thread_info(current_thread(), THREAD_IDENTIFIER_INFO, (thread_info_t)&th_info, &th_info_count);
1257 	if (kr == KERN_TERMINATED) {
1258 		btinfo_flag |= TASK_BTINFO_FLAG_TASK_TERMINATED;
1259 	}
1260 
1261 
1262 	kern_return_t last_kr = kcdata_get_memory_addr(kcdata, TASK_BTINFO_THREAD_ID,
1263 	    sizeof(uint64_t), &kaddr);
1264 
1265 	/*
1266 	 * If the last kcdata_get_memory_addr() failed (unlikely), signal to the
1267 	 * exception handler (ReportCrash) that lightweight corpse collection ran
1268 	 * out of space and the result is incomplete.
1269 	 */
1270 	if (last_kr != KERN_SUCCESS) {
1271 		btinfo_flag |= TASK_BTINFO_FLAG_KCDATA_INCOMPLETE;
1272 	}
1273 
1274 	if (KERN_SUCCESS == kr && KERN_SUCCESS == last_kr) {
1275 		kcdata_memcpy(kcdata, kaddr, &th_info.thread_id, sizeof(uint64_t));
1276 	}
1277 
1278 	/* Lastly, copy the flags to the address we reserved at the beginning. */
1279 	kcdata_memcpy(kcdata, btinfo_flag_addr, &btinfo_flag, sizeof(uint32_t));
1280 
1281 	*new_desc = kcdata;
1282 
1283 	return KERN_SUCCESS;
1284 }
1285 
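/*
 * A consumer of the buffer built above (e.g. ReportCrash) can walk it with
 * the same kcdata_iter API that exit_reason_get_string_desc() uses below.
 * A minimal sketch, assuming the kcdata iterator helpers and the
 * TASK_BTINFO_* type IDs are visible to the consumer:
 */
#if 0   /* illustrative sketch, not compiled */
static void
btinfo_print_platform(void *buf, uint32_t size)
{
	kcdata_iter_t iter = kcdata_iter(buf, size);

	if (!kcdata_iter_valid(iter)) {
		return;
	}
	/* Seek to the TASK_BTINFO_PLATFORM entry emitted above. */
	iter = kcdata_iter_find_type(iter, TASK_BTINFO_PLATFORM);
	if (kcdata_iter_valid(iter)) {
		uint32_t platform;
		memcpy(&platform, kcdata_iter_payload(iter), sizeof(platform));
		printf("crashed process platform: %u\n", platform);
	}
}
#endif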
1286 /*
1287  * We only parse exit reason kcdata blobs for critical processes before they
1288  * die when we're going to panic, or for opt-in, limited diagnostic tools.
1289  *
1290  * Meant to be called immediately before panicking, or in limited diagnostic
1291  * scenarios.
1292  */
1293 char *
1294 exit_reason_get_string_desc(os_reason_t exit_reason)
1295 {
1296 	kcdata_iter_t iter;
1297 
1298 	if (exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL ||
1299 	    exit_reason->osr_bufsize == 0) {
1300 		return NULL;
1301 	}
1302 
1303 	iter = kcdata_iter(exit_reason->osr_kcd_buf, exit_reason->osr_bufsize);
1304 	if (!kcdata_iter_valid(iter)) {
1305 #if DEBUG || DEVELOPMENT
1306 		printf("exit reason has invalid exit reason buffer\n");
1307 #endif
1308 		return NULL;
1309 	}
1310 
1311 	if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) {
1312 #if DEBUG || DEVELOPMENT
1313 		printf("exit reason buffer type mismatch, expected %d got %d\n",
1314 		    KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter));
1315 #endif
1316 		return NULL;
1317 	}
1318 
1319 	iter = kcdata_iter_find_type(iter, EXIT_REASON_USER_DESC);
1320 	if (!kcdata_iter_valid(iter)) {
1321 		return NULL;
1322 	}
1323 
1324 	return (char *)kcdata_iter_payload(iter);
1325 }
1326 
1327 static int initproc_spawned = 0;
1328 
1329 static int
1330 sysctl_initproc_spawned(struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1331 {
1332 	if (req->newptr != 0 && (proc_getpid(req->p) != 1 || initproc_spawned != 0)) {
1333 		// Can only ever be set by launchd, and only once at boot
1334 		return EPERM;
1335 	}
1336 	return sysctl_handle_int(oidp, &initproc_spawned, 0, req);
1337 }
1338 
1339 SYSCTL_PROC(_kern, OID_AUTO, initproc_spawned,
1340     CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_LOCKED, 0, 0,
1341     sysctl_initproc_spawned, "I", "Boolean indicator that launchd has reached main");
1342 
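/*
 * From userspace this flag is visible through sysctl(3); per the handler
 * above, only launchd (pid 1) may set it, and only once at boot. A minimal
 * read sketch:
 */
#if 0   /* illustrative sketch, not compiled */
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int spawned = 0;
	size_t len = sizeof(spawned);

	if (sysctlbyname("kern.initproc_spawned", &spawned, &len, NULL, 0) == 0) {
		printf("launchd has reached main: %d\n", spawned);
	}
	return 0;
}
#endif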
1343 #if DEVELOPMENT || DEBUG
1344 
1345 /* disable user faults */
1346 static TUNABLE(bool, bootarg_disable_user_faults, "-disable_user_faults", false);
1347 #endif /* DEVELOPMENT || DEBUG */
1348 
1349 #define OS_REASON_IFLAG_USER_FAULT 0x1
1350 
1351 #define OS_REASON_TOTAL_USER_FAULTS_PER_PROC  5
1352 
1353 static int
1354 abort_with_payload_internal(proc_t p,
1355     uint32_t reason_namespace, uint64_t reason_code,
1356     user_addr_t payload, uint32_t payload_size,
1357     user_addr_t reason_string, uint64_t reason_flags,
1358     uint32_t internal_flags)
1359 {
1360 	os_reason_t exit_reason = OS_REASON_NULL;
1361 	kern_return_t kr = KERN_SUCCESS;
1362 
1363 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1364 		uint32_t old_value = atomic_load_explicit(&p->p_user_faults,
1365 		    memory_order_relaxed);
1366 
1367 #if DEVELOPMENT || DEBUG
1368 		if (bootarg_disable_user_faults) {
1369 			return EQFULL;
1370 		}
1371 #endif /* DEVELOPMENT || DEBUG */
1372 
1373 		for (;;) {
1374 			if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) {
1375 				return EQFULL;
1376 			}
1377 			// on failure, the CAS below reloads the current value into old_value
1378 			if (atomic_compare_exchange_strong_explicit(&p->p_user_faults,
1379 			    &old_value, old_value + 1, memory_order_relaxed,
1380 			    memory_order_relaxed)) {
1381 				break;
1382 			}
1383 		}
1384 	}
1385 
1386 	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1387 	    proc_getpid(p), reason_namespace,
1388 	    reason_code, 0, 0);
1389 
1390 	exit_reason = build_userspace_exit_reason(reason_namespace, reason_code,
1391 	    payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT);
1392 
1393 	if (internal_flags & OS_REASON_IFLAG_USER_FAULT) {
1394 		mach_exception_code_t code = 0;
1395 
1396 		EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_USER); /* simulated EXC_GUARD */
1397 		EXC_GUARD_ENCODE_FLAVOR(code, 0);
1398 		EXC_GUARD_ENCODE_TARGET(code, reason_namespace);
1399 
1400 		if (exit_reason == OS_REASON_NULL) {
1401 			kr = KERN_RESOURCE_SHORTAGE;
1402 		} else {
1403 			kr = task_violated_guard(code, reason_code, exit_reason, TRUE);
1404 		}
1405 		os_reason_free(exit_reason);
1406 	} else {
1407 		/*
1408 		 * We use SIGABRT (rather than calling exit directly from here) so that
1409 		 * the debugger can catch abort_with_{reason,payload} calls.
1410 		 */
1411 		psignal_try_thread_with_reason(p, current_thread(), SIGABRT, exit_reason);
1412 	}
1413 
1414 	switch (kr) {
1415 	case KERN_SUCCESS:
1416 		return 0;
1417 	case KERN_NOT_SUPPORTED:
1418 		return ENOTSUP;
1419 	case KERN_INVALID_ARGUMENT:
1420 		return EINVAL;
1421 	case KERN_RESOURCE_SHORTAGE:
1422 	default:
1423 		return EBUSY;
1424 	}
1425 }
1426 
1427 int
1428 abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args,
1429     __unused void *retval)
1430 {
1431 	abort_with_payload_internal(cur_proc, args->reason_namespace,
1432 	    args->reason_code, args->payload, args->payload_size,
1433 	    args->reason_string, args->reason_flags, 0);
1434 
1435 	return 0;
1436 }
1437 
1438 int
1439 os_fault_with_payload(struct proc *cur_proc,
1440     struct os_fault_with_payload_args *args, __unused int *retval)
1441 {
1442 	return abort_with_payload_internal(cur_proc, args->reason_namespace,
1443 	           args->reason_code, args->payload, args->payload_size,
1444 	           args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT);
1445 }
1446 
1447 
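/*
 * For reference, the userspace entry points that reach the two syscalls
 * above are the abort_with_*() SPI. A hedged sketch; the prototype and the
 * OS_REASON_TEST namespace come from the private <sys/reason.h> header and
 * are an assumption of this example:
 */
#if 0   /* illustrative sketch, not compiled; private SPI assumed */
#include <sys/reason.h>

static void
bail_out(void)
{
	/*
	 * Delivers SIGABRT (so a debugger can catch it, per the comment in
	 * abort_with_payload_internal above) and records an os_reason.
	 */
	abort_with_reason(OS_REASON_TEST, /* reason_code */ 42,
	    "example failure", OS_REASON_FLAG_NO_CRASH_REPORT);
}
#endif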
1448 /*
1449  * exit --
1450  *	Death of process.
1451  */
1452 __attribute__((noreturn))
1453 void
1454 exit(proc_t p, struct exit_args *uap, int *retval)
1455 {
1456 	p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24;
1457 	exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval);
1458 
1459 	thread_exception_return();
1460 	/* NOTREACHED */
1461 	while (TRUE) {
1462 		thread_block(THREAD_CONTINUE_NULL);
1463 	}
1464 	/* NOTREACHED */
1465 }
1466 
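/*
 * Worked example of the packing above (hypothetical value): exit() with
 * rval 0x12345678 stashes the top byte, 0x12, in p_xhighbits, while
 * W_EXITCODE(0x12345678, 0) shifts the status left by 8 bits, so a classic
 * wait4(2) caller sees only the low byte: WEXITSTATUS() == 0x78. The high
 * byte is later propagated to the parent's siginfo via p_xhighbits in
 * proc_exit().
 */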
1467 /*
1468  * Exit: deallocate address space and other resources, change proc state
1469  * to zombie, and unlink proc from allproc and parent's lists.  Save exit
1470  * status and rusage for wait().  Check for child processes and orphan them.
1471  */
1472 int
1473 exit1(proc_t p, int rv, int *retval)
1474 {
1475 	return exit1_internal(p, rv, retval, FALSE, TRUE, 0);
1476 }
1477 
1478 int
1479 exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1480     int jetsam_flags)
1481 {
1482 	return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL);
1483 }
1484 
1485 /*
1486  * NOTE: exit_with_reason drops a reference on the passed exit_reason
1487  */
1488 int
1489 exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify,
1490     int jetsam_flags, struct os_reason *exit_reason)
1491 {
1492 	thread_t self = current_thread();
1493 	struct task *task = proc_task(p);
1494 	struct uthread *ut;
1495 	int error = 0;
1496 	bool proc_exiting = false;
1497 
1498 #if DEVELOPMENT || DEBUG
1499 	/*
1500 	 * Debug boot-arg: panic here if matching process is exiting with non-zero code.
1501 	 * Example usage: panic_on_error_exit=launchd,logd,watchdogd
1502 	 */
1503 	if (rv && strnstr(panic_on_eexit_pcomms, p->p_comm, sizeof(panic_on_eexit_pcomms))) {
1504 		panic("%s: Process %s with pid %d exited on error with code 0x%x.",
1505 		    __FUNCTION__, p->p_comm, proc_getpid(p), rv);
1506 	}
1507 #endif
1508 
1509 	/*
1510 	 * If a thread in this task has already
1511 	 * called exit(), then halt any others
1512 	 * right here.
1513 	 */
1514 
1515 	ut = get_bsdthread_info(self);
1516 	(void)retval;
1517 
1518 	/*
1519 	 * The parameter list of audit_syscall_exit() was augmented to
1520 	 * take the Darwin syscall number as the first parameter,
1521 	 * which is currently required by mac_audit_postselect().
1522 	 */
1523 
1524 	/*
1525 	 * The BSM token contains two components: an exit status as passed
1526 	 * to exit(), and a return value to indicate what sort of exit it
1527 	 * was.  The exit status is WEXITSTATUS(rv), but it's not clear
1528 	 * what the return value is.
1529 	 */
1530 	AUDIT_ARG(exit, WEXITSTATUS(rv), 0);
1531 	/*
1532 	 * TODO: what to audit here when jetsam calls exit and the uthread
1533 	 * 'ut' does not belong to the proc 'p'.
1534 	 */
1535 	AUDIT_SYSCALL_EXIT(SYS_exit, p, ut, 0); /* Exit is always successful */
1536 
1537 	DTRACE_PROC1(exit, int, CLD_EXITED);
1538 
1539 	/* mark that the process is going to exit and pull it out of DBG/disk throttle */
1540 	/* TODO: This should be done after becoming exit thread */
1541 	proc_set_task_policy(proc_task(p), TASK_POLICY_ATTRIBUTE,
1542 	    TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
1543 
1544 	proc_lock(p);
1545 	error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0));
1546 	if (error == EDEADLK) {
1547 		/*
1548 		 * If proc_transstart() returns EDEADLK, then another thread
1549 		 * is either exec'ing or exiting. Return an error and allow
1550 		 * the other thread to continue.
1551 		 */
1552 		proc_unlock(p);
1553 		os_reason_free(exit_reason);
1554 		if (current_proc() == p) {
1555 			if (p->exit_thread == self) {
1556 				panic("exit_thread failed to exit");
1557 			}
1558 
1559 			if (thread_can_terminate) {
1560 				thread_exception_return();
1561 			}
1562 		}
1563 
1564 		return error;
1565 	}
1566 
1567 	proc_exiting = !!(p->p_lflag & P_LEXIT);
1568 
1569 	while (proc_exiting || p->exit_thread != self) {
1570 		if (proc_exiting || sig_try_locked(p) <= 0) {
1571 			proc_transend(p, 1);
1572 			os_reason_free(exit_reason);
1573 
1574 			if (get_threadtask(self) != task) {
1575 				proc_unlock(p);
1576 				return 0;
1577 			}
1578 			proc_unlock(p);
1579 
1580 			thread_terminate(self);
1581 			if (!thread_can_terminate) {
1582 				return 0;
1583 			}
1584 
1585 			thread_exception_return();
1586 			/* NOTREACHED */
1587 		}
1588 		sig_lock_to_exit(p);
1589 	}
1590 
1591 	if (exit_reason != OS_REASON_NULL) {
1592 		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE,
1593 		    proc_getpid(p), exit_reason->osr_namespace,
1594 		    exit_reason->osr_code, 0, 0);
1595 	}
1596 
1597 	assert(p->p_exit_reason == OS_REASON_NULL);
1598 	p->p_exit_reason = exit_reason;
1599 
1600 	p->p_lflag |= P_LEXIT;
1601 	p->p_xstat = rv;
1602 	p->p_lflag |= jetsam_flags;
1603 
1604 	proc_transend(p, 1);
1605 	proc_unlock(p);
1606 
1607 	proc_prepareexit(p, rv, perf_notify);
1608 
1609 	/* Last thread to terminate will call proc_exit() */
1610 	task_terminate_internal(task);
1611 
1612 	return 0;
1613 }
1614 
1615 #if CONFIG_MEMORYSTATUS
1616 /*
1617  * Remove this process from the jetsam bands for freezing or exiting. Note this will block if the process
1618  * is currently being frozen.
1619  * The proc_list_lock is held by the caller.
1620  * NB: If the process should be ineligible for future freezing or jetsam, the caller should first set
1621  * the p_refcount P_REF_DEAD bit.
1622  */
1623 static void
1624 proc_memorystatus_remove(proc_t p)
1625 {
1626 	LCK_MTX_ASSERT(&proc_list_mlock, LCK_MTX_ASSERT_OWNED);
1627 	while (memorystatus_remove(p) == EAGAIN) {
1628 		os_log(OS_LOG_DEFAULT, "memorystatus_remove: Process[%d] tried to exit while being frozen. Blocking exit until freeze completes.", proc_getpid(p));
1629 		msleep(&p->p_memstat_state, &proc_list_mlock, PWAIT, "proc_memorystatus_remove", NULL);
1630 	}
1631 }
1632 #endif
1633 
1634 #if DEVELOPMENT
1635 boolean_t crash_behavior_test_mode = FALSE;
1636 boolean_t crash_behavior_test_would_panic = FALSE;
1637 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_mode, CTLFLAG_RW, &crash_behavior_test_mode, 0, "");
1638 SYSCTL_UINT(_kern, OID_AUTO, crash_behavior_test_would_panic, CTLFLAG_RW, &crash_behavior_test_would_panic, 0, "");
1639 #endif /* DEVELOPMENT */
1640 
1641 static bool
1642 _proc_is_crashing_signal(int sig)
1643 {
1644 	bool result = false;
1645 	switch (sig) {
1646 	case SIGILL:
1647 	case SIGABRT:
1648 	case SIGFPE:
1649 	case SIGBUS:
1650 	case SIGSEGV:
1651 	case SIGSYS:
1652 	/*
1653 	 * If SIGTRAP is the terminating signal, then we can safely assume the
1654 	 * process crashed. (On iOS, SIGTRAP will be the terminating signal when
1655 	 * a process calls __builtin_trap(), which will abort.)
1656 	 */
1657 	case SIGTRAP:
1658 		result = true;
1659 	}
1660 
1661 	return result;
1662 }
1663 
1664 static bool
1665 _proc_is_fatal_reason(os_reason_t reason)
1666 {
1667 	if ((reason->osr_flags & OS_REASON_FLAG_ABORT) != 0) {
1668 		/* Abort is always fatal even if there is no crash report generated */
1669 		return true;
1670 	}
1671 	if ((reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT) != 0) {
1672 		/*
1673 		 * No crash report means this reason shouldn't be considered fatal
1674 		 * unless we are in test mode
1675 		 */
1676 #if DEVELOPMENT
1677 		if (crash_behavior_test_mode) {
1678 			return true;
1679 		}
1680 #endif /* DEVELOPMENT */
1681 		return false;
1682 	}
1683 	// By default all OS_REASON are fatal
1684 	return true;
1685 }
1686 
1687 static TUNABLE(bool, panic_on_crash_disabled, "panic_on_crash_disabled", false);
1688 
1689 static bool
1690 proc_should_trigger_panic(proc_t p, int rv)
1691 {
1692 	if (p == initproc) {
1693 		/* Always panic for launchd */
1694 		return true;
1695 	}
1696 
1697 	if (panic_on_crash_disabled) {
1698 		printf("panic-on-crash disabled via boot-arg\n");
1699 		return false;
1700 	}
1701 
1702 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_EXIT) != 0) {
1703 		return true;
1704 	}
1705 
1706 	if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_SPAWN_FAIL) != 0) {
1707 		return true;
1708 	}
1709 
1710 	if (p->p_posix_spawn_failed) {
1711 		/* posix_spawn failures normally don't qualify for panics */
1712 		return false;
1713 	}
1714 
1715 	bool deadline_expired = (mach_continuous_time() > p->p_crash_behavior_deadline);
1716 	if (p->p_crash_behavior_deadline != 0 && deadline_expired) {
1717 		return false;
1718 	}
1719 
1720 	if (WIFEXITED(rv)) {
1721 		int code = WEXITSTATUS(rv);
1722 
1723 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_NON_ZERO_EXIT) != 0) {
1724 			if (code == 0) {
1725 				/* No panic if we exit 0 */
1726 				return false;
1727 			} else {
1728 				/* Panic on non-zero exit */
1729 				return true;
1730 			}
1731 		} else {
1732 			/* No panic on normal exit if the process doesn't have the non-zero flag set */
1733 			return false;
1734 		}
1735 	} else if (WIFSIGNALED(rv)) {
1736 		int signal = WTERMSIG(rv);
1737 		/* This is a crash (non-normal exit) */
1738 		if ((p->p_crash_behavior & POSIX_SPAWN_PANIC_ON_CRASH) != 0) {
1739 			os_reason_t reason = p->p_exit_reason;
1740 			if (reason != OS_REASON_NULL) {
1741 				if (!_proc_is_fatal_reason(reason)) {
1742 					// Skip non-fatal terminate_with_reason
1743 					return false;
1744 				}
1745 				if (reason->osr_namespace == OS_REASON_SIGNAL) {
1746 					/*
1747 					 * OS_REASON_SIGNAL delivers as a SIGKILL with the actual signal
1748 					 * in osr_code, so we should check that signal here
1749 					 */
1750 					return _proc_is_crashing_signal((int)reason->osr_code);
1751 				} else {
1752 					/*
1753 					 * This branch covers the case of terminate_with_reason, which
1754 					 * delivers a SIGTERM; that is still considered a crash even
1755 					 * though the signal itself is not a crashing signal.
1756 					 */
1757 					return true;
1758 				}
1759 			}
1760 			return _proc_is_crashing_signal(signal);
1761 		} else {
1762 			return false;
1763 		}
1764 	} else {
1765 		/*
1766 		 * This branch implies that we didn't exit normally nor did we receive
1767 		 * a signal. This should be unreachable.
1768 		 */
1769 		return true;
1770 	}
1771 }
1772 
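/*
 * The p_crash_behavior bits tested above are configured at spawn time. A
 * hedged userspace sketch of opting a child into panic-on-crash; the
 * posix_spawnattr_set_crash_behavior_np() name and the <spawn_private.h>
 * header are private SPI and assumptions of this example:
 */
#if 0   /* illustrative sketch, not compiled; private SPI assumed */
#include <spawn.h>
#include <spawn_private.h>

static int
spawn_panic_on_crash(const char *path, char *const argv[], char *const envp[])
{
	posix_spawnattr_t attr;
	pid_t pid;
	int err;

	posix_spawnattr_init(&attr);
	/* Assumed private attribute: panic the system if this child crashes. */
	posix_spawnattr_set_crash_behavior_np(&attr, POSIX_SPAWN_PANIC_ON_CRASH);
	err = posix_spawn(&pid, path, NULL, &attr, argv, envp);
	posix_spawnattr_destroy(&attr);
	return err;
}
#endif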
1773 static void
1774 proc_crash_coredump(proc_t p)
1775 {
1776 	(void)p;
1777 #if (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP
1778 	/*
1779 	 * For debugging purposes, generate a core file of initproc before
1780 	 * panicking. Leave at least 300 MB free on the root volume, and ignore
1781 	 * the process's corefile ulimit. fsync() the file to ensure it lands on disk
1782 	 * before the panic hits.
1783 	 */
1784 
1785 	int             err;
1786 	uint64_t        coredump_start = mach_absolute_time();
1787 	uint64_t        coredump_end;
1788 	clock_sec_t     tv_sec;
1789 	clock_usec_t    tv_usec;
1790 	uint32_t        tv_msec;
1791 
1792 
1793 	err = coredump(p, 300, COREDUMP_IGNORE_ULIMIT | COREDUMP_FULLFSYNC);
1794 
1795 	coredump_end = mach_absolute_time();
1796 
1797 	absolutetime_to_microtime(coredump_end - coredump_start, &tv_sec, &tv_usec);
1798 
1799 	tv_msec = tv_usec / 1000;
1800 
1801 	if (err != 0) {
1802 		printf("Failed to generate core file for pid: %d: error %d, took %d.%03d seconds\n",
1803 		    proc_getpid(p), err, (uint32_t)tv_sec, tv_msec);
1804 	} else {
1805 		printf("Generated core file for pid: %d in %d.%03d seconds\n",
1806 		    proc_getpid(p), (uint32_t)tv_sec, tv_msec);
1807 	}
1808 #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */
1809 }
1810 
1811 static void
1812 proc_handle_critical_exit(proc_t p, int rv)
1813 {
1814 	if (!proc_should_trigger_panic(p, rv)) {
1815 		// No panic, bail out
1816 		return;
1817 	}
1818 
1819 #if DEVELOPMENT
1820 	if (crash_behavior_test_mode) {
1821 		crash_behavior_test_would_panic = TRUE;
1822 		// Force test mode off after hitting a panic
1823 		crash_behavior_test_mode = FALSE;
1824 		return;
1825 	}
1826 #endif /* DEVELOPMENT */
1827 
1828 	char *exit_reason_desc = exit_reason_get_string_desc(p->p_exit_reason);
1829 
1830 	if (p->p_exit_reason == OS_REASON_NULL) {
1831 		printf("pid %d exited -- no exit reason available -- (signal %d, exit %d)\n",
1832 		    proc_getpid(p), WTERMSIG(rv), WEXITSTATUS(rv));
1833 	} else {
1834 		printf("pid %d exited -- exit reason namespace %d subcode 0x%llx, description %s\n", proc_getpid(p),
1835 		    p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, exit_reason_desc ?
1836 		    exit_reason_desc : "none");
1837 	}
1838 
1839 	const char *prefix_str;
1840 	char prefix_str_buf[128];
1841 
1842 	if (p == initproc) {
1843 		if (strnstr(p->p_name, "preinit", sizeof(p->p_name))) {
1844 			prefix_str = "LTE preinit process exited";
1845 		} else if (initproc_spawned) {
1846 			prefix_str = "initproc exited";
1847 		} else {
1848 			prefix_str = "initproc failed to start";
1849 		}
1850 	} else {
1851 		/* For processes that aren't launchd, just use the process name and pid */
1852 		snprintf(prefix_str_buf, sizeof(prefix_str_buf), "%s[%d] exited", p->p_name, proc_getpid(p));
1853 		prefix_str = prefix_str_buf;
1854 	}
1855 
1856 	proc_crash_coredump(p);
1857 
1858 	sync(p, (void *)NULL, (int *)NULL);
1859 	const uint64_t panic_options_mask = DEBUGGER_OPTION_INITPROC_PANIC | DEBUGGER_OPTION_USERSPACE_INITIATED_PANIC;
1860 
1861 	if (p->p_exit_reason == OS_REASON_NULL) {
1862 		panic_with_options(0, NULL, panic_options_mask, "%s -- no exit reason available -- (signal %d, exit status %d %s)",
1863 		    prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""));
1864 	} else {
1865 		panic_with_options(0, NULL, panic_options_mask, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s",
1866 		    ((proc_getcsflags(p) & CS_KILLED) ? "CS_KILLED" : ""),
1867 		    prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1868 		    exit_reason_desc ? exit_reason_desc : "none");
1869 	}
1870 }
1871 
1872 void
1873 proc_prepareexit(proc_t p, int rv, boolean_t perf_notify)
1874 {
1875 	mach_exception_data_type_t code = 0, subcode = 0;
1876 	exception_type_t etype;
1877 
1878 	struct uthread *ut;
1879 	thread_t self = current_thread();
1880 	ut = get_bsdthread_info(self);
1881 	struct rusage_superset *rup;
1882 	int kr = 0;
1883 	int create_corpse = FALSE;
1884 	bool corpse_source = false;
1885 	task_t task = proc_task(p);
1886 
1887 
1888 	if (p->p_crash_behavior != 0 || p == initproc) {
1889 		proc_handle_critical_exit(p, rv);
1890 	}
1891 
1892 	if (task) {
1893 		corpse_source = vm_map_is_corpse_source(get_task_map(task));
1894 	}
1895 
1896 	/*
1897 	 * Generate a corefile/crashlog if:
1898 	 *      The process doesn't have an exit reason that indicates no crash report should be created
1899 	 *      AND any of the following are true:
1900 	 *	- The process was terminated due to a fatal signal that generates a core
1901 	 *	- The process was killed due to a code signing violation
1902 	 *	- The process has an exit reason that indicates we should generate a crash report
1903 	 *
1904 	 * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
1905 	 * (which normally triggers a core) but may indicate that no crash report should be created.
1906 	 */
1907 	if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
1908 	    (hassigprop(WTERMSIG(rv), SA_CORE) || ((proc_getcsflags(p) & CS_KILLED) != 0) ||
1909 	    (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
1910 	    OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
1911 		/*
1912 		 * Workaround for processes checking up on PT_DENY_ATTACH:
1913 		 * should be backed out post-Leopard (details in 5431025).
1914 		 */
1915 		if ((SIGSEGV == WTERMSIG(rv)) &&
1916 		    (p->p_pptr->p_lflag & P_LNOATTACH)) {
1917 			goto skipcheck;
1918 		}
1919 
1920 		/*
1921 		 * Crash Reporter looks for the signal value, original exception
1922 		 * type, and low 20 bits of the original code in code[0]
1923 		 * (8, 4, and 20 bits respectively). code[1] is unmodified.
1924 		 */
1925 		code = ((WTERMSIG(rv) & 0xff) << 24) |
1926 		    ((ut->uu_exception & 0x0f) << 20) |
1927 		    ((int)ut->uu_code & 0xfffff);
1928 		subcode = ut->uu_subcode;
1929 		etype = ut->uu_exception;
1930 
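		/*
		 * Worked example of the encoding above (hypothetical values):
		 * SIGSEGV (11) from an original EXC_BAD_ACCESS (1) exception with
		 * code KERN_INVALID_ADDRESS (1) packs into code[0] as
		 * (11 << 24) | (1 << 20) | 1 == 0x0b100001; code[1] carries the
		 * unmodified subcode (here, the faulting address).
		 */
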
1931 		/* Default to EXC_CRASH if the exception is not an EXC_RESOURCE or EXC_GUARD */
1932 		if (etype != EXC_RESOURCE && etype != EXC_GUARD) {
1933 			etype = EXC_CRASH;
1934 		}
1935 
1936 #if (DEVELOPMENT || DEBUG)
1937 		if (p->p_pid <= exception_log_max_pid) {
1938 			const char *proc_name = proc_best_name(p);
1939 			if (PROC_HAS_EXITREASON(p)) {
1940 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1941 				    "pid: %d -- process name: %s -- exit reason namespace: %d -- subcode: 0x%llx -- description: %s",
1942 				    proc_getpid(p), proc_name, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code,
1943 				    exit_reason_get_string_desc(p->p_exit_reason));
1944 			} else {
1945 				record_system_event(SYSTEM_EVENT_TYPE_INFO, SYSTEM_EVENT_SUBSYSTEM_PROCESS, "process exit",
1946 				    "pid: %d -- process name: %s -- exit status %d",
1947 				    proc_getpid(p), proc_name, WEXITSTATUS(rv));
1948 			}
1949 		}
1950 #endif
1951 		const bool fatal = false;
1952 		kr = task_exception_notify(EXC_CRASH, code, subcode, fatal);
1953 		/* Nobody handled EXC_CRASH? Remember to make a corpse. */
1954 		if ((kr != 0 || corpse_source) && p == current_proc()) {
1955 			/*
1956 			 * Do not create corpse when exit is called from jetsam thread.
1957 			 * Corpse creation code requires that proc_prepareexit is
1958 			 * called by the exiting proc and not the kernel_proc.
1959 			 */
1960 			create_corpse = TRUE;
1961 		}
1962 
1963 		/*
1964 		 * Revalidate the code signing of the text pages around current PC.
1965 		 * This is an attempt to detect and repair faults due to memory
1966 		 * corruption of text pages.
1967 		 *
1968 		 * The goal here is to fixup infrequent memory corruptions due to
1969 		 * things like aging RAM bit flips. So the approach is to only expect
1970 		 * to have to fixup one thing per crash. This also limits the amount
1971 		 * of extra work we cause in case this is a development kernel with an
1972 		 * active memory stomp happening.
1973 		 */
1974 		uintptr_t bt[2];
1975 		struct backtrace_user_info btinfo = BTUINFO_INIT;
1976 		unsigned int frame_count = backtrace_user(bt, 2, NULL, &btinfo);
1977 		int bt_err = btinfo.btui_error;
1978 		if (bt_err == 0 && frame_count >= 1) {
1979 			/*
1980 			 * First check the page containing the current PC.
1981 			 * This passes if the page validates against its code signature -or- if
1982 			 * we can't figure out what is at that address. The latter lets us keep
1983 			 * checking previous pages, which may be corrupt and have caused a wild branch.
1984 			 */
1985 			kr = revalidate_text_page(task, bt[0]);
1986 
1987 			/* No corruption found, check the previous sequential page */
1988 			if (kr == KERN_SUCCESS) {
1989 				kr = revalidate_text_page(task, bt[0] - get_task_page_size(task));
1990 			}
1991 
1992 			/* Still no corruption found, check the current function's caller */
1993 			if (kr == KERN_SUCCESS) {
1994 				if (frame_count > 1 &&
1995 				    atop(bt[0]) != atop(bt[1]) &&           /* don't recheck PC page */
1996 				    atop(bt[0]) - 1 != atop(bt[1])) {       /* don't recheck page before */
1997 					kr = revalidate_text_page(task, (vm_map_offset_t)bt[1]);
1998 				}
1999 			}
2000 
2001 			/*
2002 			 * Log that we found a corruption.
2003 			 */
2004 			if (kr != KERN_SUCCESS) {
2005 				os_log(OS_LOG_DEFAULT,
2006 				    "Text page corruption detected in dying process %d\n", proc_getpid(p));
2007 			}
2008 		}
2009 	}
2010 
2011 skipcheck:
2012 	if (task_is_driver(task) && PROC_HAS_EXITREASON(p)) {
2013 		IOUserServerRecordExitReason(task, p->p_exit_reason);
2014 	}
2015 
2016 	/* Notify the perf server? */
2017 	if (perf_notify) {
2018 		(void)sys_perf_notify(self, proc_getpid(p));
2019 	}
2020 
2021 
2022 	/* stash the usage into corpse data if create_corpse == TRUE */
2023 	if (create_corpse == TRUE) {
2024 		kr = task_mark_corpse(task);
2025 		if (kr != KERN_SUCCESS) {
2026 			if (kr == KERN_NO_SPACE) {
2027 				printf("Process[%d] has no vm space for corpse info.\n", proc_getpid(p));
2028 			} else if (kr == KERN_NOT_SUPPORTED) {
2029 				printf("Process[%d] was destined to be a corpse, but corpses are disabled by config.\n", proc_getpid(p));
2030 			} else if (kr == KERN_TERMINATED) {
2031 				printf("Process[%d] has been terminated before it could be converted to a corpse.\n", proc_getpid(p));
2032 			} else {
2033 				printf("Process[%d] crashed: %s. Too many corpses being created.\n", proc_getpid(p), p->p_comm);
2034 			}
2035 			create_corpse = FALSE;
2036 		}
2037 	}
2038 
2039 	if (corpse_source && !create_corpse) {
2040 		/* vm_map was marked as a corpse source, but we decided not to create one; unmark the vm_map */
2041 		vm_map_unset_corpse_source(get_task_map(task));
2042 	}
2043 
2044 	if (!proc_is_shadow(p)) {
2045 		/*
2046 		 * Before this process becomes a zombie, stash resource usage
2047 		 * stats in the proc for external observers to query
2048 		 * via proc_pid_rusage().
2049 		 *
2050 		 * If the zombie allocation fails, just punt the stats.
2051 		 */
2052 		rup = zalloc(zombie_zone);
2053 		gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT);
2054 		rup->ri.ri_phys_footprint = 0;
2055 		rup->ri.ri_proc_exit_abstime = mach_absolute_time();
2056 		/*
2057 		 * Make the rusage_info visible to external observers
2058 		 * only after it has been completely filled in.
2059 		 */
2060 		p->p_ru = rup;
2061 	}
2062 
2063 	if (create_corpse) {
2064 		int est_knotes = 0, num_knotes = 0;
2065 		uint64_t *buffer = NULL;
2066 		uint32_t buf_size = 0;
2067 
2068 		/* Get all the udata pointers from kqueue */
2069 		est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2070 		if (est_knotes > 0) {
2071 			buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t));
2072 			buffer = kalloc_data(buf_size, Z_WAITOK);
2073 			if (buffer) {
2074 				num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2075 				if (num_knotes > est_knotes + 32) {
2076 					num_knotes = est_knotes + 32;
2077 				}
2078 			}
2079 		}
2080 
2081 		/* Update the code, subcode based on exit reason */
2082 		proc_update_corpse_exception_codes(p, &code, &subcode);
2083 		populate_corpse_crashinfo(p, task, rup,
2084 		    code, subcode, buffer, num_knotes, NULL, etype);
2085 		kfree_data(buffer, buf_size);
2086 	}
2087 	/*
2088 	 * Remove proc from allproc queue and from pidhash chain.
2089 	 * Need to do this before we do anything that can block.
2090 	 * Failing to do so causes things like mount() to find this proc on allproc
2091 	 * in a partially cleaned state.
2092 	 */
2093 
2094 	proc_list_lock();
2095 
2096 #if CONFIG_MEMORYSTATUS
2097 	proc_memorystatus_remove(p);
2098 #endif
2099 
2100 	LIST_REMOVE(p, p_list);
2101 	LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */
2102 	/* will not be visible via proc_find */
2103 	os_atomic_or(&p->p_refcount, P_REF_DEAD, relaxed);
2104 
2105 	proc_list_unlock();
2106 
2107 	/*
2108 	 * If parent is waiting for us to exit or exec,
2109 	 * P_LPPWAIT is set; we will wakeup the parent below.
2110 	 */
2111 	proc_lock(p);
2112 	p->p_lflag &= ~(P_LTRACED | P_LPPWAIT);
2113 	p->p_sigignore = ~(sigcantmask);
2114 
2115 	/*
2116 	 * If a thread is already waiting for us in proc_exit,
2117 	 * P_LTERM is set, wakeup the thread.
2118 	 */
2119 	if (p->p_lflag & P_LTERM) {
2120 		wakeup(&p->exit_thread);
2121 	} else {
2122 		p->p_lflag |= P_LTERM;
2123 	}
2124 
2125 	/* If current proc is exiting, ignore signals on the exit thread */
2126 	if (p == current_proc()) {
2127 		ut->uu_siglist = 0;
2128 	}
2129 	proc_unlock(p);
2130 }
2131 
2132 void
2133 proc_exit(proc_t p)
2134 {
2135 	proc_t q;
2136 	proc_t pp;
2137 	struct task *task = proc_task(p);
2138 	vnode_t tvp = NULLVP;
2139 	struct pgrp * pg;
2140 	struct session *sessp;
2141 	struct uthread * uth;
2142 	pid_t pid;
2143 	int exitval;
2144 	int knote_hint;
2145 
2146 	uth = current_uthread();
2147 
2148 	proc_lock(p);
2149 	proc_transstart(p, 1, 0);
2150 	if (!(p->p_lflag & P_LEXIT)) {
2151 		/*
2152 		 * This can happen if a thread_terminate() occurs
2153 		 * in a single-threaded process.
2154 		 */
2155 		p->p_lflag |= P_LEXIT;
2156 		proc_transend(p, 1);
2157 		proc_unlock(p);
2158 		proc_prepareexit(p, 0, TRUE);
2159 		(void) task_terminate_internal(task);
2160 		proc_lock(p);
2161 	} else if (!(p->p_lflag & P_LTERM)) {
2162 		proc_transend(p, 1);
2163 		/* Jetsam is in the middle of calling proc_prepareexit; wait for it */
2164 		p->p_lflag |= P_LTERM;
2165 		msleep(&p->exit_thread, &p->p_mlock, PWAIT, "proc_prepareexit_wait", NULL);
2166 	} else {
2167 		proc_transend(p, 1);
2168 	}
2169 
2170 	p->p_lflag |= P_LPEXIT;
2171 
2172 	/*
2173 	 * Other kernel threads may be in the middle of signalling this process.
2174 	 * Wait for those threads to wrap it up before making the process
2175 	 * disappear on them.
2176 	 */
2177 	if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) {
2178 		p->p_sigwaitcnt++;
2179 		while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) {
2180 			msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL);
2181 		}
2182 		p->p_sigwaitcnt--;
2183 	}
2184 
2185 	proc_unlock(p);
2186 	pid = proc_getpid(p);
2187 	exitval = p->p_xstat;
2188 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2189 	    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START,
2190 	    pid, exitval, 0, 0, 0);
2191 
2192 #if DEVELOPMENT || DEBUG
2193 	proc_exit_lpexit_check(pid, PELS_POS_START);
2194 #endif
2195 
2196 #if CONFIG_DTRACE
2197 	dtrace_proc_exit(p);
2198 #endif
2199 
2200 	proc_refdrain(p);
2201 	/* We now have unique ref to the proc */
2202 
2203 	/* if any pending cpu limits action, clear it */
2204 	task_clear_cpuusage(proc_task(p), TRUE);
2205 
2206 	workq_mark_exiting(p);
2207 
2208 	/*
2209 	 * need to cancel async IO requests that can be cancelled and wait for those
2210 	 * already active.  MAY BLOCK!
2211 	 */
2212 	_aio_exit( p );
2213 
2214 	/*
2215 	 * Close open files and release open-file table.
2216 	 * This may block!
2217 	 */
2218 	fdt_invalidate(p);
2219 
2220 	/*
2221 	 * Once all the knotes, kqueues & workloops are destroyed, get rid of the
2222 	 * workqueue.
2223 	 */
2224 	workq_exit(p);
2225 
2226 	if (uth->uu_lowpri_window) {
2227 		/*
2228 		 * the task is marked as a low priority I/O type
2229 		 * and the I/O we issued while flushing files on close
2230 		 * collided with normal I/O operations...
2231 		 * no need to throttle this thread since it's going away,
2232 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2233 		 */
2234 		throttle_lowpri_io(0);
2235 	}
2236 
2237 	if (p->p_lflag & P_LNSPACE_RESOLVER) {
2238 		/*
2239 		 * The namespace resolver is exiting; there may be
2240 		 * outstanding materialization requests to clean up.
2241 		 */
2242 		nspace_resolver_exited(p);
2243 	}
2244 
2245 #if SYSV_SHM
2246 	/* Close ref SYSV shared memory */
2247 	if (p->vm_shm) {
2248 		shmexit(p);
2249 	}
2250 #endif
2251 #if SYSV_SEM
2252 	/* Release SYSV semaphores */
2253 	semexit(p);
2254 #endif
2255 
2256 #if PSYNCH
2257 	pth_proc_hashdelete(p);
2258 #endif /* PSYNCH */
2259 
2260 	pg = proc_pgrp(p, &sessp);
2261 	if (SESS_LEADER(p, sessp)) {
2262 		if (sessp->s_ttyvp != NULLVP) {
2263 			struct vnode *ttyvp;
2264 			int ttyvid;
2265 			int cttyflag = 0;
2266 			struct vfs_context context;
2267 			struct tty *tp;
2268 			struct pgrp *tpgrp = PGRP_NULL;
2269 
2270 			/*
2271 			 * Controlling process.
2272 			 * Signal foreground pgrp,
2273 			 * drain controlling terminal
2274 			 * and revoke access to controlling terminal.
2275 			 */
2276 
2277 			proc_list_lock(); /* prevent any t_pgrp from changing */
2278 			session_lock(sessp);
2279 			if (sessp->s_ttyp && sessp->s_ttyp->t_session == sessp) {
2280 				tpgrp = tty_pgrp_locked(sessp->s_ttyp);
2281 			}
2282 			proc_list_unlock();
2283 
2284 			if (tpgrp != PGRP_NULL) {
2285 				session_unlock(sessp);
2286 				pgsignal(tpgrp, SIGHUP, 1);
2287 				pgrp_rele(tpgrp);
2288 				session_lock(sessp);
2289 			}
2290 
2291 			cttyflag = (os_atomic_andnot_orig(&sessp->s_refcount,
2292 			    S_CTTYREF, relaxed) & S_CTTYREF);
2293 			ttyvp = sessp->s_ttyvp;
2294 			ttyvid = sessp->s_ttyvid;
2295 			tp = session_clear_tty_locked(sessp);
2296 			if (ttyvp) {
2297 				vnode_hold(ttyvp);
2298 			}
2299 			session_unlock(sessp);
2300 
2301 			if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) {
2302 				if (tp != TTY_NULL) {
2303 					tty_lock(tp);
2304 					(void) ttywait(tp);
2305 					tty_unlock(tp);
2306 				}
2307 
2308 				context.vc_thread = NULL;
2309 				context.vc_ucred = kauth_cred_proc_ref(p);
2310 				VNOP_REVOKE(ttyvp, REVOKEALL, &context);
2311 				if (cttyflag) {
2312 					/*
2313 					 * Release the extra usecount taken in cttyopen.
2314 					 * usecount should be released after VNOP_REVOKE is called.
2315 					 * This usecount was taken to ensure that
2316 					 * the VNOP_REVOKE results in a close to
2317 					 * the tty since cttyclose is a no-op.
2318 					 */
2319 					vnode_rele(ttyvp);
2320 				}
2321 				vnode_put(ttyvp);
2322 				kauth_cred_unref(&context.vc_ucred);
2323 				vnode_drop(ttyvp);
2324 				ttyvp = NULLVP;
2325 			}
2326 			if (ttyvp) {
2327 				vnode_drop(ttyvp);
2328 			}
2329 			if (tp) {
2330 				ttyfree(tp);
2331 			}
2332 		}
2333 		session_lock(sessp);
2334 		sessp->s_leader = NULL;
2335 		session_unlock(sessp);
2336 	}
2337 
2338 	if (!proc_is_shadow(p)) {
2339 		fixjobc(p, pg, 0);
2340 	}
2341 	pgrp_rele(pg);
2342 
2343 	/*
2344 	 * Change RLIMIT_FSIZE for accounting/debugging.
2345 	 */
2346 	proc_limitsetcur_fsize(p, RLIM_INFINITY);
2347 
2348 	(void)acct_process(p);
2349 
2350 	proc_list_lock();
2351 
2352 	if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) {
2353 		p->p_listflag &= ~P_LIST_EXITCOUNT;
2354 		proc_shutdown_exitcount--;
2355 		if (proc_shutdown_exitcount == 0) {
2356 			wakeup(&proc_shutdown_exitcount);
2357 		}
2358 	}
2359 
2360 	/* wait till parentrefs are dropped and grant no more */
2361 	proc_childdrainstart(p);
2362 	while ((q = p->p_children.lh_first) != NULL) {
2363 		if (q->p_stat == SZOMB) {
2364 			if (p != q->p_pptr) {
2365 				panic("parent child linkage broken");
2366 			}
2367 			/* check for sysctl zomb lookup */
2368 			while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2369 				msleep(&q->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2370 			}
2371 			q->p_listflag |= P_LIST_WAITING;
2372 			/*
2373 			 * This is a named reference and it is not granted
2374 			 * if the reap is already in progress. So we get
2375 			 * the reference here exclusively and there can be
2376 			 * no waiters. So there is no need for a wakeup
2377 			 * after we are done. Also, the reap frees the structure,
2378 			 * so the proc struct cannot be used for wakeups either.
2379 			 * It is safe to use q here as this is a system reap.
2380 			 */
2381 			reap_flags_t reparent_flags = (q->p_listflag & P_LIST_DEADPARENT) ?
2382 			    REAP_REPARENTED_TO_INIT : 0;
2383 			reap_child_locked(p, q,
2384 			    REAP_DEAD_PARENT | REAP_LOCKED | reparent_flags);
2385 		} else {
2386 			/*
2387 			 * Traced processes are killed
2388 			 * since their existence means someone is messing up.
2389 			 */
2390 			if (q->p_lflag & P_LTRACED) {
2391 				struct proc *opp;
2392 
2393 				/*
2394 				 * Take a reference on the child process to
2395 				 * ensure it doesn't exit and disappear between
2396 				 * the time we drop the list_lock and attempt
2397 				 * to acquire its proc_lock.
2398 				 */
2399 				if (proc_ref(q, true) != q) {
2400 					continue;
2401 				}
2402 
2403 				proc_list_unlock();
2404 
2405 				opp = proc_find(q->p_oppid);
2406 				if (opp != PROC_NULL) {
2407 					proc_list_lock();
2408 					q->p_oppid = 0;
2409 					proc_list_unlock();
2410 					proc_reparentlocked(q, opp, 0, 0);
2411 					proc_rele(opp);
2412 				} else {
2413 					/* original parent exited while traced */
2414 					proc_list_lock();
2415 					q->p_listflag |= P_LIST_DEADPARENT;
2416 					q->p_oppid = 0;
2417 					proc_list_unlock();
2418 					proc_reparentlocked(q, initproc, 0, 0);
2419 				}
2420 
2421 				proc_lock(q);
2422 				q->p_lflag &= ~P_LTRACED;
2423 
2424 				if (q->sigwait_thread) {
2425 					thread_t thread = q->sigwait_thread;
2426 
2427 					proc_unlock(q);
2428 					/*
2429 					 * The sigwait_thread could be stopped at a
2430 					 * breakpoint. Wake it up to kill.
2431 					 * Need to do this as it could be a thread which is not
2432 					 * the first thread in the task, so any attempt to kill
2433 					 * the process would result in a deadlock on q->sigwait.
2434 					 */
2435 					thread_resume(thread);
2436 					clear_wait(thread, THREAD_INTERRUPTED);
2437 					threadsignal(thread, SIGKILL, 0, TRUE);
2438 				} else {
2439 					proc_unlock(q);
2440 				}
2441 
2442 				psignal(q, SIGKILL);
2443 				proc_list_lock();
2444 				proc_rele(q);
2445 			} else {
2446 				q->p_listflag |= P_LIST_DEADPARENT;
2447 				proc_reparentlocked(q, initproc, 0, 1);
2448 			}
2449 		}
2450 	}
2451 
2452 	proc_childdrainend(p);
2453 	proc_list_unlock();
2454 
2455 #if CONFIG_MACF
2456 	if (!proc_is_shadow(p)) {
2457 		/*
2458 		 * Notify MAC policies that proc is dead.
2459 		 * This should be replaced with proper label management
2460 		 * (rdar://problem/32126399).
2461 		 */
2462 		mac_proc_notify_exit(p);
2463 	}
2464 #endif
2465 
2466 	/*
2467 	 * Release reference to text vnode
2468 	 */
2469 	tvp = p->p_textvp;
2470 	p->p_textvp = NULL;
2471 	if (tvp != NULLVP) {
2472 		vnode_rele(tvp);
2473 	}
2474 
2475 	/*
2476 	 * Save exit status and final rusage info, adding in child rusage
2477 	 * info and self times.  If we were unable to allocate a zombie
2478 	 * structure, this information is lost.
2479 	 */
2480 	if (p->p_ru != NULL) {
2481 		calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL);
2482 		p->p_ru->ru = p->p_stats->p_ru;
2483 
2484 		ruadd(&(p->p_ru->ru), &p->p_stats->p_cru);
2485 	}
2486 
2487 	/*
2488 	 * Free up profiling buffers.
2489 	 */
2490 	{
2491 		struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn;
2492 
2493 		p1 = p0->pr_next;
2494 		p0->pr_next = NULL;
2495 		p0->pr_scale = 0;
2496 
2497 		for (; p1 != NULL; p1 = pn) {
2498 			pn = p1->pr_next;
2499 			kfree_type(struct uprof, p1);
2500 		}
2501 	}
2502 
2503 	proc_free_realitimer(p);
2504 
2505 	/*
2506 	 * Other substructures are freed from wait().
2507 	 */
2508 	zfree(proc_stats_zone, p->p_stats);
2509 	p->p_stats = NULL;
2510 
2511 	if (p->p_subsystem_root_path) {
2512 		zfree(ZV_NAMEI, p->p_subsystem_root_path);
2513 		p->p_subsystem_root_path = NULL;
2514 	}
2515 
2516 	proc_limitdrop(p);
2517 
2518 #if DEVELOPMENT || DEBUG
2519 	proc_exit_lpexit_check(pid, PELS_POS_PRE_TASK_DETACH);
2520 #endif
2521 
2522 	/*
2523 	 * Finish up by terminating the task
2524 	 * and halt this thread (only if a
2525 	 * member of the task exiting).
2526 	 */
2527 	proc_set_task(p, TASK_NULL);
2528 	set_bsdtask_info(task, NULL);
2529 	clear_thread_ro_proc(get_machthread(uth));
2530 
2531 #if DEVELOPMENT || DEBUG
2532 	proc_exit_lpexit_check(pid, PELS_POS_POST_TASK_DETACH);
2533 #endif
2534 
2535 	knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
2536 	proc_knote(p, knote_hint);
2537 
2538 	/* mark the thread as the one that is doing proc_exit;
2539 	 * no need to hold the proc lock in uthread_free
2540 	 */
2541 	uth->uu_flag |= UT_PROCEXIT;
2542 	/*
2543 	 * Notify parent that we're gone.
2544 	 */
2545 	pp = proc_parent(p);
2546 	if (proc_is_shadow(p)) {
2547 		/* kernel can reap this one, no need to move it to launchd */
2548 		proc_list_lock();
2549 		p->p_listflag |= P_LIST_DEADPARENT;
2550 		proc_list_unlock();
2551 	} else if (pp->p_flag & P_NOCLDWAIT) {
2552 		if (p->p_ru != NULL) {
2553 			proc_lock(pp);
2554 #if 3839178
2555 			/*
2556 			 * If the parent is ignoring SIGCHLD, then POSIX requires
2557 			 * us to not add the resource usage to the parent process -
2558 			 * we are only going to hand it off to init to get reaped.
2559 			 * We should contest the standard in this case on the basis
2560 			 * of RLIMIT_CPU.
2561 			 */
2562 #else   /* !3839178 */
2563 			/*
2564 			 * Add child resource usage to parent before giving
2565 			 * zombie to init.  If we were unable to allocate a
2566 			 * zombie structure, this information is lost.
2567 			 */
2568 			ruadd(&pp->p_stats->p_cru, &p->p_ru->ru);
2569 #endif  /* !3839178 */
2570 			update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri);
2571 			proc_unlock(pp);
2572 		}
2573 
2574 		/* kernel can reap this one, no need to move it to launchd */
2575 		proc_list_lock();
2576 		p->p_listflag |= P_LIST_DEADPARENT;
2577 		proc_list_unlock();
2578 	}
2579 	if (!proc_is_shadow(p) &&
2580 	    ((p->p_listflag & P_LIST_DEADPARENT) == 0 || p->p_oppid)) {
2581 		if (pp != initproc) {
2582 			proc_lock(pp);
2583 			pp->si_pid = proc_getpid(p);
2584 			pp->p_xhighbits = p->p_xhighbits;
2585 			p->p_xhighbits = 0;
2586 			pp->si_status = p->p_xstat;
2587 			pp->si_code = CLD_EXITED;
2588 			/*
2589 			 * p_ucred usage is safe as it is an exiting process
2590 			 * and reference is dropped in reap
2591 			 */
2592 			pp->si_uid = kauth_cred_getruid(proc_ucred_unsafe(p));
2593 			proc_unlock(pp);
2594 		}
2595 		/* mark as a zombie */
2596 		/* No need to take proc lock as all refs are drained and
2597 		 * no one except the parent (reaping) can look at this.
2598 		 * The write is to an int and is coherent. Also, the parent is
2599 		 * keyed off of the list lock for reaping.
2600 		 */
2601 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2602 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2603 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2604 		    pid, exitval, 0, 0, 0);
2605 		p->p_stat = SZOMB;
2606 		/*
2607 		 * The current process can be reaped, so no one
2608 		 * can depend on this.
2609 		 */
2610 
2611 		psignal(pp, SIGCHLD);
2612 
2613 		/* and now wakeup the parent */
2614 		proc_list_lock();
2615 		wakeup((caddr_t)pp);
2616 		proc_list_unlock();
2617 	} else {
2618 		/* should be fine as parent proc would be initproc */
2619 		/* mark as a zombie */
2620 		/* No need to take proc lock as all refs are drained and
2621 		 * no one except the parent (reaping) can look at this.
2622 		 * The write is to an int and is coherent. Also, the parent is
2623 		 * keyed off of the list lock for reaping.
2624 		 */
2625 		DTRACE_PROC2(exited, proc_t, p, int, exitval);
2626 		proc_list_lock();
2627 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON,
2628 		    BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END,
2629 		    pid, exitval, 0, 0, 0);
2630 		/* check for sysctl zomb lookup */
2631 		while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) {
2632 			msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2633 		}
2634 		/* safe to use p as this is a system reap */
2635 		p->p_stat = SZOMB;
2636 		p->p_listflag |= P_LIST_WAITING;
2637 
2638 		/*
2639 		 * This is a named reference and it is not granted
2640 		 * if the reap is already in progress. So we get
2641 		 * the reference here exclusively and there can be
2642 		 * no waiters. So there is no need for a wakeup
2643 		 * after we are done. Also, the reap frees the structure,
2644 		 * so the proc struct cannot be used for wakeups either.
2645 		 * It is safe to use p here as this is a system reap.
2646 		 */
2647 		reap_child_locked(pp, p,
2648 		    REAP_DEAD_PARENT | REAP_LOCKED | REAP_DROP_LOCK);
2649 	}
2650 	if (uth->uu_lowpri_window) {
2651 		/*
2652 		 * the task is marked as a low priority I/O type and we've
2653 		 * somehow picked up another throttle during exit processing...
2654 		 * no need to throttle this thread since it's going away,
2655 		 * but we do need to update our bookkeeping w.r.t. throttled threads
2656 		 */
2657 		throttle_lowpri_io(0);
2658 	}
2659 
2660 	proc_rele(pp);
2661 #if DEVELOPMENT || DEBUG
2662 	proc_exit_lpexit_check(pid, PELS_POS_END);
2663 #endif
2664 }
2665 
2666 
2667 /*
2668  * reap_child_locked
2669  *
2670  * Finalize a child exit once its status has been saved.
2671  *
2672  * If ptrace has attached, detach it and return it to its real parent.  Free any
2673  * remaining resources.
2674  *
2675  * Parameters:
2676  * - proc_t parent      Parent of process being reaped
2677  * - proc_t child       Process to reap
2678  * - reap_flags_t flags Control locking and re-parenting behavior
2679  */
2680 static void
2681 reap_child_locked(proc_t parent, proc_t child, reap_flags_t flags)
2682 {
2683 	struct pgrp *pg;
2684 	boolean_t shadow_proc = proc_is_shadow(child);
2685 
2686 	if (flags & REAP_LOCKED) {
2687 		proc_list_unlock();
2688 	}
2689 
2690 	/*
2691 	 * Under ptrace, the child should now be re-parented back to its original
2692 	 * parent, unless that parent is initproc and the child was handed to
2693 	 * initproc through re-parenting.
2694 	 */
2695 	bool child_ptraced = child->p_oppid != 0;
2696 	if (!shadow_proc && child_ptraced) {
2697 		int knote_hint;
2698 		pid_t orig_ppid = 0;
2699 		proc_t orig_parent = PROC_NULL;
2700 
2701 		proc_lock(child);
2702 		orig_ppid = child->p_oppid;
2703 		child->p_oppid = 0;
2704 		knote_hint = NOTE_EXIT | (child->p_xstat & 0xffff);
2705 		proc_unlock(child);
2706 
2707 		orig_parent = proc_find(orig_ppid);
2708 		if (orig_parent) {
2709 			/*
2710 			 * Only re-parent the process if its original parent was not
2711 			 * initproc and it did not come to initproc from re-parenting.
2712 			 */
2713 			bool reparenting = orig_parent != initproc ||
2714 			    (flags & REAP_REPARENTED_TO_INIT) == 0;
2715 			if (reparenting) {
2716 				if (orig_parent != initproc) {
2717 					/*
2718 					 * Internal fields should be safe to access here because the
2719 					 * child is exited and not reaped or re-parented yet.
2720 					 */
2721 					proc_lock(orig_parent);
2722 					orig_parent->si_pid = proc_getpid(child);
2723 					orig_parent->si_status = child->p_xstat;
2724 					orig_parent->si_code = CLD_CONTINUED;
2725 					orig_parent->si_uid = kauth_cred_getruid(proc_ucred_unsafe(child));
2726 					proc_unlock(orig_parent);
2727 				}
2728 				proc_reparentlocked(child, orig_parent, 1, 0);
2729 
2730 				/*
2731 				 * After re-parenting, re-send the child's NOTE_EXIT to the
2732 				 * original parent.
2733 				 */
2734 				proc_knote(child, knote_hint);
2735 				psignal(orig_parent, SIGCHLD);
2736 
2737 				proc_list_lock();
2738 				wakeup((caddr_t)orig_parent);
2739 				child->p_listflag &= ~P_LIST_WAITING;
2740 				wakeup(&child->p_stat);
2741 				proc_list_unlock();
2742 
2743 				proc_rele(orig_parent);
2744 				if ((flags & REAP_LOCKED) && !(flags & REAP_DROP_LOCK)) {
2745 					proc_list_lock();
2746 				}
2747 				return;
2748 			} else {
2749 				/*
2750 				 * Satisfy the knote lifecycle because ptraced processes don't
2751 				 * broadcast NOTE_EXIT during initial child termination.
2752 				 */
2753 				proc_knote(child, knote_hint);
2754 				proc_rele(orig_parent);
2755 			}
2756 		}
2757 	}
2758 
2759 #pragma clang diagnostic push
2760 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2761 	proc_knote(child, NOTE_REAP);
2762 #pragma clang diagnostic pop
2763 
2764 	proc_knote_drain(child);
2765 
2766 	child->p_xstat = 0;
2767 	if (!shadow_proc && child->p_ru) {
2768 		/*
2769 		 * Roll up the rusage statistics to the parent, unless the parent is
2770 		 * ignoring SIGCHLD.  POSIX requires the children's resources of such a
2771 		 * parent to not be included in the parent's usage (seems odd given
2772 		 * RLIMIT_CPU, though).
2773 		 */
2774 		proc_lock(parent);
2775 		bool rollup_child = (parent->p_flag & P_NOCLDWAIT) == 0;
2776 		if (rollup_child) {
2777 			ruadd(&parent->p_stats->p_cru, &child->p_ru->ru);
2778 		}
2779 		update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri);
2780 		proc_unlock(parent);
2781 		zfree(zombie_zone, child->p_ru);
2782 		child->p_ru = NULL;
2783 	} else if (!shadow_proc) {
2784 		printf("Warning : lost p_ru for %s\n", child->p_comm);
2785 	} else {
2786 		assert(child->p_ru == NULL);
2787 	}
2788 
2789 	AUDIT_SESSION_PROCEXIT(child);
2790 
2791 #if CONFIG_PERSONAS
2792 	persona_proc_drop(child);
2793 #endif /* CONFIG_PERSONAS */
2794 	/* proc_ucred_unsafe is safe, because child is not running */
2795 	(void)chgproccnt(kauth_cred_getruid(proc_ucred_unsafe(child)), -1);
2796 
2797 	os_reason_free(child->p_exit_reason);
2798 
2799 	proc_list_lock();
2800 
2801 	pg = pgrp_leave_locked(child);
2802 	LIST_REMOVE(child, p_list);
2803 	parent->p_childrencnt--;
2804 	LIST_REMOVE(child, p_sibling);
2805 	bool no_more_children = (flags & REAP_DEAD_PARENT) &&
2806 	    LIST_EMPTY(&parent->p_children);
2807 	if (no_more_children) {
2808 		wakeup((caddr_t)parent);
2809 	}
2810 	child->p_listflag &= ~P_LIST_WAITING;
2811 	wakeup(&child->p_stat);
2812 
2813 	/* Take it out of process hash */
2814 	if (!shadow_proc) {
2815 		phash_remove_locked(child);
2816 	}
2817 	proc_checkdeadrefs(child);
2818 	nprocs--;
2819 	if (flags & REAP_DEAD_PARENT) {
2820 		child->p_listflag |= P_LIST_DEADPARENT;
2821 	}
2822 
2823 	proc_list_unlock();
2824 
2825 	pgrp_rele(pg);
2826 	fdt_destroy(child);
2827 	lck_mtx_destroy(&child->p_mlock, &proc_mlock_grp);
2828 	lck_mtx_destroy(&child->p_ucred_mlock, &proc_ucred_mlock_grp);
2829 #if CONFIG_AUDIT
2830 	lck_mtx_destroy(&child->p_audit_mlock, &proc_ucred_mlock_grp);
2831 #endif /* CONFIG_AUDIT */
2832 #if CONFIG_DTRACE
2833 	lck_mtx_destroy(&child->p_dtrace_sprlock, &proc_lck_grp);
2834 #endif
2835 	lck_spin_destroy(&child->p_slock, &proc_slock_grp);
2836 	proc_wait_release(child);
2837 
2838 	if ((flags & REAP_LOCKED) && (flags & REAP_DROP_LOCK) == 0) {
2839 		proc_list_lock();
2840 	}
2841 }
2842 
2843 int
2844 wait1continue(int result)
2845 {
2846 	proc_t p;
2847 	thread_t thread;
2848 	uthread_t uth;
2849 	struct _wait4_data *wait4_data;
2850 	struct wait4_nocancel_args *uap;
2851 	int *retval;
2852 
2853 	if (result) {
2854 		return result;
2855 	}
2856 
2857 	p = current_proc();
2858 	thread = current_thread();
2859 	uth = (struct uthread *)get_bsdthread_info(thread);
2860 
2861 	wait4_data = &uth->uu_save.uus_wait4_data;
2862 	uap = wait4_data->args;
2863 	retval = wait4_data->retval;
2864 	return wait4_nocancel(p, uap, retval);
2865 }
2866 
2867 int
2868 wait4(proc_t q, struct wait4_args *uap, int32_t *retval)
2869 {
2870 	__pthread_testcancel(1);
2871 	return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval);
2872 }
2873 
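/*
 * For reference, a typical userspace caller of this syscall; a minimal
 * sketch using only the standard <sys/wait.h> status macros:
 */
#if 0   /* illustrative sketch, not compiled */
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <stdio.h>

static void
reap_one(pid_t pid)
{
	int status;
	struct rusage ru;

	if (wait4(pid, &status, 0, &ru) == pid) {
		if (WIFEXITED(status)) {
			printf("exit status %d\n", WEXITSTATUS(status));
		} else if (WIFSIGNALED(status)) {
			printf("killed by signal %d\n", WTERMSIG(status));
		} else if (WIFSTOPPED(status)) {
			printf("stopped by signal %d\n", WSTOPSIG(status));
		}
	}
}
#endif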
2874 int
2875 wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval)
2876 {
2877 	int nfound;
2878 	int sibling_count;
2879 	proc_t p;
2880 	int status, error;
2881 	uthread_t uth;
2882 	struct _wait4_data *wait4_data;
2883 
2884 	AUDIT_ARG(pid, uap->pid);
2885 
2886 	if (uap->pid == 0) {
2887 		uap->pid = -q->p_pgrpid;
2888 	}
2889 
2890 	if (uap->pid == INT_MIN) {
2891 		return EINVAL;
2892 	}
2893 
2894 loop:
2895 	proc_list_lock();
2896 loop1:
2897 	nfound = 0;
2898 	sibling_count = 0;
2899 
2900 	PCHILDREN_FOREACH(q, p) {
2901 		if (p->p_sibling.le_next != 0) {
2902 			sibling_count++;
2903 		}
2904 		if (uap->pid != WAIT_ANY &&
2905 		    proc_getpid(p) != uap->pid &&
2906 		    p->p_pgrpid != -(uap->pid)) {
2907 			continue;
2908 		}
2909 
2910 		if (proc_is_shadow(p)) {
2911 			continue;
2912 		}
2913 
2914 		nfound++;
2915 
2916 		/* XXX This is racy because we don't get the lock!!!! */
2917 
2918 		if (p->p_listflag & P_LIST_WAITING) {
2919 			/* we're not using a continuation here but we still need to stash
2920 			 * the args for stackshot. */
2921 			uth = current_uthread();
2922 			wait4_data = &uth->uu_save.uus_wait4_data;
2923 			wait4_data->args = uap;
2924 			thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
2925 
2926 			(void)msleep(&p->p_stat, &proc_list_mlock, PWAIT, "waitcoll", 0);
2927 			goto loop1;
2928 		}
2929 		p->p_listflag |= P_LIST_WAITING;   /* only allow single thread to wait() */
2930 
2931 
2932 		if (p->p_stat == SZOMB) {
2933 			reap_flags_t reap_flags = (p->p_listflag & P_LIST_DEADPARENT) ?
2934 			    REAP_REPARENTED_TO_INIT : 0;
2935 
2936 			proc_list_unlock();
2937 #if CONFIG_MACF
2938 			if ((error = mac_proc_check_wait(q, p)) != 0) {
2939 				goto out;
2940 			}
2941 #endif
2942 			retval[0] = proc_getpid(p);
			if (uap->status) {
				/* Legacy apps expect only the low 16 bits of status
				 * (8-bit exit code plus termination info). */
				status = 0xffff & p->p_xstat;   /* convert to int */
				error = copyout((caddr_t)&status,
				    uap->status,
				    sizeof(status));
				if (error) {
					goto out;
				}
			}
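			/*
			 * The 16 bits copied out use the classic BSD wait status
			 * layout, decoded in userspace by the <sys/wait.h> macros:
			 *
			 *	bits 8..15  exit code (WEXITSTATUS)
			 *	bit  7      core dump flag (WCOREDUMP)
			 *	bits 0..6   terminating signal (WTERMSIG);
			 *	            0177 means stopped (WIFSTOPPED)
			 */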
			if (uap->rusage) {
				if (p->p_ru == NULL) {
					error = ENOMEM;
				} else {
					if (IS_64BIT_PROCESS(q)) {
						struct user64_rusage    my_rusage = {};
						munge_user64_rusage(&p->p_ru->ru, &my_rusage);
						error = copyout((caddr_t)&my_rusage,
						    uap->rusage,
						    sizeof(my_rusage));
					} else {
						struct user32_rusage    my_rusage = {};
						munge_user32_rusage(&p->p_ru->ru, &my_rusage);
						error = copyout((caddr_t)&my_rusage,
						    uap->rusage,
						    sizeof(my_rusage));
					}
				}
				/* information unavailable? */
				if (error) {
					goto out;
				}
			}

			/* Conformance change for 6577252.
			 * When SIGCHLD is blocked and wait() returns because the status
			 * of a child process is available and there are no other
			 * child processes, any pending SIGCHLD signal is cleared.
			 */
			if (sibling_count == 0) {
				int mask = sigmask(SIGCHLD);
				uth = current_uthread();

				if ((uth->uu_sigmask & mask) != 0) {
					/* we are blocking SIGCHLD signals.  clear any pending SIGCHLD.
					 * This locking looks funny but it is protecting access to the
					 * thread via p_uthlist.
					 */
					proc_lock(q);
					uth->uu_siglist &= ~mask;       /* clear pending signal */
					proc_unlock(q);
				}
			}

			/* Clean up */
			(void)reap_child_locked(q, p, reap_flags);

			return 0;
		}
		if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 &&
		    (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) {
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0) {
				goto out;
			}
#endif
			proc_lock(p);
			p->p_lflag |= P_LWAITED;
			proc_unlock(p);
			retval[0] = proc_getpid(p);
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				error = copyout((caddr_t)&status,
				    uap->status,
				    sizeof(status));
			} else {
				error = 0;
			}
			goto out;
		}
		/*
		 * If we are waiting for continued processes and this
		 * process was continued, report it.
		 */
		if ((uap->options & WCONTINUED) &&
		    (p->p_flag & P_CONTINUED)) {
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0) {
				goto out;
			}
#endif

			/* Prevent other processes from waiting for this event */
			OSBitAndAtomic(~((uint32_t)P_CONTINUED), &p->p_flag);
			retval[0] = proc_getpid(p);
			if (uap->status) {
				status = W_STOPCODE(SIGCONT);
				error = copyout((caddr_t)&status,
				    uap->status,
				    sizeof(status));
			} else {
				error = 0;
			}
			goto out;
		}
		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	/* list lock is held when we get here any which way */
	if (nfound == 0) {
		proc_list_unlock();
		return ECHILD;
	}

	if (uap->options & WNOHANG) {
		retval[0] = 0;
		proc_list_unlock();
		return 0;
	}

	/* Save arguments for continuation. Backing storage is in uthread->uu_save, and will not be deallocated */
	uth = current_uthread();
	wait4_data = &uth->uu_save.uus_wait4_data;
	wait4_data->args = uap;
	wait4_data->retval = retval;

	thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess);
	if ((error = msleep0((caddr_t)q, &proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) {
		return error;
	}

	goto loop;
out:
	proc_list_lock();
	p->p_listflag &= ~P_LIST_WAITING;
	wakeup(&p->p_stat);
	proc_list_unlock();
	return error;
}

#if DEBUG
#define ASSERT_LCK_MTX_OWNED(lock)      \
	lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED)
#else
#define ASSERT_LCK_MTX_OWNED(lock)      /* nothing */
#endif

int
waitidcontinue(int result)
{
	proc_t p;
	thread_t thread;
	uthread_t uth;
	struct _waitid_data *waitid_data;
	struct waitid_nocancel_args *uap;
	int *retval;

	if (result) {
		return result;
	}

	p = current_proc();
	thread = current_thread();
	uth = (struct uthread *)get_bsdthread_info(thread);

	waitid_data = &uth->uu_save.uus_waitid_data;
	uap = waitid_data->args;
	retval = waitid_data->retval;
	return waitid_nocancel(p, uap, retval);
}

/*
 * Description:	Suspend the calling thread until one child of the process
 *		containing the calling thread changes state.
 *
 * Parameters:	uap->idtype		one of P_PID, P_PGID, P_ALL
 *		uap->id			pid_t or gid_t or ignored
 *		uap->infop		Address of siginfo_t struct in
 *					user space into which to return status
 *		uap->options		flag values
 *
 * Returns:	0			Success
 *		!0			Error returning status to user space
 */
int
waitid(proc_t q, struct waitid_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval);
}
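
/*
 * Illustrative userspace use (a minimal sketch, not part of this file;
 * standard POSIX waitid(2) semantics):
 *
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	pid_t child = fork();
 *	if (child == 0) {
 *		_exit(3);
 *	}
 *	siginfo_t si;
 *	if (waitid(P_PID, (id_t)child, &si, WEXITED) == 0) {
 *		;	// si.si_code == CLD_EXITED, si.si_status == 3 here
 *	}
 */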

int
waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap,
    __unused int32_t *retval)
{
	user_siginfo_t  siginfo;        /* siginfo data to return to caller */
	boolean_t caller64 = IS_64BIT_PROCESS(q);
	int nfound;
	proc_t p;
	int error;
	uthread_t uth;
	struct _waitid_data *waitid_data;

	if (uap->options == 0 ||
	    (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) {
		return EINVAL;        /* bits set that aren't recognized */
	}
	switch (uap->idtype) {
	case P_PID:     /* child with process ID equal to... */
	case P_PGID:    /* child with process group ID equal to... */
		if (((int)uap->id) < 0) {
			return EINVAL;
		}
		break;
	case P_ALL:     /* any child */
		break;
	}

loop:
	proc_list_lock();
loop1:
	nfound = 0;

	PCHILDREN_FOREACH(q, p) {
		switch (uap->idtype) {
		case P_PID:     /* child with process ID equal to... */
			if (proc_getpid(p) != (pid_t)uap->id) {
				continue;
			}
			break;
		case P_PGID:    /* child with process group ID equal to... */
			if (p->p_pgrpid != (pid_t)uap->id) {
				continue;
			}
			break;
		case P_ALL:     /* any child */
			break;
		}

		if (proc_is_shadow(p)) {
			continue;
		}
		/* XXX This is racy because we don't get the lock!!!! */

		/*
		 * Wait collision; go to sleep and restart; used to maintain
		 * the single return for waited process guarantee.
		 */
		if (p->p_listflag & P_LIST_WAITING) {
			(void) msleep(&p->p_stat, &proc_list_mlock,
			    PWAIT, "waitidcoll", 0);
			goto loop1;
		}
		p->p_listflag |= P_LIST_WAITING;                /* mark busy */

		nfound++;

		bzero(&siginfo, sizeof(siginfo));

		switch (p->p_stat) {
		case SZOMB:             /* Exited */
			if (!(uap->options & WEXITED)) {
				break;
			}
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0) {
				goto out;
			}
#endif
			siginfo.si_signo = SIGCHLD;
			siginfo.si_pid = proc_getpid(p);

			/* If the child terminated abnormally due to a signal, the signum
			 * needs to be preserved in the exit status.
			 */
			if (WIFSIGNALED(p->p_xstat)) {
				siginfo.si_code = WCOREDUMP(p->p_xstat) ?
				    CLD_DUMPED : CLD_KILLED;
				siginfo.si_status = WTERMSIG(p->p_xstat);
			} else {
				siginfo.si_code = CLD_EXITED;
				siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF;
			}
			siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
			p->p_xhighbits = 0;

			if ((error = copyoutsiginfo(&siginfo,
			    caller64, uap->infop)) != 0) {
				goto out;
			}

			/* Consume the event, unless WNOWAIT asked us to leave it */
			if (!(uap->options & WNOWAIT)) {
				reap_child_locked(q, p, 0);
				return 0;
			}
			goto out;

		case SSTOP:             /* Stopped */
			/*
			 * If we are not interested in stopped processes, then
			 * ignore this one.
			 */
			if (!(uap->options & WSTOPPED)) {
				break;
			}

			/*
			 * If someone has already waited for it, we lost a race
			 * to be the one to return status.
			 */
			if ((p->p_lflag & P_LWAITED) != 0) {
				break;
			}
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0) {
				goto out;
			}
#endif
			siginfo.si_signo = SIGCHLD;
			siginfo.si_pid = proc_getpid(p);
			siginfo.si_status = p->p_xstat; /* signal number */
			siginfo.si_code = CLD_STOPPED;

			if ((error = copyoutsiginfo(&siginfo,
			    caller64, uap->infop)) != 0) {
				goto out;
			}

			/* Consume the event, unless WNOWAIT asked us to leave it */
			if (!(uap->options & WNOWAIT)) {
				proc_lock(p);
				p->p_lflag |= P_LWAITED;
				proc_unlock(p);
			}
			goto out;

		default:                /* All other states => Continued */
			if (!(uap->options & WCONTINUED)) {
				break;
			}

			/*
			 * If the flag isn't set, then this process has not
			 * been stopped and continued, or the status has
			 * already been reaped by another caller of waitid().
			 */
			if ((p->p_flag & P_CONTINUED) == 0) {
				break;
			}
			proc_list_unlock();
#if CONFIG_MACF
			if ((error = mac_proc_check_wait(q, p)) != 0) {
				goto out;
			}
#endif
			siginfo.si_signo = SIGCHLD;
			siginfo.si_code = CLD_CONTINUED;
			proc_lock(p);
			siginfo.si_pid = p->p_contproc;
			siginfo.si_status = p->p_xstat;
			proc_unlock(p);

			if ((error = copyoutsiginfo(&siginfo,
			    caller64, uap->infop)) != 0) {
				goto out;
			}

			/* Consume the event, unless WNOWAIT asked us to leave it */
			if (!(uap->options & WNOWAIT)) {
				OSBitAndAtomic(~((uint32_t)P_CONTINUED),
				    &p->p_flag);
			}
			goto out;
		}
		ASSERT_LCK_MTX_OWNED(&proc_list_mlock);

		/* Not a process we are interested in; go on to next child */

		p->p_listflag &= ~P_LIST_WAITING;
		wakeup(&p->p_stat);
	}
	ASSERT_LCK_MTX_OWNED(&proc_list_mlock);

	/* No child processes that could possibly satisfy the request? */

	if (nfound == 0) {
		proc_list_unlock();
		return ECHILD;
	}

	if (uap->options & WNOHANG) {
		proc_list_unlock();
#if CONFIG_MACF
		if ((error = mac_proc_check_wait(q, p)) != 0) {
			return error;
		}
#endif
		/*
		 * The state of the siginfo structure in this case
		 * is undefined.  Some implementations bzero it, some
		 * (like here) leave it untouched for efficiency.
		 *
		 * Thus the most portable check for "no matching pid with
		 * WNOHANG" is to store a zero into si_pid before
		 * invocation, then check for a non-zero value afterwards.
		 */
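		/*
		 * A portable caller therefore looks like this (userspace
		 * sketch, not code in this file):
		 *
		 *	siginfo_t si;
		 *	si.si_pid = 0;
		 *	if (waitid(P_ALL, 0, &si, WEXITED | WNOHANG) == 0 &&
		 *	    si.si_pid == 0) {
		 *		;	// no child had a state change to report
		 *	}
		 */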
		return 0;
	}

	/* Save arguments for continuation. Backing storage is in uthread->uu_save, and will not be deallocated */
	uth = current_uthread();
	waitid_data = &uth->uu_save.uus_waitid_data;
	waitid_data->args = uap;
	waitid_data->retval = retval;

	if ((error = msleep0(q, &proc_list_mlock,
	    PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) {
		return error;
	}

	goto loop;
out:
	proc_list_lock();
	p->p_listflag &= ~P_LIST_WAITING;
	wakeup(&p->p_stat);
	proc_list_unlock();
	return error;
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked)
{
	proc_t oldparent = PROC_NULL;

	if (child->p_pptr == parent) {
		return;
	}

	if (locked == 0) {
		proc_list_lock();
	}

	oldparent = child->p_pptr;
#if __PROC_INTERNAL_DEBUG
	if (oldparent == PROC_NULL) {
		panic("proc_reparent: process %p does not have a parent", child);
	}
#endif

	LIST_REMOVE(child, p_sibling);
#if __PROC_INTERNAL_DEBUG
	if (oldparent->p_childrencnt == 0) {
		panic("process children count already 0");
	}
#endif
	oldparent->p_childrencnt--;
#if __PROC_INTERNAL_DEBUG
	if (oldparent->p_childrencnt < 0) {
		panic("process children count -ve");
	}
#endif
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	parent->p_childrencnt++;
	child->p_pptr = parent;
	child->p_ppid = proc_getpid(parent);

	proc_list_unlock();

	if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) {
		psignal(initproc, SIGCHLD);
	}
	if (locked == 1) {
		proc_list_lock();
	}
}
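
/*
 * Locking behavior of proc_reparentlocked(), from the caller's side (a
 * sketch; the call site shown is hypothetical): with locked == 0 the
 * proc list lock is taken and dropped internally; with locked == 1 the
 * caller must already hold it, and it is dropped and re-acquired around
 * the possible psignal() above.
 *
 *	proc_list_lock();
 *	proc_reparentlocked(child, initproc, 1, 1);
 *	// held again here, but it was released transiently above
 *	proc_list_unlock();
 */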

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */


/*
 * munge_rusage
 *	LP64 support - long is 64 bits if we are dealing with a 64 bit user
 *	process.  We munge the kernel version of rusage into the
 *	64 bit version.
 */
__private_extern__  void
munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p)
{
	/* Zero-out struct so that padding is cleared */
	bzero(a_user_rusage_p, sizeof(struct user64_rusage));

	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * everything else can be a direct assign, since there is no loss
	 * of precision implied going 32->64.
	 */
	a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw;
}

/* For a 64-bit kernel and 32-bit userspace, munging may be needed */
__private_extern__  void
munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p)
{
	bzero(a_user_rusage_p, sizeof(struct user32_rusage));

	/* timeval changes size, so utime and stime need special handling */
	a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec;
	a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec;
	a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec;
	a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec;
	/*
	 * everything else can be a direct assign. We currently ignore
	 * the loss of precision
	 */
	a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss;
	a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss;
	a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss;
	a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss;
	a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt;
	a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt;
	a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap;
	a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock;
	a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock;
	a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd;
	a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv;
	a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals;
	a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw;
	a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw;
}

void
kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo)
{
	assert(thread != NULL);
	assert(waitinfo != NULL);

	struct uthread *ut = get_bsdthread_info(thread);
	waitinfo->context = 0;
	// ensure wmesg is consistent with a thread waiting in wait4
	assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait"));
	struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args;
	// May not actually contain a pid; this is just the argument to wait4.
	// See man wait4 for other valid wait4 arguments.
	waitinfo->owner = args->pid;
}

static int
exit_with_exception_internal(
	struct proc *p,
	exception_info_t exception,
	uint32_t flags)
{
	os_reason_t reason = OS_REASON_NULL;
	struct uthread *ut = NULL;

	if (p == PROC_NULL) {
		panic("exception type %d without a valid proc",
		    exception.os_reason);
	}

	if (!(flags & PX_DEBUG_NO_HONOR)
	    && address_space_debugged(p) == KERN_SUCCESS) {
		return 0;
	}

	if ((flags & PX_KTRIAGE)) {
		/* Leave a ktriage record */
		ktriage_record(
			thread_tid(current_thread()),
			KDBG_TRIAGE_EVENTID(
				exception.kt_info.kt_subsys,
				KDBG_TRIAGE_RESERVED,
				exception.kt_info.kt_error),
			0);
	}

	if ((flags & PX_PSIGNAL)) {
		int signal = (exception.signal > 0) ? exception.signal : SIGKILL;

		printf("[%s%s] sending signal %d to process\n", proc_best_name(p),
		    (signal == SIGKILL) ? ": killed" : "", signal);
		psignal(p, signal);
		return 0;
	} else {
		assert(exception.exception_type > 0);

		reason = os_reason_create(
			exception.os_reason,
			(uint64_t)exception.mx_code);
		assert(reason != OS_REASON_NULL);
		reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;

		if (!(flags & PX_NO_EXCEPTION_UTHREAD)) {
			ut = get_bsdthread_info(current_thread());
			ut->uu_exception = exception.exception_type;
			ut->uu_code = exception.mx_code;
			ut->uu_subcode = exception.mx_subcode;
		}

		printf("[%s: killed] sending signal %d and force exiting process\n",
		    proc_best_name(p), SIGKILL);
		return exit_with_reason(p, W_EXITCODE(0, SIGKILL), NULL,
		           FALSE, FALSE, 0, reason);
	}
}

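/*
 * Flag semantics above, summarized as a hypothetical call site (a sketch,
 * not a call that appears in this file; the constants used to fill the
 * exception_info_t initializer are illustrative):
 *
 *	exception_info_t info = {
 *		.os_reason = OS_REASON_GUARD,
 *		.exception_type = EXC_GUARD,
 *		.mx_code = code,
 *		.mx_subcode = subcode,
 *	};
 *	// Default path: force an exit with a crash-report-generating
 *	// os_reason.  PX_PSIGNAL instead delivers exception.signal
 *	// (SIGKILL if unset); PX_KTRIAGE also leaves a ktriage record;
 *	// PX_DEBUG_NO_HONOR skips the early return taken when the
 *	// address space is being debugged.
 *	exit_with_mach_exception(p, info, PX_KTRIAGE);
 */
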
/*
 * Use a separate function call for mach and exclave exceptions so that we
 * see the exception's origin show up clearly in the backtrace on dev kernels.
 */

int
exit_with_mach_exception(
	struct proc *p,
	exception_info_t exception,
	uint32_t flags)
{
	return exit_with_exception_internal(p, exception, flags);
}


#if CONFIG_EXCLAVES
int
exit_with_exclave_exception(
	struct proc *p,
	exception_info_t exception,
	uint32_t flags)
{
	return exit_with_exception_internal(p, exception, flags);
}
#endif /* CONFIG_EXCLAVES */