1 /*
2 * Copyright (c) 2012-2013, 2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 /*
31 * Corpses Overview
32 * ================
33 *
34 * A corpse is a state of process that is past the point of its death. This means that process has
35 * completed all its termination operations like releasing file descriptors, mach ports, sockets and
 * other constructs used to identify a process. For all processes this mimics the behavior as if
 * the process has died and is no longer available by any means.
38 *
39 * Why do we need Corpses?
40 * -----------------------
 * For crash inspection we need to inspect the state and data that is associated with the process so
 * that the crash reporting infrastructure can build backtraces, find leaks etc. For example, a crash
 * report requires the register state and backtrace of the crashing thread.
43 *
44 * Corpses functionality in kernel
45 * ===============================
46 * The corpse functionality is an extension of existing exception reporting mechanisms we have. The
47 * exception_triage calls will try to deliver the first round of exceptions allowing
48 * task/debugger/ReportCrash/launchd level exception handlers to respond to exception. If even after
49 * notification the exception is not handled, then the process begins the death operations and during
50 * proc_prepareexit, we decide to create a corpse for inspection. Following is a sample run through
51 * of events and data shuffling that happens when corpses is enabled.
52 *
53 * * a process causes an exception during normal execution of threads.
 * * The exception generated by either mach (e.g. GUARDED_MACHPORT) or bsd (e.g. SIGABRT, GUARDED_FD
55 * etc) side is passed through the exception_triage() function to follow the thread -> task -> host
56 * level exception handling system. This set of steps are same as before and allow for existing
57 * crash reporting systems (both internal and 3rd party) to catch and create reports as required.
58 * * If above exception handling returns failed (when nobody handles the notification), then the
59 * proc_prepareexit path has logic to decide to create corpse.
60 * * The task_mark_corpse function allocates userspace vm memory and attaches the information
61 * kcdata_descriptor_t to task->corpse_info field of task.
62 * - All the task's threads are marked with the "inspection" flag which signals the termination
63 * daemon to not reap them but hold until they are being inspected.
64 * - task flags t_flags reflect the corpse bit and also a PENDING_CORPSE bit. PENDING_CORPSE
65 * prevents task_terminate from stripping important data from task.
66 * - It marks all the threads to terminate and return to AST for termination.
67 * - The allocation logic takes into account the rate limiting policy of allowing only
68 * TOTAL_CORPSES_ALLOWED in flight.
69 * * The proc exit threads continues and collects required information in the allocated vm region.
70 * Once complete it marks itself for termination.
71 * * In the thread_terminate_self(), the last thread to enter will do a call to proc_exit().
72 * Following this is a check to see if task is marked for corpse notification and will
 *   invoke the task_deliver_crash_notification().
74 * * Once EXC_CORPSE_NOTIFY is delivered, it removes the PENDING_CORPSE flag from task (and
75 * inspection flag from all its threads) and allows task_terminate to go ahead and continue
76 * the mach task termination process.
77 * * ASIDE: The rest of the threads that are reaching the thread_terminate_daemon() with the
78 * inspection flag set are just bounced to another holding queue (crashed_threads_queue).
79 * Only after the corpse notification these are pulled out from holding queue and enqueued
80 * back to termination queue
81 *
82 *
83 * Corpse info format
84 * ==================
85 * The kernel (task_mark_corpse()) makes a vm allocation in the dead task's vm space (with tag
86 * VM_MEMORY_CORPSEINFO (80)). Within this memory all corpse information is saved by various
87 * subsystems like
88 * * bsd proc exit path may write down pid, parent pid, number of file descriptors etc
89 * * mach side may append data regarding ledger usage, memory stats etc
90 * See detailed info about the memory structure and format in kern_cdata.h documentation.
91 *
92 * Configuring Corpses functionality
93 * =================================
94 * boot-arg: -no_corpses disables the corpse generation. This can be added/removed without affecting
95 * any other subsystem.
96 * TOTAL_CORPSES_ALLOWED : (recompilation required) - Changing this number allows for controlling
97 * the number of corpse instances to be held for inspection before allowing memory to be reclaimed
98 * by system.
99 * CORPSEINFO_ALLOCATION_SIZE: is the default size of vm allocation. If in future there is much more
100 * data to be put in, then please re-tune this parameter.
101 *
102 * Debugging/Visibility
103 * ====================
104 * * lldbmacros for thread and task summary are updated to show "C" flag for corpse task/threads.
105 * * there are macros to see list of threads in termination queue (dumpthread_terminate_queue)
106 * and holding queue (dumpcrashed_thread_queue).
 * * In case corpse creation is disabled or ignored, the system log is updated with
108 * printf data with reason.
109 *
110 * Limitations of Corpses
111 * ======================
112 * With holding off memory for inspection, it creates vm pressure which might not be desirable
113 * on low memory devices. There are limits to max corpses being inspected at a time which is
114 * marked by TOTAL_CORPSES_ALLOWED.
115 *
116 */
117
118
119 #include <stdatomic.h>
120 #include <kern/assert.h>
121 #include <mach/mach_types.h>
122 #include <mach/boolean.h>
123 #include <mach/vm_param.h>
124 #include <mach/task.h>
125 #include <mach/thread_act.h>
126 #include <mach/host_priv.h>
127 #include <kern/host.h>
128 #include <kern/kern_types.h>
129 #include <kern/mach_param.h>
130 #include <kern/policy_internal.h>
131 #include <kern/thread.h>
132 #include <kern/task.h>
133 #include <corpses/task_corpse.h>
134 #include <kern/kalloc.h>
135 #include <kern/kern_cdata.h>
136 #include <mach/mach_vm.h>
137 #include <kern/exc_guard.h>
138 #include <os/log.h>
139 #include <sys/kdebug_triage.h>
140 #include <vm/vm_kern_xnu.h>
141 #include <vm/vm_map_xnu.h>
142
143 #if CONFIG_MACF
144 #include <security/mac_mach_internal.h>
145 #endif
146
147 /*
148 * Exported interfaces
149 */
150 #include <mach/task_server.h>
151
/*
 * Packed in-flight counters guarding corpse creation. The two 16-bit fields
 * are always read and updated together through the aliased 32-bit `value`,
 * so a single atomic compare-and-swap keeps them consistent.
 */
union corpse_creation_gate {
	struct {
		uint16_t user_faults; /* in-flight user-fault (GUARD_TYPE_USER) corpses */
		uint16_t corpses;     /* in-flight corpses of all kinds */
	};
	uint32_t value;               /* 32-bit view used for atomic CAS */
};
159
/* Packed in-flight corpse counters; see union corpse_creation_gate above. */
static _Atomic uint32_t inflight_corpses;
/* Total corpses created since boot; never decremented. */
unsigned long total_corpses_created = 0;

/* boot-arg: -no_corpses disables corpse generation entirely */
static TUNABLE(bool, corpses_disabled, "-no_corpses", false);

#if !XNU_TARGET_OS_OSX
/* Use lightweight corpse on embedded */
static TUNABLE(bool, lw_corpses_enabled, "lw_corpses", true);
#else
static TUNABLE(bool, lw_corpses_enabled, "lw_corpses", false);
#endif

#if DEBUG || DEVELOPMENT
/* bootarg to generate corpse with size up to max_footprint_mb */
TUNABLE(bool, corpse_threshold_system_limit, "corpse_threshold_system_limit", false);
#endif /* DEBUG || DEVELOPMENT */

/* bootarg to turn on corpse forking for EXC_RESOURCE */
TUNABLE(bool, exc_via_corpse_forking, "exc_via_corpse_forking", true);

/* bootarg to generate corpse for fatal high memory watermark violation */
TUNABLE(bool, corpse_for_fatal_memkill, "corpse_for_fatal_memkill", true);

/* Helpers implemented in the BSD layer; declared here to avoid header cycles. */
extern int IS_64BIT_PROCESS(void *);
extern void gather_populate_corpse_crashinfo(void *p, task_t task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason, exception_type_t etype);
extern void *proc_find(int pid);
extern int proc_rele(void *p);
extern task_t proc_get_task_raw(void *proc);
extern const char *proc_best_name(struct proc *proc);
191
192
193 /*
194 * Routine: corpses_enabled
195 * returns FALSE if not enabled
196 */
197 boolean_t
corpses_enabled(void)198 corpses_enabled(void)
199 {
200 return !corpses_disabled;
201 }
202
203 unsigned long
total_corpses_count(void)204 total_corpses_count(void)
205 {
206 union corpse_creation_gate gate;
207
208 gate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
209 return gate.corpses;
210 }
211
212 extern int proc_pid(struct proc *);
213
/*
 * Routine: task_crashinfo_get_ref()
 *          Grab a slot at creating a corpse.
 * Returns: KERN_SUCCESS if the policy allows for creating a corpse.
 *          KERN_RESOURCE_SHORTAGE when the in-flight cap is reached
 *          (TOTAL_CORPSES_ALLOWED overall; additionally
 *          TOTAL_USER_FAULTS_ALLOWED for user-fault guard corpses).
 */
static kern_return_t
task_crashinfo_get_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;
	struct proc *p = (void *)current_proc();

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	/* Lock-free CAS loop: bump the packed counters as one 32-bit value. */
	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			/* Post-increment: the cap is compared against the pre-bump count. */
			if (newgate.user_faults++ >= TOTAL_USER_FAULTS_ALLOWED) {
				os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many faults %d\n",
				    proc_best_name(p), proc_pid(p), newgate.user_faults);
				return KERN_RESOURCE_SHORTAGE;
			}
		}
		if (newgate.corpses++ >= TOTAL_CORPSES_ALLOWED) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse failure, too many %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses);
			return KERN_RESOURCE_SHORTAGE;
		}

		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "%s[%d] Corpse allowed %d of %d\n",
			    proc_best_name(p), proc_pid(p), newgate.corpses, TOTAL_CORPSES_ALLOWED);
			return KERN_SUCCESS;
		}
	}
}
253
/*
 * Routine: task_crashinfo_release_ref
 *          release the slot for corpse being used.
 *          Panics on over-release (counter underflow).
 */
static kern_return_t
task_crashinfo_release_ref(corpse_flags_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	/* Lock-free CAS loop mirroring task_crashinfo_get_ref(). */
	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			/* Post-decrement: a zero pre-decrement count means underflow. */
			if (newgate.user_faults-- == 0) {
				panic("corpse in flight count over-release");
			}
		}
		if (newgate.corpses-- == 0) {
			panic("corpse in flight count over-release");
		}
		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			os_log(OS_LOG_DEFAULT, "Corpse released, count at %d\n", newgate.corpses);
			return KERN_SUCCESS;
		}
	}
}
285
286
287 kcdata_descriptor_t
task_crashinfo_alloc_init(mach_vm_address_t crash_data_p,unsigned size,corpse_flags_t kc_u_flags,unsigned kc_flags)288 task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size,
289 corpse_flags_t kc_u_flags, unsigned kc_flags)
290 {
291 kcdata_descriptor_t kcdata;
292
293 if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
294 if (KERN_SUCCESS != task_crashinfo_get_ref(kc_u_flags)) {
295 return NULL;
296 }
297 }
298
299 kcdata = kcdata_memory_alloc_init(crash_data_p, TASK_CRASHINFO_BEGIN, size,
300 kc_flags);
301 if (kcdata) {
302 kcdata->kcd_user_flags = kc_u_flags;
303 } else if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
304 task_crashinfo_release_ref(kc_u_flags);
305 }
306 return kcdata;
307 }
308
309 kcdata_descriptor_t
task_btinfo_alloc_init(mach_vm_address_t addr,unsigned size)310 task_btinfo_alloc_init(mach_vm_address_t addr, unsigned size)
311 {
312 kcdata_descriptor_t kcdata;
313
314 kcdata = kcdata_memory_alloc_init(addr, TASK_BTINFO_BEGIN, size, KCFLAG_USE_MEMCOPY);
315
316 return kcdata;
317 }
318
319
320 /*
321 * Free up the memory associated with task_crashinfo_data
322 */
323 kern_return_t
task_crashinfo_destroy(kcdata_descriptor_t data)324 task_crashinfo_destroy(kcdata_descriptor_t data)
325 {
326 if (!data) {
327 return KERN_INVALID_ARGUMENT;
328 }
329 if (data->kcd_user_flags & CORPSE_CRASHINFO_HAS_REF) {
330 task_crashinfo_release_ref(data->kcd_user_flags);
331 }
332 return kcdata_memory_destroy(data);
333 }
334
335 /*
336 * Routine: task_get_corpseinfo
337 * params: task - task which has corpse info setup.
338 * returns: crash info data attached to task.
339 * NULL if task is null or has no corpse info
340 */
341 kcdata_descriptor_t
task_get_corpseinfo(task_t task)342 task_get_corpseinfo(task_t task)
343 {
344 kcdata_descriptor_t retval = NULL;
345 if (task != NULL) {
346 retval = task->corpse_info;
347 }
348 return retval;
349 }
350
/*
 * Routine: task_add_to_corpse_task_list
 * params: corpse_task - task to be added to corpse task list
 * returns: None.
 *
 * The global corpse_tasks list is protected by tasks_corpse_lock and is
 * walked by task_purge_all_corpses() and find_corpse_task_by_uniqueid_grp().
 */
void
task_add_to_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_enter(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}
363
/*
 * Routine: task_remove_from_corpse_task_list
 * params: corpse_task - task to be removed from corpse task list
 * returns: None.
 *
 * Inverse of task_add_to_corpse_task_list(); same locking discipline.
 */
void
task_remove_from_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_remove(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}
376
/*
 * Routine: task_purge_all_corpses
 * params: None.
 * returns: None.
 *
 * Invoked under memory pressure: tears down the VM map of every corpse on
 * the global list so their pages can be reclaimed. The corpse tasks stay
 * on the list; only their map contents are destroyed.
 */
void
task_purge_all_corpses(void)
{
	task_t task;

	lck_mtx_lock(&tasks_corpse_lock);
	/* Iterate through all the corpse tasks and clear all map entries */
	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		os_log(OS_LOG_DEFAULT, "Memory pressure corpse purge for pid %d.\n", task_pid(task));
		vm_map_terminate(task->map);
	}
	lck_mtx_unlock(&tasks_corpse_lock);
}
395
/*
 * Routine: find_corpse_task_by_uniqueid_grp
 * params: task_uniqueid - uniqueid of the corpse
 *         target - target task [Out Param]
 *         grp - task reference group
 * returns:
 *         KERN_SUCCESS if a matching corpse is found, gives a ref.
 *         KERN_FAILURE corpse with given uniqueid is not found.
 */
kern_return_t
find_corpse_task_by_uniqueid_grp(
	uint64_t task_uniqueid,
	task_t *target,
	task_grp_t grp)
{
	task_t task;

	lck_mtx_lock(&tasks_corpse_lock);

	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		if (task->task_uniqueid == task_uniqueid) {
			/* Take the ref while still holding the list lock so the
			 * corpse cannot be removed underneath us. */
			task_reference_grp(task, grp);
			lck_mtx_unlock(&tasks_corpse_lock);
			*target = task;
			return KERN_SUCCESS;
		}
	}

	lck_mtx_unlock(&tasks_corpse_lock);
	return KERN_FAILURE;
}
427
/*
 * Routine: task_generate_corpse
 * params: task - task to fork a corpse
 *         corpse_task_port - task port of the generated corpse [Out Param]
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT for the kernel task, a null task, or a
 *          task that is itself already a corpse fork.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_generate_corpse(
	task_t task,
	ipc_port_t *corpse_task_port)
{
	task_t new_task;
	kern_return_t kr;
	thread_t thread, th_iter;
	ipc_port_t corpse_port;

	if (task == kernel_task || task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	/* A corpse fork cannot be forked again. */
	if (task_is_a_corpse_fork(task)) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	task_unlock(task);

	/* Hold an exec promotion on the current thread for the duration of the fork. */
	thread_set_exec_promotion(current_thread());
	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread, 0, 0, 0, NULL);
	thread_clear_exec_promotion(current_thread());
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* The thread ref returned by the fork is not needed on this path. */
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}

	/* wait for all the threads in the task to terminate */
	task_lock(new_task);
	task_wait_till_threads_terminate_locked(new_task);

	/* Reset thread ports of all the threads in task */
	queue_iterate(&new_task->threads, th_iter, thread_t, task_threads)
	{
		/* Do not reset the thread port for inactive threads */
		if (th_iter->corpse_dup == FALSE) {
			ipc_thread_reset(th_iter);
		}
	}
	task_unlock(new_task);

	/* transfer the task ref to port and arm the no-senders notification */
	corpse_port = convert_corpse_to_port_and_nsrequest(new_task);
	assert(IP_NULL != corpse_port);

	*corpse_task_port = corpse_port;
	return KERN_SUCCESS;
}
490
491 /*
492 * Only generate lightweight corpse if any of thread, task, or host level registers
493 * EXC_CORPSE_NOTIFY with behavior EXCEPTION_BACKTRACE.
494 *
495 * Save a send right and behavior of those ports on out param EXC_PORTS.
496 */
497 static boolean_t
task_should_generate_lightweight_corpse(task_t task,ipc_port_t exc_ports[static BT_EXC_PORTS_COUNT])498 task_should_generate_lightweight_corpse(
499 task_t task,
500 ipc_port_t exc_ports[static BT_EXC_PORTS_COUNT])
501 {
502 kern_return_t kr;
503 boolean_t should_generate = FALSE;
504
505 exception_mask_t mask;
506 mach_msg_type_number_t nmasks;
507 exception_port_t exc_port = IP_NULL;
508 exception_behavior_t behavior;
509 thread_state_flavor_t flavor;
510
511 if (task != current_task()) {
512 return FALSE;
513 }
514
515 if (!lw_corpses_enabled) {
516 return FALSE;
517 }
518
519 for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
520 nmasks = 1;
521
522 /* thread, task, and host level, in this order */
523 if (i == 0) {
524 kr = thread_get_exception_ports(current_thread(), EXC_MASK_CORPSE_NOTIFY,
525 &mask, &nmasks, &exc_port, &behavior, &flavor);
526 } else if (i == 1) {
527 kr = task_get_exception_ports(current_task(), EXC_MASK_CORPSE_NOTIFY,
528 &mask, &nmasks, &exc_port, &behavior, &flavor);
529 } else {
530 kr = host_get_exception_ports(host_priv_self(), EXC_MASK_CORPSE_NOTIFY,
531 &mask, &nmasks, &exc_port, &behavior, &flavor);
532 }
533
534 if (kr != KERN_SUCCESS || nmasks == 0) {
535 exc_port = IP_NULL;
536 }
537
538 /* thread level can return KERN_SUCCESS && nmasks 0 */
539 assert(nmasks == 1 || i == 0);
540
541 if (IP_VALID(exc_port) && (behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED)) {
542 assert(behavior & MACH_EXCEPTION_CODES);
543 exc_ports[i] = exc_port; /* transfers right to array */
544 exc_port = NULL;
545 should_generate = TRUE;
546 } else {
547 exc_ports[i] = IP_NULL;
548 }
549
550 ipc_port_release_send(exc_port);
551 }
552
553 return should_generate;
554 }
555
/*
 * Routine: task_enqueue_exception_with_corpse
 * params: task - task to generate a corpse and enqueue it
 *         etype - EXC_RESOURCE or EXC_GUARD
 *         code - exception code to be enqueued
 *         codeCnt - code array count - code and subcode
 *         reason - opaque crash reason, passed through to the corpse blob
 *         lightweight - if TRUE, attempt a lightweight (backtrace-only)
 *                       corpse before falling back to a full corpse fork
 *
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments passed.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_enqueue_exception_with_corpse(
	task_t task,
	exception_type_t etype,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	void *reason,
	boolean_t lightweight)
{
	kern_return_t kr;
	ipc_port_t exc_ports[BT_EXC_PORTS_COUNT]; /* send rights in thread, task, host order */
	const char *procname = proc_best_name(get_bsdtask_info(task));

	/* Both code and subcode are required. */
	if (codeCnt < 2) {
		return KERN_INVALID_ARGUMENT;
	}

	if (lightweight && task_should_generate_lightweight_corpse(task, exc_ports)) {
		/* port rights captured in exc_ports */
		kcdata_descriptor_t desc = NULL;
		kcdata_object_t obj = KCDATA_OBJECT_NULL;
		bool lw_corpse_enqueued = false;

		assert(task == current_task());
		assert(etype == EXC_GUARD);

		/* Lightweight corpses are rate-limited via the kcdata object throttle. */
		kr = kcdata_object_throttle_get(KCDATA_OBJECT_TYPE_LW_CORPSE);
		if (kr != KERN_SUCCESS) {
			goto out;
		}

		kr = current_thread_collect_backtrace_info(&desc, etype, code, codeCnt, reason);
		if (kr != KERN_SUCCESS) {
			/* give the throttle slot back; it was not handed to an object */
			kcdata_object_throttle_release(KCDATA_OBJECT_TYPE_LW_CORPSE);
			goto out;
		}

		kr = kcdata_create_object(desc, KCDATA_OBJECT_TYPE_LW_CORPSE, BTINFO_ALLOCATION_SIZE, &obj);
		assert(kr == KERN_SUCCESS);
		/* desc ref and throttle slot captured in obj ref */

		thread_backtrace_enqueue(obj, exc_ports, etype);
		os_log(OS_LOG_DEFAULT, "Lightweight corpse enqueued for %s\n", procname);
		/* obj ref and exc_ports send rights consumed */
		lw_corpse_enqueued = true;

out:
		if (!lw_corpse_enqueued) {
			/* enqueue did not happen: drop the send rights captured above */
			for (unsigned int i = 0; i < BT_EXC_PORTS_COUNT; i++) {
				ipc_port_release_send(exc_ports[i]);
			}
		}
	} else {
		task_t corpse = TASK_NULL;
		thread_t thread = THREAD_NULL;

		thread_set_exec_promotion(current_thread());
		/* Generate a corpse for the given task, will return with a ref on corpse task */
		kr = task_generate_corpse_internal(task, &corpse, &thread, etype,
		    code[0], code[1], reason);
		thread_clear_exec_promotion(current_thread());
		if (kr == KERN_SUCCESS) {
			if (thread == THREAD_NULL) {
				return KERN_FAILURE;
			}
			assert(corpse != TASK_NULL);
			assert(etype == EXC_RESOURCE || etype == EXC_GUARD);
			thread_exception_enqueue(corpse, thread, etype);
			os_log(OS_LOG_DEFAULT, "Full corpse enqueued for %s\n", procname);
		}
	}

	return kr;
}
643
/*
 * Routine: task_generate_corpse_internal
 * params: task - task to fork a corpse
 *         corpse_task - task of the generated corpse (caller receives a ref)
 *         exc_thread - equivalent thread in corpse enqueuing exception
 *                      (caller receives a ref)
 *         etype - EXC_RESOURCE or EXC_GUARD or 0
 *         code - mach exception code to be passed in corpse blob
 *         subcode - mach exception subcode to be passed in corpse blob
 *         reason - opaque crash reason, passed through to the crash info blob
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 *
 * The error label at the bottom doubles as the common exit path: it runs on
 * success as well, and keys its unwind actions off kr and the local
 * ownership-tracking variables (p, corpse_proc, new_task, kc_u_flags).
 */
kern_return_t
task_generate_corpse_internal(
	task_t task,
	task_t *corpse_task,
	thread_t *exc_thread,
	exception_type_t etype,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_t thread_next = THREAD_NULL;
	kern_return_t kr;
	struct proc *p = NULL;
	int is_64bit_addr;
	int is_64bit_data;
	uint32_t t_flags;
	uint32_t t_flags_ro;
	uint64_t *udata_buffer = NULL;
	int size = 0;
	int num_udata = 0;
	corpse_flags_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF;
	void *corpse_proc = NULL;
	thread_t self = current_thread();

#if CONFIG_MACF
	struct label *label = NULL;
#endif

	if (!corpses_enabled()) {
		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSES_DISABLED), 0 /* arg */);
		return KERN_NOT_SUPPORTED;
	}

	if (task_corpse_forking_disabled(task)) {
		os_log(OS_LOG_DEFAULT, "corpse for pid %d disabled via SPI\n", task_pid(task));
		ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_DISABLED_FOR_PROC), 0 /* arg */);
		return KERN_FAILURE;
	}

	/* User-fault guard corpses are throttled separately from the rest. */
	if (etype == EXC_GUARD && EXC_GUARD_DECODE_GUARD_TYPE(code) == GUARD_TYPE_USER) {
		kc_u_flags |= CORPSE_CRASHINFO_USER_FAULT;
	}

	/* Reserve an in-flight corpse slot; may return KERN_RESOURCE_SHORTAGE. */
	kr = task_crashinfo_get_ref(kc_u_flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Having a task reference does not guarantee a proc reference */
	p = proc_find(task_pid(task));
	if (p == NULL) {
		kr = KERN_INVALID_TASK;
		goto error_task_generate_corpse;
	}

	is_64bit_addr = IS_64BIT_PROCESS(p);
	is_64bit_data = (task == TASK_NULL) ? is_64bit_addr : task_get_64bit_data(task);
	t_flags = TF_CORPSE_FORK |
	    TF_PENDING_CORPSE |
	    (is_64bit_addr ? TF_64B_ADDR : TF_NONE) |
	    (is_64bit_data ? TF_64B_DATA : TF_NONE);
	t_flags_ro = TFRO_CORPSE;

#if CONFIG_MACF
	/* Create the corpse label credentials from the process. */
	label = mac_exc_create_label_for_proc(p);
#endif

	corpse_proc = zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
	new_task = proc_get_task_raw(corpse_proc);

	/* Create a task for corpse */
	kr = task_create_internal(task,
	    NULL,
	    NULL,
	    TRUE,
	    is_64bit_addr,
	    is_64bit_data,
	    t_flags,
	    t_flags_ro,
	    TPF_NONE,
	    TWF_NONE,
	    new_task);
	if (kr != KERN_SUCCESS) {
		new_task = TASK_NULL;
		goto error_task_generate_corpse;
	}

	/* Enable IPC access to the corpse task */
	ipc_task_enable(new_task);

	/* new task is now referenced, do not free the struct in error case */
	corpse_proc = NULL;

	/* Create and copy threads from task, returns a ref to thread */
	kr = task_duplicate_map_and_threads(task, p, new_task, &thread,
	    &udata_buffer, &size, &num_udata, (etype != 0));
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	kr = task_collect_crash_info(new_task,
#if CONFIG_MACF
	    label,
#endif
	    TRUE);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* transfer our references to the corpse info */
	assert(new_task->corpse_info->kcd_user_flags == 0);
	new_task->corpse_info->kcd_user_flags = kc_u_flags;
	kc_u_flags = 0;

	kr = task_start_halt(new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* terminate the ipc space */
	ipc_space_terminate(new_task->itk_space);

	/* Populate the corpse blob, use the proc struct of task instead of corpse task */
	gather_populate_corpse_crashinfo(p, new_task,
	    code, subcode, udata_buffer, num_udata, reason, etype);

	/* Add it to global corpse task list */
	task_add_to_corpse_task_list(new_task);

	*corpse_task = new_task;
	*exc_thread = thread;

	/* Common exit: executed on both success and failure paths. */
error_task_generate_corpse:
#if CONFIG_MACF
	if (label) {
		mac_exc_free_label(label);
	}
#endif

	/* Release the proc reference */
	if (p != NULL) {
		proc_rele(p);
	}

	/* Non-NULL only when task_create_internal was never reached or failed. */
	if (corpse_proc != NULL) {
		zfree(proc_task_zone, corpse_proc);
	}

	if (kr != KERN_SUCCESS) {
		/* Unwind: tear down any partially constructed corpse. */
		if (thread != THREAD_NULL) {
			thread_deallocate(thread);
		}
		if (new_task != TASK_NULL) {
			task_lock(new_task);
			/* Terminate all the other threads in the task. */
			queue_iterate(&new_task->threads, thread_next, thread_t, task_threads)
			{
				thread_terminate_internal(thread_next);
			}
			/* wait for all the threads in the task to terminate */
			task_wait_till_threads_terminate_locked(new_task);
			task_unlock(new_task);

			task_clear_corpse(new_task);
			task_terminate_internal(new_task);
			task_deallocate(new_task);
		}
		/* Non-zero only if ownership was not transferred to corpse_info above. */
		if (kc_u_flags) {
			task_crashinfo_release_ref(kc_u_flags);
		}
	}
	/* Free the udata buffer allocated in task_duplicate_map_and_threads */
	kfree_data(udata_buffer, size);

	return kr;
}
836
837 static kern_return_t
task_map_kcdata_64(task_t task,void * kcdata_addr,mach_vm_address_t * uaddr,mach_vm_size_t kcd_size,vm_tag_t tag)838 task_map_kcdata_64(
839 task_t task,
840 void *kcdata_addr,
841 mach_vm_address_t *uaddr,
842 mach_vm_size_t kcd_size,
843 vm_tag_t tag)
844 {
845 kern_return_t kr;
846 mach_vm_offset_t udata_ptr;
847
848 kr = mach_vm_allocate_kernel(task->map, &udata_ptr, (size_t)kcd_size,
849 VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = tag));
850 if (kr != KERN_SUCCESS) {
851 return kr;
852 }
853 copyout(kcdata_addr, (user_addr_t)udata_ptr, (size_t)kcd_size);
854 *uaddr = udata_ptr;
855
856 return KERN_SUCCESS;
857 }
858
859 /*
860 * Routine: task_map_corpse_info
861 * params: task - Map the corpse info in task's address space
862 * corpse_task - task port of the corpse
863 * kcd_addr_begin - address of the mapped corpse info
864 * kcd_addr_begin - size of the mapped corpse info
865 * returns: KERN_SUCCESS on Success.
866 * KERN_FAILURE on Failure.
867 * KERN_INVALID_ARGUMENT on invalid arguments.
868 * Note: Temporary function, will be deleted soon.
869 */
870 kern_return_t
task_map_corpse_info(task_t task,task_t corpse_task,vm_address_t * kcd_addr_begin,uint32_t * kcd_size)871 task_map_corpse_info(
872 task_t task,
873 task_t corpse_task,
874 vm_address_t *kcd_addr_begin,
875 uint32_t *kcd_size)
876 {
877 kern_return_t kr;
878 mach_vm_address_t kcd_addr_begin_64;
879 mach_vm_size_t size_64;
880
881 kr = task_map_corpse_info_64(task, corpse_task, &kcd_addr_begin_64, &size_64);
882 if (kr != KERN_SUCCESS) {
883 return kr;
884 }
885
886 *kcd_addr_begin = (vm_address_t)kcd_addr_begin_64;
887 *kcd_size = (uint32_t) size_64;
888 return KERN_SUCCESS;
889 }
890
891 /*
892 * Routine: task_map_corpse_info_64
893 * params: task - Map the corpse info in task's address space
894 * corpse_task - task port of the corpse
895 * kcd_addr_begin - address of the mapped corpse info (takes mach_vm_addess_t *)
896 * kcd_size - size of the mapped corpse info (takes mach_vm_size_t *)
897 * returns: KERN_SUCCESS on Success.
898 * KERN_FAILURE on Failure.
899 * KERN_INVALID_ARGUMENT on invalid arguments.
900 */
901 kern_return_t
task_map_corpse_info_64(task_t task,task_t corpse_task,mach_vm_address_t * kcd_addr_begin,mach_vm_size_t * kcd_size)902 task_map_corpse_info_64(
903 task_t task,
904 task_t corpse_task,
905 mach_vm_address_t *kcd_addr_begin,
906 mach_vm_size_t *kcd_size)
907 {
908 kern_return_t kr;
909 mach_vm_offset_t crash_data_ptr = 0;
910 const mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE;
911 void *corpse_info_kernel = NULL;
912
913 if (task == TASK_NULL || task_is_a_corpse(task) ||
914 corpse_task == TASK_NULL || !task_is_a_corpse(corpse_task)) {
915 return KERN_INVALID_ARGUMENT;
916 }
917
918 corpse_info_kernel = kcdata_memory_get_begin_addr(corpse_task->corpse_info);
919 if (corpse_info_kernel == NULL) {
920 return KERN_INVALID_ARGUMENT;
921 }
922
923 kr = task_map_kcdata_64(task, corpse_info_kernel, &crash_data_ptr, size,
924 VM_MEMORY_CORPSEINFO);
925
926 if (kr == KERN_SUCCESS) {
927 *kcd_addr_begin = crash_data_ptr;
928 *kcd_size = size;
929 }
930
931 return kr;
932 }
933
934 /*
935 * Routine: task_map_kcdata_object_64
936 * params: task - Map the underlying kcdata in task's address space
937 * kcdata_obj - Object representing the data
938 * kcd_addr_begin - Address of the mapped kcdata
939 * kcd_size - Size of the mapped kcdata
940 * returns: KERN_SUCCESS on Success.
941 * KERN_FAILURE on Failure.
942 * KERN_INVALID_ARGUMENT on invalid arguments.
943 */
944 kern_return_t
task_map_kcdata_object_64(task_t task,kcdata_object_t kcdata_obj,mach_vm_address_t * kcd_addr_begin,mach_vm_size_t * kcd_size)945 task_map_kcdata_object_64(
946 task_t task,
947 kcdata_object_t kcdata_obj,
948 mach_vm_address_t *kcd_addr_begin,
949 mach_vm_size_t *kcd_size)
950 {
951 kern_return_t kr;
952 mach_vm_offset_t bt_data_ptr = 0;
953 const mach_vm_size_t size = BTINFO_ALLOCATION_SIZE;
954 void *bt_info_kernel = NULL;
955
956 if (task == TASK_NULL || task_is_a_corpse(task) ||
957 kcdata_obj == KCDATA_OBJECT_NULL) {
958 return KERN_INVALID_ARGUMENT;
959 }
960
961 bt_info_kernel = kcdata_memory_get_begin_addr(kcdata_obj->ko_data);
962 if (bt_info_kernel == NULL) {
963 return KERN_INVALID_ARGUMENT;
964 }
965
966 kr = task_map_kcdata_64(task, bt_info_kernel, &bt_data_ptr, size,
967 VM_MEMORY_BTINFO);
968
969 if (kr == KERN_SUCCESS) {
970 *kcd_addr_begin = bt_data_ptr;
971 *kcd_size = size;
972 }
973
974 return kr;
975 }
976
/*
 * Routine: task_corpse_get_crashed_thread_id
 * params: corpse_task - corpse whose faulting thread is being queried
 * returns: the crashed_thread_id field recorded on the corpse task.
 */
uint64_t
task_corpse_get_crashed_thread_id(task_t corpse_task)
{
	return corpse_task->crashed_thread_id;
}
982