1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63 /*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to [email protected] any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
81 /*
82 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83 * support for mandatory and extensible security protections. This notice
84 * is included in support of clause 2.2 (b) of the Apple Public License,
85 * Version 2.0.
86 * Copyright (c) 2005 SPARTA, Inc.
87 */
88
89 #include <mach/mach_types.h>
90 #include <mach/boolean.h>
91 #include <mach/host_priv.h>
92 #include <mach/machine/vm_types.h>
93 #include <mach/vm_param.h>
94 #include <mach/mach_vm.h>
95 #include <mach/semaphore.h>
96 #include <mach/task_info.h>
97 #include <mach/task_inspect.h>
98 #include <mach/task_special_ports.h>
99 #include <mach/sdt.h>
100 #include <mach/mach_test_upcall.h>
101
102 #include <ipc/ipc_importance.h>
103 #include <ipc/ipc_types.h>
104 #include <ipc/ipc_space.h>
105 #include <ipc/ipc_entry.h>
106 #include <ipc/ipc_hash.h>
107 #include <ipc/ipc_init.h>
108
109 #include <kern/kern_types.h>
110 #include <kern/mach_param.h>
111 #include <kern/misc_protos.h>
112 #include <kern/task.h>
113 #include <kern/thread.h>
114 #include <kern/coalition.h>
115 #include <kern/zalloc.h>
116 #include <kern/kalloc.h>
117 #include <kern/kern_cdata.h>
118 #include <kern/processor.h>
119 #include <kern/recount.h>
120 #include <kern/sched_prim.h> /* for thread_wakeup */
121 #include <kern/ipc_tt.h>
122 #include <kern/host.h>
123 #include <kern/clock.h>
124 #include <kern/timer.h>
125 #include <kern/assert.h>
126 #include <kern/affinity.h>
127 #include <kern/exc_resource.h>
128 #include <kern/machine.h>
129 #include <kern/policy_internal.h>
130 #include <kern/restartable.h>
131 #include <kern/ipc_kobject.h>
132
133 #include <corpses/task_corpse.h>
134 #if CONFIG_TELEMETRY
135 #include <kern/telemetry.h>
136 #endif
137
138 #if CONFIG_PERVASIVE_CPI
139 #include <kern/monotonic.h>
140 #include <machine/monotonic.h>
141 #endif /* CONFIG_PERVASIVE_CPI */
142
143 #if CONFIG_EXCLAVES
144 #include "exclaves_boot.h"
145 #include "exclaves_resource.h"
147 #include "exclaves_inspection.h"
148 #include "exclaves_conclave.h"
149 #endif /* CONFIG_EXCLAVES */
150
151 #include <os/log.h>
152
153 #include <vm/pmap.h>
154 #include <vm/vm_map_xnu.h>
155 #include <vm/vm_kern_xnu.h> /* for kernel_map, ipc_kernel_map */
156 #include <vm/vm_pageout_xnu.h>
157 #include <vm/vm_protos.h>
158 #include <vm/vm_purgeable_xnu.h>
159 #include <vm/vm_compressor_pager_xnu.h>
160 #include <vm/vm_reclaim_xnu.h>
161 #include <vm/vm_compressor_xnu.h>
162
163 #include <sys/kdebug.h>
164 #include <sys/proc_ro.h>
165 #include <sys/resource.h>
166 #include <sys/signalvar.h> /* for coredump */
167 #include <sys/bsdtask_info.h>
168 #include <sys/kdebug_triage.h>
169 #include <sys/code_signing.h> /* for address_space_debugged */
170 #include <sys/reason.h>
171
172 /*
173 * Exported interfaces
174 */
175
176 #include <mach/task_server.h>
177 #include <mach/mach_host_server.h>
178 #include <mach/mach_port_server.h>
179
180 #include <vm/vm_shared_region_xnu.h>
181
182 #include <libkern/OSDebug.h>
183 #include <libkern/OSAtomic.h>
184 #include <libkern/section_keywords.h>
185
186 #include <mach-o/loader.h>
187 #include <kdp/kdp_dyld.h>
188
189 #include <kern/sfi.h> /* picks up ledger.h */
190
191 #if CONFIG_MACF
192 #include <security/mac_mach_internal.h>
193 #endif
194
195 #include <IOKit/IOBSD.h>
196 #include <kdp/processor_core.h>
197
198 #include <string.h>
199
200 #if KPERF
201 extern int kpc_force_all_ctrs(task_t, int);
202 #endif
203
204 SECURITY_READ_ONLY_LATE(task_t) kernel_task;
205
206 int64_t next_taskuniqueid = 0;
207 const size_t task_alignment = _Alignof(struct task);
208 extern const size_t proc_alignment;
209 extern size_t proc_struct_size;
210 extern size_t proc_and_task_size;
211 size_t task_struct_size;
212
213 extern uint32_t ipc_control_port_options;
214
215 extern int large_corpse_count;
216
217 extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);
218 extern boolean_t proc_is_simulated(const proc_t);
219
220 static void task_port_no_senders(ipc_port_t, mach_msg_type_number_t);
221 static void task_port_with_flavor_no_senders(ipc_port_t, mach_msg_type_number_t);
222 static void task_suspension_no_senders(ipc_port_t, mach_msg_type_number_t);
223 static inline void task_zone_init(void);
224
225 #if CONFIG_EXCLAVES
226 static bool task_should_panic_on_exit_due_to_conclave_taint(task_t task);
227 static bool task_is_conclave_tainted(task_t task);
228 static void task_set_conclave_taint(task_t task);
229 kern_return_t task_crash_info_conclave_upcall(task_t task,
230 const struct conclave_sharedbuffer_t *shared_buf, uint32_t length);
231 #endif /* CONFIG_EXCLAVES */
232
233 IPC_KOBJECT_DEFINE(IKOT_TASK_NAME);
234 IPC_KOBJECT_DEFINE(IKOT_TASK_CONTROL,
235 .iko_op_no_senders = task_port_no_senders);
236 IPC_KOBJECT_DEFINE(IKOT_TASK_READ,
237 .iko_op_no_senders = task_port_with_flavor_no_senders);
238 IPC_KOBJECT_DEFINE(IKOT_TASK_INSPECT,
239 .iko_op_no_senders = task_port_with_flavor_no_senders);
240 IPC_KOBJECT_DEFINE(IKOT_TASK_RESUME,
241 .iko_op_no_senders = task_suspension_no_senders);
242
243 #if CONFIG_PROC_RESOURCE_LIMITS
244 static void task_fatal_port_no_senders(ipc_port_t, mach_msg_type_number_t);
245 static mach_port_t task_allocate_fatal_port(void);
246
247 IPC_KOBJECT_DEFINE(IKOT_TASK_FATAL,
248 .iko_op_stable = true,
249 .iko_op_no_senders = task_fatal_port_no_senders);
250
251 extern void task_id_token_set_port(task_id_token_t token, ipc_port_t port);
252 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
253
254 /* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
255 int audio_active = 0;
256
257 /*
258 * structure for tracking zone usage
259 * Used either one per task/thread for all zones or <per-task,per-zone>.
260 */
261 typedef struct zinfo_usage_store_t {
262 /* These fields may be updated atomically, and so must be 8 byte aligned */
263 uint64_t alloc __attribute__((aligned(8))); /* allocation counter */
264 uint64_t free __attribute__((aligned(8))); /* free counter */
265 } zinfo_usage_store_t;
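/*
 * Illustrative sketch (not part of the original source): one way a per-task
 * counter in this structure could be advanced, assuming OSAddAtomic64() from
 * <libkern/OSAtomic.h> (included above); the helper name is hypothetical.
 * The 8-byte alignment above keeps the update a single atomic read-modify-write.
 */
#if 0
static inline void
zinfo_usage_note_alloc(zinfo_usage_store_t *zi, uint64_t bytes)
{
	OSAddAtomic64((SInt64)bytes, (volatile SInt64 *)&zi->alloc);
}
#endif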
266
267 /**
268 * Return codes related to diag threshold and memory limit
269 */
270 __options_decl(diagthreshold_check_return, int, {
271 THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED = 0,
272 THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED = 1,
273 THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED = 2,
274 THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED = 3,
275 });
276
277 /**
278 * Return codes related to diag threshold and memory limit
279 */
280 __options_decl(current_, int, {
281 THRESHOLD_IS_SAME_AS_LIMIT = 0,
282 THRESHOLD_IS_NOT_SAME_AS_LIMIT = 1
283 });
284
285 zinfo_usage_store_t tasks_tkm_private;
286 zinfo_usage_store_t tasks_tkm_shared;
287
288 /* A container to accumulate statistics for expired tasks */
289 expired_task_statistics_t dead_task_statistics;
290 LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
291
292 ledger_template_t task_ledger_template = NULL;
293
294 /* global lock for task_dyld_process_info_notify_{register, deregister, get_trap} */
295 LCK_GRP_DECLARE(g_dyldinfo_mtx_grp, "g_dyldinfo");
296 LCK_MTX_DECLARE(g_dyldinfo_mtx, &g_dyldinfo_mtx_grp);
297
298 SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
299 {.cpu_time = -1,
300 .tkm_private = -1,
301 .tkm_shared = -1,
302 .phys_mem = -1,
303 .wired_mem = -1,
304 .internal = -1,
305 .iokit_mapped = -1,
306 .external = -1,
307 .reusable = -1,
308 .alternate_accounting = -1,
309 .alternate_accounting_compressed = -1,
310 .page_table = -1,
311 .phys_footprint = -1,
312 .internal_compressed = -1,
313 .purgeable_volatile = -1,
314 .purgeable_nonvolatile = -1,
315 .purgeable_volatile_compressed = -1,
316 .purgeable_nonvolatile_compressed = -1,
317 .tagged_nofootprint = -1,
318 .tagged_footprint = -1,
319 .tagged_nofootprint_compressed = -1,
320 .tagged_footprint_compressed = -1,
321 .network_volatile = -1,
322 .network_nonvolatile = -1,
323 .network_volatile_compressed = -1,
324 .network_nonvolatile_compressed = -1,
325 .media_nofootprint = -1,
326 .media_footprint = -1,
327 .media_nofootprint_compressed = -1,
328 .media_footprint_compressed = -1,
329 .graphics_nofootprint = -1,
330 .graphics_footprint = -1,
331 .graphics_nofootprint_compressed = -1,
332 .graphics_footprint_compressed = -1,
333 .neural_nofootprint = -1,
334 .neural_footprint = -1,
335 .neural_nofootprint_compressed = -1,
336 .neural_footprint_compressed = -1,
337 .neural_nofootprint_total = -1,
338 .platform_idle_wakeups = -1,
339 .interrupt_wakeups = -1,
340 #if CONFIG_SCHED_SFI
341 .sfi_wait_times = { 0 /* initialized at runtime */},
342 #endif /* CONFIG_SCHED_SFI */
343 .cpu_time_billed_to_me = -1,
344 .cpu_time_billed_to_others = -1,
345 .physical_writes = -1,
346 .logical_writes = -1,
347 .logical_writes_to_external = -1,
348 #if DEBUG || DEVELOPMENT
349 .pages_grabbed = -1,
350 .pages_grabbed_kern = -1,
351 .pages_grabbed_iopl = -1,
352 .pages_grabbed_upl = -1,
353 #endif
354 #if CONFIG_FREEZE
355 .frozen_to_swap = -1,
356 #endif /* CONFIG_FREEZE */
357 .energy_billed_to_me = -1,
358 .energy_billed_to_others = -1,
359 #if CONFIG_PHYS_WRITE_ACCT
360 .fs_metadata_writes = -1,
361 #endif /* CONFIG_PHYS_WRITE_ACCT */
362 #if CONFIG_MEMORYSTATUS
363 .memorystatus_dirty_time = -1,
364 #endif /* CONFIG_MEMORYSTATUS */
365 .swapins = -1,
366 .conclave_mem = -1, };
367
368 /* System sleep state */
369 boolean_t tasks_suspend_state;
370
371 __options_decl(send_exec_resource_is_fatal, bool, {
372 IS_NOT_FATAL = false,
373 IS_FATAL = true
374 });
375
376 __options_decl(send_exec_resource_is_diagnostics, bool, {
377 IS_NOT_DIAGNOSTICS = false,
378 IS_DIAGNOSTICS = true
379 });
380
381 __options_decl(send_exec_resource_is_warning, bool, {
382 IS_NOT_WARNING = false,
383 IS_WARNING = true
384 });
385
386 __options_decl(send_exec_resource_options_t, uint8_t, {
387 EXEC_RESOURCE_FATAL = 0x01,
388 EXEC_RESOURCE_DIAGNOSTIC = 0x02,
389 EXEC_RESOURCE_WARNING = 0x04,
390 });
391
392 /**
393 * Actions to take when a process has reached the memory limit or the diagnostics threshold limits
394 */
395 static inline void task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning);
396 #if DEBUG || DEVELOPMENT
397 static inline void task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size);
398 #endif
399 void init_task_ledgers(void);
400 void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
401 void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
402 void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
403 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
404 void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options);
405 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
406 #if CONFIG_PROC_RESOURCE_LIMITS
407 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit);
408 mach_port_name_t current_task_get_fatal_port_name(void);
409 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit);
410 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
411
412 kern_return_t task_suspend_internal(task_t);
413 kern_return_t task_resume_internal(task_t);
414 static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);
415
416 extern kern_return_t iokit_task_terminate(task_t task, int phase);
417 extern void iokit_task_app_suspended_changed(task_t task);
418
419 extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
420 extern void bsd_copythreadname(void *dst_uth, void *src_uth);
421 extern kern_return_t thread_resume(thread_t thread);
422
423 // Condition to include diag footprints
424 #define RESETTABLE_DIAG_FOOTPRINT_LIMITS ((DEBUG || DEVELOPMENT) && CONFIG_MEMORYSTATUS)
425
426 // Warn tasks when they hit 80% of their memory limit.
427 #define PHYS_FOOTPRINT_WARNING_LEVEL 80
428
429 #define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT 150 /* wakeups per second */
430 #define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL 300 /* in seconds. */
431
432 /*
433 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
434 *
435 * (i.e., when the task's wakeups rate exceeds 70% of the limit, start taking user
436 * stacktraces, aka micro-stackshots)
437 */
438 #define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER 70
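/*
 * Worked example (illustrative): with the defaults above, the budget is
 * 150 wakeups/sec observed over a 300 second window, i.e. 45,000 wakeups
 * per window; micro-stackshot telemetry starts once the task crosses 70%
 * of the limit, i.e. roughly 105 wakeups/sec (31,500 per window).
 */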
439
440 int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
441 int task_wakeups_monitor_rate; /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */
442
443 unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */
444
445 TUNABLE(bool, disable_exc_resource, "disable_exc_resource", false); /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
446 TUNABLE(bool, disable_exc_resource_during_audio, "disable_exc_resource_during_audio", true); /* Global override to suppress EXC_RESOURCE while audio is active */
447
448 ledger_amount_t max_task_footprint = 0; /* Per-task limit on physical memory consumption in bytes */
449 unsigned int max_task_footprint_warning_level = 0; /* Per-task limit warning percentage */
450
451 /*
452 * Configure per-task memory limit.
453 * The boot-arg is interpreted as Megabytes,
454 * and takes precedence over the device tree.
455 * Setting the boot-arg to 0 disables task limits.
456 */
457 TUNABLE_DT_WRITEABLE(int, max_task_footprint_mb, "/defaults", "kern.max_task_pmem", "max_task_pmem", 0, TUNABLE_DT_NONE);
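/*
 * Worked example (illustrative): booting with "max_task_pmem=2048" caps each
 * task's physical footprint at 2048 MB (converted to bytes in task_init()
 * below) and overrides any /defaults/kern.max_task_pmem device tree value;
 * "max_task_pmem=0" disables the per-task limit entirely.
 */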
458
459 /* I/O Monitor Limits */
460 #define IOMON_DEFAULT_LIMIT (20480ull) /* MB of logical/physical I/O */
461 #define IOMON_DEFAULT_INTERVAL (86400ull) /* in seconds */
462
463 uint64_t task_iomon_limit_mb; /* Per-task I/O monitor limit in MBs */
464 uint64_t task_iomon_interval_secs; /* Per-task I/O monitor interval in secs */
465
466 #define IO_TELEMETRY_DEFAULT_LIMIT (10ll * 1024ll * 1024ll)
467 int64_t io_telemetry_limit; /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
468 int64_t global_logical_writes_count = 0; /* Global count for logical writes */
469 int64_t global_logical_writes_to_external_count = 0; /* Global count for logical writes to external storage*/
470 static boolean_t global_update_logical_writes(int64_t, int64_t*);
471
472 #if DEBUG || DEVELOPMENT
473 static diagthreshold_check_return task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value);
474 #endif
475 #define TASK_MAX_THREAD_LIMIT 256
476
477 #if MACH_ASSERT
478 int pmap_ledgers_panic = 1;
479 int pmap_ledgers_panic_leeway = 3;
480 #endif /* MACH_ASSERT */
481
482 int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
483
484 #if CONFIG_COREDUMP
485 int hwm_user_cores = 0; /* high watermark violations generate user core files */
486 #endif
487
488 #ifdef MACH_BSD
489 extern uint32_t proc_platform(const struct proc *);
490 extern uint32_t proc_sdk(struct proc *);
491 extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
492 extern int proc_pid(struct proc *p);
493 extern int proc_selfpid(void);
494 extern struct proc *current_proc(void);
495 extern char *proc_name_address(struct proc *p);
496 extern uint64_t get_dispatchqueue_offset_from_proc(void *);
497 extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize);
498 extern void workq_proc_suspended(struct proc *p);
499 extern void workq_proc_resumed(struct proc *p);
500 extern struct proc *kernproc;
501
502 #if CONFIG_MEMORYSTATUS
503 extern void proc_memstat_skip(struct proc* p, boolean_t set);
504 extern void memorystatus_on_ledger_footprint_exceeded(int warning, bool memlimit_is_active, bool memlimit_is_fatal);
505 extern void memorystatus_log_exception(const int max_footprint_mb, bool memlimit_is_active, bool memlimit_is_fatal);
506 extern void memorystatus_log_diag_threshold_exception(const int diag_threshold_value);
507 extern boolean_t memorystatus_allowed_vm_map_fork(task_t task, bool *is_large);
508 extern uint64_t memorystatus_available_memory_internal(struct proc *p);
509
510 #if DEVELOPMENT || DEBUG
511 extern void memorystatus_abort_vm_map_fork(task_t);
512 #endif
513
514 #endif /* CONFIG_MEMORYSTATUS */
515
516 #endif /* MACH_BSD */
517
518 /* Boot-arg that turns on fatal pac exception delivery for all first-party apps */
519 static TUNABLE(bool, enable_pac_exception, "enable_pac_exception", false);
520
521 /*
522 * Defaults for controllable EXC_GUARD behaviors
523 *
524 * Internal builds are fatal by default (except BRIDGE).
525 * Create an alternate set of defaults for special processes by name.
526 */
527 struct task_exc_guard_named_default {
528 char *name;
529 uint32_t behavior;
530 };
531 #define _TASK_EXC_GUARD_MP_CORPSE (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE)
532 #define _TASK_EXC_GUARD_MP_ONCE (_TASK_EXC_GUARD_MP_CORPSE | TASK_EXC_GUARD_MP_ONCE)
533 #define _TASK_EXC_GUARD_MP_FATAL (TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL)
534
535 #define _TASK_EXC_GUARD_VM_CORPSE (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE)
536 #define _TASK_EXC_GUARD_VM_ONCE (_TASK_EXC_GUARD_VM_CORPSE | TASK_EXC_GUARD_VM_ONCE)
537 #define _TASK_EXC_GUARD_VM_FATAL (TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_FATAL)
538
539 #define _TASK_EXC_GUARD_ALL_CORPSE (_TASK_EXC_GUARD_MP_CORPSE | _TASK_EXC_GUARD_VM_CORPSE)
540 #define _TASK_EXC_GUARD_ALL_ONCE (_TASK_EXC_GUARD_MP_ONCE | _TASK_EXC_GUARD_VM_ONCE)
541 #define _TASK_EXC_GUARD_ALL_FATAL (_TASK_EXC_GUARD_MP_FATAL | _TASK_EXC_GUARD_VM_FATAL)
542
543 /* cannot turn off FATAL and DELIVER bit if set */
544 uint32_t task_exc_guard_no_unset_mask = TASK_EXC_GUARD_MP_FATAL | TASK_EXC_GUARD_VM_FATAL |
545 TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_VM_DELIVER;
546 /* cannot turn on ONCE bit if unset */
547 uint32_t task_exc_guard_no_set_mask = TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_VM_ONCE;
548
549 #if !defined(XNU_TARGET_OS_BRIDGE)
550
551 uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_FATAL;
552 uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
553 /*
554 * These "by-process-name" default overrides are intended to be a short-term fix to
555 * quickly get over races between changes introducing new EXC_GUARD raising behaviors
556 * in some process and a change in default behavior for same. We should ship with
557 * these lists empty (by fixing the bugs, or explicitly changing the task's EXC_GUARD
558 * exception behavior via task_set_exc_guard_behavior()).
559 *
560 * XXX Remember to add/remove TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS back to
561 * task_exc_guard_default when transitioning this list between empty and
562 * non-empty.
563 */
564 static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
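/*
 * Illustrative sketch (not part of the original source): if a short-term
 * override were ever needed, an entry would pair a process name with one of
 * the behavior masks above, e.g. (hypothetical process name):
 *
 *	{ .name = "exampled", .behavior = _TASK_EXC_GUARD_ALL_ONCE },
 *
 * and TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS would need to be added to
 * task_exc_guard_default while the list is non-empty (see the note above).
 */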
565
566 #else /* !defined(XNU_TARGET_OS_BRIDGE) */
567
568 uint32_t task_exc_guard_default = _TASK_EXC_GUARD_ALL_ONCE;
569 uint32_t task_exc_guard_config_mask = TASK_EXC_GUARD_MP_ALL | TASK_EXC_GUARD_VM_ALL;
570 static struct task_exc_guard_named_default task_exc_guard_named_defaults[] = {};
571
572 #endif /* !defined(XNU_TARGET_OS_BRIDGE) */
573
574 /* Forwards */
575
576 static bool task_hold_locked(task_t task);
577 static void task_wait_locked(task_t task, boolean_t until_not_runnable);
578 static void task_release_locked(task_t task);
579 extern task_t proc_get_task_raw(void *proc);
580 extern void task_ref_hold_proc_task_struct(task_t task);
581 extern void task_release_proc_task_struct(task_t task, proc_ro_t proc_ro);
582
583 static void task_synchronizer_destroy_all(task_t task);
584 static os_ref_count_t
585 task_add_turnstile_watchports_locked(
586 task_t task,
587 struct task_watchports *watchports,
588 struct task_watchport_elem **previous_elem_array,
589 ipc_port_t *portwatch_ports,
590 uint32_t portwatch_count);
591
592 static os_ref_count_t
593 task_remove_turnstile_watchports_locked(
594 task_t task,
595 struct task_watchports *watchports,
596 ipc_port_t *port_freelist);
597
598 static struct task_watchports *
599 task_watchports_alloc_init(
600 task_t task,
601 thread_t thread,
602 uint32_t count);
603
604 static void
605 task_watchports_deallocate(
606 struct task_watchports *watchports);
607
608 void
609 task_set_64bit(
610 task_t task,
611 boolean_t is_64bit,
612 boolean_t is_64bit_data)
613 {
614 #if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
615 thread_t thread;
616 #endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */
617
618 task_lock(task);
619
620 /*
621 * Switching to/from 64-bit address spaces
622 */
623 if (is_64bit) {
624 if (!task_has_64Bit_addr(task)) {
625 task_set_64Bit_addr(task);
626 }
627 } else {
628 if (task_has_64Bit_addr(task)) {
629 task_clear_64Bit_addr(task);
630 }
631 }
632
633 /*
634 * Switching to/from 64-bit register state.
635 */
636 if (is_64bit_data) {
637 if (task_has_64Bit_data(task)) {
638 goto out;
639 }
640
641 task_set_64Bit_data(task);
642 } else {
643 if (!task_has_64Bit_data(task)) {
644 goto out;
645 }
646
647 task_clear_64Bit_data(task);
648 }
649
650 /* FIXME: On x86, the thread save state flavor can diverge from the
651 * task's 64-bit feature flag due to the 32-bit/64-bit register save
652 * state dichotomy. Since we can be pre-empted in this interval,
653 * certain routines may observe the thread as being in an inconsistent
654 * state with respect to its task's 64-bitness.
655 */
656
657 #if defined(__x86_64__) || defined(__arm64__)
658 queue_iterate(&task->threads, thread, thread_t, task_threads) {
659 thread_mtx_lock(thread);
660 machine_thread_switch_addrmode(thread);
661 thread_mtx_unlock(thread);
662 }
663 #endif /* defined(__x86_64__) || defined(__arm64__) */
664
665 out:
666 task_unlock(task);
667 }
668
669 bool
670 task_get_64bit_addr(task_t task)
671 {
672 return task_has_64Bit_addr(task);
673 }
674
675 bool
676 task_get_64bit_data(task_t task)
677 {
678 return task_has_64Bit_data(task);
679 }
680
681 void
682 task_set_platform_binary(
683 task_t task,
684 boolean_t is_platform)
685 {
686 if (is_platform) {
687 task_ro_flags_set(task, TFRO_PLATFORM);
688 } else {
689 task_ro_flags_clear(task, TFRO_PLATFORM);
690 }
691 }
692
693 #if XNU_TARGET_OS_OSX
694 #if DEVELOPMENT || DEBUG
695 SECURITY_READ_ONLY_LATE(bool) AMFI_bootarg_disable_mach_hardening = false;
696 #endif /* DEVELOPMENT || DEBUG */
697
698 void
699 task_disable_mach_hardening(task_t task)
700 {
701 task_ro_flags_set(task, TFRO_MACH_HARDENING_OPT_OUT);
702 }
703
704 bool
705 task_opted_out_mach_hardening(task_t task)
706 {
707 return task_ro_flags_get(task) & TFRO_MACH_HARDENING_OPT_OUT;
708 }
709 #endif /* XNU_TARGET_OS_OSX */
710
711 /*
712 * Use the `task_is_hardened_binary` macro below
713 * when applying new security policies.
714 *
715 * Kernel security policies now generally apply to
716 * "hardened binaries": platform binaries, and third-party
717 * binaries that adopt the hardened runtime on iOS.
718 */
719 boolean_t
720 task_get_platform_binary(task_t task)
721 {
722 return (task_ro_flags_get(task) & TFRO_PLATFORM) != 0;
723 }
724
725 static boolean_t
726 task_get_hardened_runtime(task_t task)
727 {
728 return (task_ro_flags_get(task) & TFRO_HARDENED) != 0;
729 }
730
731 boolean_t
732 task_is_hardened_binary(task_t task)
733 {
734 return task_get_platform_binary(task) ||
735 task_get_hardened_runtime(task);
736 }
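/*
 * Illustrative sketch (not part of the original source): new policies are
 * expected to key off task_is_hardened_binary() rather than
 * task_get_platform_binary() alone, e.g. (hypothetical policy hook):
 */
#if 0
static bool
example_policy_applies(task_t task)
{
	/* true for platform binaries and third-party hardened-runtime binaries */
	return task_is_hardened_binary(task);
}
#endif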
737
738 void
739 task_set_hardened_runtime(
740 task_t task,
741 bool is_hardened)
742 {
743 if (is_hardened) {
744 task_ro_flags_set(task, TFRO_HARDENED);
745 } else {
746 task_ro_flags_clear(task, TFRO_HARDENED);
747 }
748 }
749
750 boolean_t
751 task_is_a_corpse(task_t task)
752 {
753 return (task_ro_flags_get(task) & TFRO_CORPSE) != 0;
754 }
755
756 boolean_t
757 task_is_ipc_active(task_t task)
758 {
759 return task->ipc_active;
760 }
761
762 void
763 task_set_corpse(task_t task)
764 {
765 return task_ro_flags_set(task, TFRO_CORPSE);
766 }
767
768 void
769 task_set_immovable_pinned(task_t task)
770 {
771 ipc_task_set_immovable_pinned(task);
772 }
773
774 /*
775 * Set or clear the per-task TF_CA_CLIENT_WI flag according to the specified argument.
776 * Returns "false" if the caller tries to set the flag while it is already set, and "true" in all other cases.
777 */
778 bool
779 task_set_ca_client_wi(
780 task_t task,
781 boolean_t set_or_clear)
782 {
783 bool ret = true;
784 task_lock(task);
785 if (set_or_clear) {
786 /* Tasks can have only one CA_CLIENT work interval */
787 if (task->t_flags & TF_CA_CLIENT_WI) {
788 ret = false;
789 } else {
790 task->t_flags |= TF_CA_CLIENT_WI;
791 }
792 } else {
793 task->t_flags &= ~TF_CA_CLIENT_WI;
794 }
795 task_unlock(task);
796 return ret;
797 }
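/*
 * Illustrative sketch (not part of the original source): a caller claiming
 * the single CA_CLIENT work interval can use the return value to detect a
 * duplicate claim (hypothetical caller):
 */
#if 0
static kern_return_t
example_claim_ca_client_wi(task_t task)
{
	/* false means TF_CA_CLIENT_WI was already set; only one is allowed */
	return task_set_ca_client_wi(task, TRUE) ? KERN_SUCCESS : KERN_FAILURE;
}
#endif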
798
799 /*
800 * task_set_dyld_info() is called at most three times.
801 * 1) at task struct creation to set addr/size to zero.
802 * 2) in mach_loader.c to set location of __all_image_info section in loaded dyld
803 * 3) from dyld itself to update the location of all_image_info
804 * For security, any calls after that are ignored. The TF_DYLD_ALL_IMAGE_FINAL flag is used to track this state.
805 */
806 kern_return_t
807 task_set_dyld_info(
808 task_t task,
809 mach_vm_address_t addr,
810 mach_vm_size_t size,
811 bool finalize_value)
812 {
813 mach_vm_address_t end;
814 if (os_add_overflow(addr, size, &end)) {
815 return KERN_FAILURE;
816 }
817
818 task_lock(task);
819 /* don't accept updates if all_image_info_addr is final */
820 if ((task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) == 0) {
821 bool inputNonZero = ((addr != 0) || (size != 0));
822 bool currentNonZero = ((task->all_image_info_addr != 0) || (task->all_image_info_size != 0));
823 task->all_image_info_addr = addr;
824 task->all_image_info_size = size;
825 /* can only change from a non-zero value to another non-zero once */
826 if ((inputNonZero && currentNonZero) || finalize_value) {
827 task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
828 }
829 task_unlock(task);
830 return KERN_SUCCESS;
831 } else {
832 task_unlock(task);
833 return KERN_FAILURE;
834 }
835 }
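/*
 * Illustrative sketch (not part of the original source): the call sequence
 * described in the comment above task_set_dyld_info(), with hypothetical
 * addresses and sizes.
 */
#if 0
static void
example_dyld_info_lifecycle(task_t task)
{
	/* 1) task struct creation: addr/size start out zero, not yet final */
	task_set_dyld_info(task, 0, 0, false);
	/* 2) mach_loader.c: publish __all_image_info from the loaded dyld */
	task_set_dyld_info(task, 0x100000000ULL, 0x1000, false);
	/* 3) dyld updates it once more; this non-zero -> non-zero transition
	 *    sets TF_DYLD_ALL_IMAGE_FINAL, so any later call fails */
	task_set_dyld_info(task, 0x100004000ULL, 0x2000, false);
	assert(task_set_dyld_info(task, 0, 0, false) == KERN_FAILURE);
}
#endif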
836
837 bool
838 task_donates_own_pages(
839 task_t task)
840 {
841 return task->donates_own_pages;
842 }
843
844 void
845 task_set_mach_header_address(
846 task_t task,
847 mach_vm_address_t addr)
848 {
849 task_lock(task);
850 task->mach_header_vm_address = addr;
851 task_unlock(task);
852 }
853
854 void
855 task_bank_reset(__unused task_t task)
856 {
857 if (task->bank_context != NULL) {
858 bank_task_destroy(task);
859 }
860 }
861
862 /*
863 * NOTE: This should only be called when the P_LINTRANSIT
864 * flag is set (the proc_trans lock is held) on the
865 * proc associated with the task.
866 */
867 void
868 task_bank_init(__unused task_t task)
869 {
870 if (task->bank_context != NULL) {
871 panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
872 }
873 bank_task_initialize(task);
874 }
875
876 void
877 task_set_did_exec_flag(task_t task)
878 {
879 task->t_procflags |= TPF_DID_EXEC;
880 }
881
882 void
883 task_clear_exec_copy_flag(task_t task)
884 {
885 task->t_procflags &= ~TPF_EXEC_COPY;
886 }
887
888 event_t
889 task_get_return_wait_event(task_t task)
890 {
891 return (event_t)&task->returnwait_inheritor;
892 }
893
894 void
895 task_clear_return_wait(task_t task, uint32_t flags)
896 {
897 if (flags & TCRW_CLEAR_INITIAL_WAIT) {
898 thread_wakeup(task_get_return_wait_event(task));
899 }
900
901 if (flags & TCRW_CLEAR_FINAL_WAIT) {
902 is_write_lock(task->itk_space);
903
904 task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
905 task->returnwait_inheritor = NULL;
906
907 if (flags & TCRW_CLEAR_EXEC_COMPLETE) {
908 task->t_returnwaitflags &= ~TRW_LEXEC_COMPLETE;
909 }
910
911 if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
912 struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
913 TURNSTILE_ULOCK);
914
915 waitq_wakeup64_all(&turnstile->ts_waitq,
916 CAST_EVENT64_T(task_get_return_wait_event(task)),
917 THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
918
919 turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);
920
921 turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
922 turnstile_cleanup();
923 task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
924 }
925 is_write_unlock(task->itk_space);
926 }
927 }
928
929 void __attribute__((noreturn))
930 task_wait_to_return(void)
931 {
932 task_t task = current_task();
933 uint8_t returnwaitflags;
934
935 is_write_lock(task->itk_space);
936
937 if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
938 struct turnstile *turnstile = turnstile_prepare_hash((uintptr_t) task_get_return_wait_event(task),
939 TURNSTILE_ULOCK);
940
941 do {
942 task->t_returnwaitflags |= TRW_LRETURNWAITER;
943 turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
944 (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));
945
946 waitq_assert_wait64(&turnstile->ts_waitq,
947 CAST_EVENT64_T(task_get_return_wait_event(task)),
948 THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
949
950 is_write_unlock(task->itk_space);
951
952 turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
953
954 thread_block(THREAD_CONTINUE_NULL);
955
956 is_write_lock(task->itk_space);
957 } while (task->t_returnwaitflags & TRW_LRETURNWAIT);
958
959 turnstile_complete_hash((uintptr_t) task_get_return_wait_event(task), TURNSTILE_ULOCK);
960 }
961
962 returnwaitflags = task->t_returnwaitflags;
963 is_write_unlock(task->itk_space);
964 turnstile_cleanup();
965
966 /**
967 * In posix_spawn() path, process_signature() is guaranteed to complete
968 * when the "second wait" is cleared. Call out to execute whatever depends
969 * on the result of that before we return to EL0.
970 */
971 task_post_signature_processing_hook(task);
972 #if CONFIG_MACF
973 /*
974 * Before jumping to userspace and allowing this process
975 * to execute any code, make sure its credentials are cached,
976 * and notify any interested parties.
977 */
978 extern void current_cached_proc_cred_update(void);
979
980 current_cached_proc_cred_update();
981 if (returnwaitflags & TRW_LEXEC_COMPLETE) {
982 mac_proc_notify_exec_complete(current_proc());
983 }
984 #endif
985
986 thread_bootstrap_return();
987 }
988
989 /**
990 * A callout by task_wait_to_return on the main thread of a newly spawned task
991 * after process_signature() is completed by the parent task.
992 *
993 * @param task The newly spawned task
994 */
995 void
996 task_post_signature_processing_hook(task_t task)
997 {
998 ml_task_post_signature_processing_hook(task);
999 }
1000
1001 boolean_t
1002 task_is_exec_copy(task_t task)
1003 {
1004 return task_is_exec_copy_internal(task);
1005 }
1006
1007 boolean_t
1008 task_did_exec(task_t task)
1009 {
1010 return task_did_exec_internal(task);
1011 }
1012
1013 boolean_t
1014 task_is_active(task_t task)
1015 {
1016 return task->active;
1017 }
1018
1019 boolean_t
1020 task_is_halting(task_t task)
1021 {
1022 return task->halting;
1023 }
1024
1025 void
1026 task_init(void)
1027 {
1028 if (max_task_footprint_mb != 0) {
1029 #if CONFIG_MEMORYSTATUS
1030 if (max_task_footprint_mb < 50) {
1031 printf("Warning: max_task_pmem %d below minimum.\n",
1032 max_task_footprint_mb);
1033 max_task_footprint_mb = 50;
1034 }
1035 printf("Limiting task physical memory footprint to %d MB\n",
1036 max_task_footprint_mb);
1037
1038 max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024; // Convert MB to bytes
1039
1040 /*
1041 * Configure the per-task memory limit warning level.
1042 * This is computed as a percentage.
1043 */
1044 max_task_footprint_warning_level = 0;
1045
1046 if (max_mem < 0x40000000) {
1047 /*
1048 * On devices with < 1GB of memory:
1049 * -- set warnings to 50MB below the per-task limit.
1050 */
1051 if (max_task_footprint_mb > 50) {
1052 max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
1053 }
1054 } else {
1055 /*
1056 * On devices with >= 1GB of memory:
1057 * -- set warnings to 100MB below the per-task limit.
1058 */
1059 if (max_task_footprint_mb > 100) {
1060 max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
1061 }
1062 }
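		/*
		 * Worked example (illustrative): with max_task_pmem=2048 on a
		 * >= 1GB device, the warning level computes to
		 * ((2048 - 100) * 100) / 2048 = 95, i.e. the warning fires
		 * roughly 100MB below the 2048MB limit.
		 */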
1063
1064 /*
1065 * Never allow warning level to land below the default.
1066 */
1067 if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
1068 max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
1069 }
1070
1071 printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);
1072
1073 #else
1074 printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
1075 #endif /* CONFIG_MEMORYSTATUS */
1076 }
1077
1078 #if DEVELOPMENT || DEBUG
1079 PE_parse_boot_argn("task_exc_guard_default",
1080 &task_exc_guard_default,
1081 sizeof(task_exc_guard_default));
1082 #endif /* DEVELOPMENT || DEBUG */
1083
1084 #if CONFIG_COREDUMP
1085 if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
1086 sizeof(hwm_user_cores))) {
1087 hwm_user_cores = 0;
1088 }
1089 #endif
1090
1091 proc_init_cpumon_params();
1092
1093 if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
1094 task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
1095 }
1096
1097 if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
1098 task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
1099 }
1100
1101 if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
1102 sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
1103 task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
1104 }
1105
1106 if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
1107 task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
1108 }
1109
1110 if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
1111 task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
1112 }
1113
1114 if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
1115 io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
1116 }
1117
1118 /*
1119 * If we have coalitions, coalition_init() will call init_task_ledgers() as it
1120 * sets up the ledgers for the default coalition. If we don't have coalitions,
1121 * then we have to call it now.
1122 */
1123 #if CONFIG_COALITIONS
1124 assert(task_ledger_template);
1125 #else /* CONFIG_COALITIONS */
1126 init_task_ledgers();
1127 #endif /* CONFIG_COALITIONS */
1128
1129 task_ref_init();
1130 task_zone_init();
1131
1132 #ifdef __LP64__
1133 boolean_t is_64bit = TRUE;
1134 #else
1135 boolean_t is_64bit = FALSE;
1136 #endif
1137
1138 kernproc = (struct proc *)zalloc_flags(proc_task_zone, Z_WAITOK | Z_ZERO);
1139 kernel_task = proc_get_task_raw(kernproc);
1140
1141 /*
1142 * Create the kernel task as the first task.
1143 */
1144 if (task_create_internal(TASK_NULL, NULL, NULL, FALSE, is_64bit,
1145 is_64bit, TF_NONE, TF_NONE, TPF_NONE, TWF_NONE, kernel_task) != KERN_SUCCESS) {
1146 panic("task_init");
1147 }
1148
1149 ipc_task_enable(kernel_task);
1150
1151 #if defined(HAS_APPLE_PAC)
1152 kernel_task->rop_pid = ml_default_rop_pid();
1153 kernel_task->jop_pid = ml_default_jop_pid();
1154 // kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
1155 // disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
1156 ml_task_set_disable_user_jop(kernel_task, FALSE);
1157 #endif
1158
1159 vm_map_deallocate(kernel_task->map);
1160 kernel_task->map = kernel_map;
1161 }
1162
1163 static inline void
1164 task_zone_init(void)
1165 {
1166 proc_struct_size = roundup(proc_struct_size, task_alignment);
1167 task_struct_size = roundup(sizeof(struct task), proc_alignment);
1168 proc_and_task_size = proc_struct_size + task_struct_size;
1169
1170 proc_task_zone = zone_create_ext("proc_task", proc_and_task_size,
1171 ZC_ZFREE_CLEARMEM | ZC_SEQUESTER, ZONE_ID_PROC_TASK, NULL); /* sequester is needed for proc_rele() */
1172 }
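/*
 * Illustrative sketch (not part of the original source): the combined
 * "proc_task" zone element lays the proc and task portions out back to back,
 * which is the assumption behind proc_get_task_raw() above:
 *
 *	[ struct proc ... rounded up to task_alignment ][ struct task ... ]
 *	^ element base / proc pointer                   ^ task pointer
 *	                                                  (proc_struct_size bytes in)
 */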
1173
1174 /*
1175 * Task ledgers
1176 * ------------
1177 *
1178 * phys_footprint
1179 * Physical footprint: This is the sum of:
1180 * + (internal - alternate_accounting)
1181 * + (internal_compressed - alternate_accounting_compressed)
1182 * + iokit_mapped
1183 * + purgeable_nonvolatile
1184 * + purgeable_nonvolatile_compressed
1185 * + page_table
1186 *
1187 * internal
1188 * The task's anonymous memory, which on iOS is always resident.
1189 *
1190 * internal_compressed
1191 * Amount of this task's internal memory which is held by the compressor.
1192 * Such memory is no longer actually resident for the task [i.e., resident in its pmap],
1193 * and could be either decompressed back into memory, or paged out to storage, depending
1194 * on our implementation.
1195 *
1196 * iokit_mapped
1197 * IOKit mappings: The total size of all IOKit mappings in this task [regardless of
1198 * clean/dirty or internal/external state].
1199 *
1200 * alternate_accounting
1201 * The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
1202 * are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
1203 * double counting.
1204 *
1205 * pages_grabbed
1206 * pages_grabbed counts all page grabs in a task. It is also broken out into three subtypes
1207 * which track UPL, IOPL and Kernel page grabs.
1208 */
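/*
 * Illustrative sketch (not part of the original source): recomputing the
 * documented phys_footprint sum from its component ledger entries, assuming
 * ledger_get_balance() from <kern/ledger.h>. The real accounting is
 * maintained incrementally by the VM layer; this is only a cross-check sketch.
 */
#if 0
static ledger_amount_t
example_recompute_phys_footprint(ledger_t ledger)
{
	ledger_amount_t internal = 0, internal_comp = 0;
	ledger_amount_t alt = 0, alt_comp = 0, iokit = 0;
	ledger_amount_t purg_nv = 0, purg_nv_comp = 0, page_table = 0;

	ledger_get_balance(ledger, task_ledgers.internal, &internal);
	ledger_get_balance(ledger, task_ledgers.internal_compressed, &internal_comp);
	ledger_get_balance(ledger, task_ledgers.alternate_accounting, &alt);
	ledger_get_balance(ledger, task_ledgers.alternate_accounting_compressed, &alt_comp);
	ledger_get_balance(ledger, task_ledgers.iokit_mapped, &iokit);
	ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile, &purg_nv);
	ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed, &purg_nv_comp);
	ledger_get_balance(ledger, task_ledgers.page_table, &page_table);

	return (internal - alt) + (internal_comp - alt_comp) + iokit +
	       purg_nv + purg_nv_comp + page_table;
}
#endif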
1209 void
1210 init_task_ledgers(void)
1211 {
1212 ledger_template_t t;
1213
1214 assert(task_ledger_template == NULL);
1215 assert(kernel_task == TASK_NULL);
1216
1217 #if MACH_ASSERT
1218 PE_parse_boot_argn("pmap_ledgers_panic",
1219 &pmap_ledgers_panic,
1220 sizeof(pmap_ledgers_panic));
1221 PE_parse_boot_argn("pmap_ledgers_panic_leeway",
1222 &pmap_ledgers_panic_leeway,
1223 sizeof(pmap_ledgers_panic_leeway));
1224 #endif /* MACH_ASSERT */
1225
1226 if ((t = ledger_template_create("Per-task ledger")) == NULL) {
1227 panic("couldn't create task ledger template");
1228 }
1229
1230 task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
1231 task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
1232 "physmem", "bytes");
1233 task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
1234 "bytes");
1235 task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
1236 "bytes");
1237 task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
1238 "bytes");
1239 task_ledgers.conclave_mem = ledger_entry_add_with_flags(t, "conclave_mem", "physmem", "bytes",
1240 LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE | LEDGER_ENTRY_ALLOW_DEBIT);
1241 task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
1242 "bytes");
1243 task_ledgers.iokit_mapped = ledger_entry_add_with_flags(t, "iokit_mapped", "mappings",
1244 "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1245 task_ledgers.alternate_accounting = ledger_entry_add_with_flags(t, "alternate_accounting", "physmem",
1246 "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1247 task_ledgers.alternate_accounting_compressed = ledger_entry_add_with_flags(t, "alternate_accounting_compressed", "physmem",
1248 "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1249 task_ledgers.page_table = ledger_entry_add_with_flags(t, "page_table", "physmem",
1250 "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1251 task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
1252 "bytes");
1253 task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
1254 "bytes");
1255 task_ledgers.reusable = ledger_entry_add(t, "reusable", "physmem", "bytes");
1256 task_ledgers.external = ledger_entry_add(t, "external", "physmem", "bytes");
1257 task_ledgers.purgeable_volatile = ledger_entry_add_with_flags(t, "purgeable_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1258 task_ledgers.purgeable_nonvolatile = ledger_entry_add_with_flags(t, "purgeable_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1259 task_ledgers.purgeable_volatile_compressed = ledger_entry_add_with_flags(t, "purgeable_volatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1260 task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add_with_flags(t, "purgeable_nonvolatile_compress", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1261 #if DEBUG || DEVELOPMENT
1262 task_ledgers.pages_grabbed = ledger_entry_add_with_flags(t, "pages_grabbed", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1263 task_ledgers.pages_grabbed_kern = ledger_entry_add_with_flags(t, "pages_grabbed_kern", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1264 task_ledgers.pages_grabbed_iopl = ledger_entry_add_with_flags(t, "pages_grabbed_iopl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1265 task_ledgers.pages_grabbed_upl = ledger_entry_add_with_flags(t, "pages_grabbed_upl", "physmem", "count", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1266 #endif
1267 task_ledgers.tagged_nofootprint = ledger_entry_add_with_flags(t, "tagged_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1268 task_ledgers.tagged_footprint = ledger_entry_add_with_flags(t, "tagged_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1269 task_ledgers.tagged_nofootprint_compressed = ledger_entry_add_with_flags(t, "tagged_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1270 task_ledgers.tagged_footprint_compressed = ledger_entry_add_with_flags(t, "tagged_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1271 task_ledgers.network_volatile = ledger_entry_add_with_flags(t, "network_volatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1272 task_ledgers.network_nonvolatile = ledger_entry_add_with_flags(t, "network_nonvolatile", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1273 task_ledgers.network_volatile_compressed = ledger_entry_add_with_flags(t, "network_volatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1274 task_ledgers.network_nonvolatile_compressed = ledger_entry_add_with_flags(t, "network_nonvolatile_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1275 task_ledgers.media_nofootprint = ledger_entry_add_with_flags(t, "media_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1276 task_ledgers.media_footprint = ledger_entry_add_with_flags(t, "media_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1277 task_ledgers.media_nofootprint_compressed = ledger_entry_add_with_flags(t, "media_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1278 task_ledgers.media_footprint_compressed = ledger_entry_add_with_flags(t, "media_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1279 task_ledgers.graphics_nofootprint = ledger_entry_add_with_flags(t, "graphics_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1280 task_ledgers.graphics_footprint = ledger_entry_add_with_flags(t, "graphics_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1281 task_ledgers.graphics_nofootprint_compressed = ledger_entry_add_with_flags(t, "graphics_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1282 task_ledgers.graphics_footprint_compressed = ledger_entry_add_with_flags(t, "graphics_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1283 task_ledgers.neural_nofootprint = ledger_entry_add_with_flags(t, "neural_nofootprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1284 task_ledgers.neural_footprint = ledger_entry_add_with_flags(t, "neural_footprint", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1285 task_ledgers.neural_nofootprint_compressed = ledger_entry_add_with_flags(t, "neural_nofootprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1286 task_ledgers.neural_footprint_compressed = ledger_entry_add_with_flags(t, "neural_footprint_compressed", "physmem", "bytes", LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1287 task_ledgers.neural_nofootprint_total = ledger_entry_add(t, "neural_nofootprint_total", "physmem", "bytes");
1288
1289 #if CONFIG_FREEZE
1290 task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes");
1291 #endif /* CONFIG_FREEZE */
1292
1293 task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
1294 "count");
1295 task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
1296 "count");
1297
1298 #if CONFIG_SCHED_SFI
1299 sfi_class_id_t class_id, ledger_alias;
1300 for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1301 task_ledgers.sfi_wait_times[class_id] = -1;
1302 }
1303
1304 /* don't account for UNSPECIFIED */
1305 for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
1306 ledger_alias = sfi_get_ledger_alias_for_class(class_id);
1307 if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
1308 /* Check to see if alias has been registered yet */
1309 if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
1310 task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
1311 } else {
1312 /* Otherwise, initialize it first */
1313 task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
1314 }
1315 } else {
1316 task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
1317 }
1318
1319 if (task_ledgers.sfi_wait_times[class_id] < 0) {
1320 panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
1321 }
1322 }
1323
1324 assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
1325 #endif /* CONFIG_SCHED_SFI */
1326
1327 task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
1328 task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
1329 task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
1330 task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
1331 task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
1332 #if CONFIG_PHYS_WRITE_ACCT
1333 task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes");
1334 #endif /* CONFIG_PHYS_WRITE_ACCT */
1335 task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
1336 task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");
1337
1338 #if CONFIG_MEMORYSTATUS
1339 task_ledgers.memorystatus_dirty_time = ledger_entry_add(t, "memorystatus_dirty_time", "physmem", "ns");
1340 #endif /* CONFIG_MEMORYSTATUS */
1341
1342 task_ledgers.swapins = ledger_entry_add_with_flags(t, "swapins", "physmem", "bytes",
1343 LEDGER_ENTRY_ALLOW_PANIC_ON_NEGATIVE);
1344
1345 if ((task_ledgers.cpu_time < 0) ||
1346 (task_ledgers.tkm_private < 0) ||
1347 (task_ledgers.tkm_shared < 0) ||
1348 (task_ledgers.phys_mem < 0) ||
1349 (task_ledgers.wired_mem < 0) ||
1350 (task_ledgers.conclave_mem < 0) ||
1351 (task_ledgers.internal < 0) ||
1352 (task_ledgers.external < 0) ||
1353 (task_ledgers.reusable < 0) ||
1354 (task_ledgers.iokit_mapped < 0) ||
1355 (task_ledgers.alternate_accounting < 0) ||
1356 (task_ledgers.alternate_accounting_compressed < 0) ||
1357 (task_ledgers.page_table < 0) ||
1358 (task_ledgers.phys_footprint < 0) ||
1359 (task_ledgers.internal_compressed < 0) ||
1360 (task_ledgers.purgeable_volatile < 0) ||
1361 (task_ledgers.purgeable_nonvolatile < 0) ||
1362 (task_ledgers.purgeable_volatile_compressed < 0) ||
1363 (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
1364 (task_ledgers.tagged_nofootprint < 0) ||
1365 (task_ledgers.tagged_footprint < 0) ||
1366 (task_ledgers.tagged_nofootprint_compressed < 0) ||
1367 (task_ledgers.tagged_footprint_compressed < 0) ||
1368 #if CONFIG_FREEZE
1369 (task_ledgers.frozen_to_swap < 0) ||
1370 #endif /* CONFIG_FREEZE */
1371 (task_ledgers.network_volatile < 0) ||
1372 (task_ledgers.network_nonvolatile < 0) ||
1373 (task_ledgers.network_volatile_compressed < 0) ||
1374 (task_ledgers.network_nonvolatile_compressed < 0) ||
1375 (task_ledgers.media_nofootprint < 0) ||
1376 (task_ledgers.media_footprint < 0) ||
1377 (task_ledgers.media_nofootprint_compressed < 0) ||
1378 (task_ledgers.media_footprint_compressed < 0) ||
1379 (task_ledgers.graphics_nofootprint < 0) ||
1380 (task_ledgers.graphics_footprint < 0) ||
1381 (task_ledgers.graphics_nofootprint_compressed < 0) ||
1382 (task_ledgers.graphics_footprint_compressed < 0) ||
1383 (task_ledgers.neural_nofootprint < 0) ||
1384 (task_ledgers.neural_footprint < 0) ||
1385 (task_ledgers.neural_nofootprint_compressed < 0) ||
1386 (task_ledgers.neural_footprint_compressed < 0) ||
1387 (task_ledgers.neural_nofootprint_total < 0) ||
1388 (task_ledgers.platform_idle_wakeups < 0) ||
1389 (task_ledgers.interrupt_wakeups < 0) ||
1390 (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
1391 (task_ledgers.physical_writes < 0) ||
1392 (task_ledgers.logical_writes < 0) ||
1393 (task_ledgers.logical_writes_to_external < 0) ||
1394 #if CONFIG_PHYS_WRITE_ACCT
1395 (task_ledgers.fs_metadata_writes < 0) ||
1396 #endif /* CONFIG_PHYS_WRITE_ACCT */
1397 #if CONFIG_MEMORYSTATUS
1398 (task_ledgers.memorystatus_dirty_time < 0) ||
1399 #endif /* CONFIG_MEMORYSTATUS */
1400 (task_ledgers.energy_billed_to_me < 0) ||
1401 (task_ledgers.energy_billed_to_others < 0) ||
1402 (task_ledgers.swapins < 0)
1403 ) {
1404 panic("couldn't create entries for task ledger template");
1405 }
1406
1407 ledger_track_credit_only(t, task_ledgers.phys_footprint);
1408 ledger_track_credit_only(t, task_ledgers.internal);
1409 ledger_track_credit_only(t, task_ledgers.external);
1410 ledger_track_credit_only(t, task_ledgers.reusable);
1411
1412 ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
1413 ledger_track_maximum(t, task_ledgers.phys_mem, 60);
1414 ledger_track_maximum(t, task_ledgers.internal, 60);
1415 ledger_track_maximum(t, task_ledgers.internal_compressed, 60);
1416 ledger_track_maximum(t, task_ledgers.reusable, 60);
1417 ledger_track_maximum(t, task_ledgers.external, 60);
1418 ledger_track_maximum(t, task_ledgers.neural_nofootprint_total, 60);
1419 #if MACH_ASSERT
1420 if (pmap_ledgers_panic) {
1421 ledger_panic_on_negative(t, task_ledgers.phys_footprint);
1422 ledger_panic_on_negative(t, task_ledgers.conclave_mem);
1423 ledger_panic_on_negative(t, task_ledgers.page_table);
1424 ledger_panic_on_negative(t, task_ledgers.internal);
1425 ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
1426 ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
1427 ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
1428 ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
1429 ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
1430 ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
1431 ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
1432 #if CONFIG_PHYS_WRITE_ACCT
1433 ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes);
1434 #endif /* CONFIG_PHYS_WRITE_ACCT */
1435
1436 ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
1437 ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
1438 ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
1439 ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
1440 ledger_panic_on_negative(t, task_ledgers.network_volatile);
1441 ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
1442 ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
1443 ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
1444 ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
1445 ledger_panic_on_negative(t, task_ledgers.media_footprint);
1446 ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
1447 ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
1448 ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
1449 ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
1450 ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
1451 ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
1452 ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
1453 ledger_panic_on_negative(t, task_ledgers.neural_footprint);
1454 ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
1455 ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
1456 }
1457 #endif /* MACH_ASSERT */
1458
1459 #if CONFIG_MEMORYSTATUS
1460 ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
1461 #endif /* CONFIG_MEMORYSTATUS */
1462
1463 ledger_set_callback(t, task_ledgers.interrupt_wakeups,
1464 task_wakeups_rate_exceeded, NULL, NULL);
1465 ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
1466
1467 #if CONFIG_SPTM || !XNU_MONITOR
1468 ledger_template_complete(t);
1469 #else /* CONFIG_SPTM || !XNU_MONITOR */
1470 ledger_template_complete_secure_alloc(t);
1471 #endif /* CONFIG_SPTM || !XNU_MONITOR */
1472 task_ledger_template = t;
1473 }
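/*
 * Illustrative sketch (not part of the build): per-task ledgers are minted
 * from the completed template the same way task_create_internal() does
 * below -- instantiate with active entries and treat failure as a resource
 * shortage:
 *
 *	ledger_t l = ledger_instantiate(task_ledger_template,
 *	    LEDGER_CREATE_ACTIVE_ENTRIES);
 *	if (l == NULL) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 */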
1474
1475 /* Create a task, but leave the task ports disabled */
1476 kern_return_t
1477 task_create_internal(
1478 task_t parent_task, /* Null-able */
1479 proc_ro_t proc_ro,
1480 coalition_t *parent_coalitions __unused,
1481 boolean_t inherit_memory,
1482 boolean_t is_64bit,
1483 boolean_t is_64bit_data,
1484 uint32_t t_flags,
1485 uint32_t t_flags_ro,
1486 uint32_t t_procflags,
1487 uint8_t t_returnwaitflags,
1488 task_t child_task)
1489 {
1490 task_t new_task;
1491 vm_shared_region_t shared_region;
1492 ledger_t ledger = NULL;
1493 struct task_ro_data task_ro_data = {};
1494 uint32_t parent_t_flags_ro = 0;
1495
1496 new_task = child_task;
1497
1498 if (task_ref_count_init(new_task) != KERN_SUCCESS) {
1499 return KERN_RESOURCE_SHORTAGE;
1500 }
1501
1502 /* allocate with active entries */
1503 assert(task_ledger_template != NULL);
1504 ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1505 if (ledger == NULL) {
1506 task_ref_count_fini(new_task);
1507 return KERN_RESOURCE_SHORTAGE;
1508 }
1509
1510 counter_alloc(&(new_task->faults));
1511
1512 #if defined(HAS_APPLE_PAC)
1513 const uint8_t disable_user_jop = inherit_memory ? parent_task->disable_user_jop : FALSE;
1514 ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
1515 ml_task_set_jop_pid(new_task, parent_task, inherit_memory, disable_user_jop);
1516 ml_task_set_disable_user_jop(new_task, disable_user_jop);
1517 #endif
1518
1519
1520 new_task->ledger = ledger;
1521
1522 /* if inherit_memory is true, parent_task MUST not be NULL */
1523 if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
1524 #if CONFIG_DEFERRED_RECLAIM
1525 if (parent_task->deferred_reclamation_metadata) {
1526 /*
1527 * Prevent concurrent reclaims while we're forking the parent_task's map,
1528 * so that the child's map is in sync with the forked reclamation
1529 * metadata.
1530 */
1531 vm_deferred_reclamation_buffer_own(
1532 parent_task->deferred_reclamation_metadata);
1533 }
1534 #endif /* CONFIG_DEFERRED_RECLAIM */
1535 new_task->map = vm_map_fork(ledger, parent_task->map, 0);
1536 #if CONFIG_DEFERRED_RECLAIM
1537 if (new_task->map != NULL &&
1538 parent_task->deferred_reclamation_metadata) {
1539 new_task->deferred_reclamation_metadata =
1540 vm_deferred_reclamation_buffer_fork(new_task,
1541 parent_task->deferred_reclamation_metadata);
1542 }
1543 #endif /* CONFIG_DEFERRED_RECLAIM */
1544 } else {
1545 unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
1546 pmap_t pmap = pmap_create_options(ledger, 0, pmap_flags);
1547 vm_map_t new_map;
1548
1549 if (pmap == NULL) {
1550 counter_free(&new_task->faults);
1551 ledger_dereference(ledger);
1552 task_ref_count_fini(new_task);
1553 return KERN_RESOURCE_SHORTAGE;
1554 }
1555 new_map = vm_map_create_options(pmap,
1556 (vm_map_offset_t)(VM_MIN_ADDRESS),
1557 (vm_map_offset_t)(VM_MAX_ADDRESS),
1558 VM_MAP_CREATE_PAGEABLE);
1559 if (parent_task) {
1560 vm_map_inherit_limits(new_map, parent_task->map);
1561 }
1562 new_task->map = new_map;
1563 }
1564
1565 if (new_task->map == NULL) {
1566 counter_free(&new_task->faults);
1567 ledger_dereference(ledger);
1568 task_ref_count_fini(new_task);
1569 return KERN_RESOURCE_SHORTAGE;
1570 }
1571
1572 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
1573 queue_init(&new_task->threads);
1574 new_task->suspend_count = 0;
1575 new_task->thread_count = 0;
1576 new_task->active_thread_count = 0;
1577 new_task->user_stop_count = 0;
1578 new_task->legacy_stop_count = 0;
1579 new_task->active = TRUE;
1580 new_task->halting = FALSE;
1581 new_task->priv_flags = 0;
1582 new_task->t_flags = t_flags;
1583 task_ro_data.t_flags_ro = t_flags_ro;
1584 new_task->t_procflags = t_procflags;
1585 new_task->t_returnwaitflags = t_returnwaitflags;
1586 new_task->returnwait_inheritor = current_thread();
1587 new_task->importance = 0;
1588 new_task->crashed_thread_id = 0;
1589 new_task->watchports = NULL;
1590 new_task->t_rr_ranges = NULL;
1591
1592 new_task->bank_context = NULL;
1593
1594 if (parent_task) {
1595 parent_t_flags_ro = task_ro_flags_get(parent_task);
1596 }
1597
1598 if (parent_task && inherit_memory) {
1599 #if __has_feature(ptrauth_calls)
1600 /* Inherit the pac exception flags from parent if in fork */
1601 task_ro_data.t_flags_ro |= (parent_t_flags_ro & (TFRO_PAC_ENFORCE_USER_STATE |
1602 TFRO_PAC_EXC_FATAL));
1603 #endif /* __has_feature(ptrauth_calls) */
1604 /* Inherit the hardened binary flags from parent if in fork */
1605 task_ro_data.t_flags_ro |= parent_t_flags_ro & (TFRO_HARDENED | TFRO_PLATFORM | TFRO_JIT_EXC_FATAL);
1606 #if XNU_TARGET_OS_OSX
1607 task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_MACH_HARDENING_OPT_OUT;
1608 #endif /* XNU_TARGET_OS_OSX */
1609 }
1610
1611 #ifdef MACH_BSD
1612 new_task->corpse_info = NULL;
1613 #endif /* MACH_BSD */
1614
1615 /* The kernel task, which is not created by this function, has unique id 0; start at 1 here. */
1616 task_set_uniqueid(new_task);
1617
1618 #if CONFIG_MACF
1619 set_task_crash_label(new_task, NULL);
1620
1621 task_ro_data.task_filters.mach_trap_filter_mask = NULL;
1622 task_ro_data.task_filters.mach_kobj_filter_mask = NULL;
1623 #endif
1624
1625 #if CONFIG_MEMORYSTATUS
1626 if (max_task_footprint != 0) {
1627 ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1628 }
1629 #endif /* CONFIG_MEMORYSTATUS */
1630
1631 if (task_wakeups_monitor_rate != 0) {
1632 uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1633 int32_t rate; // Ignored because of WAKEMON_SET_DEFAULTS
1634 task_wakeups_monitor_ctl(new_task, &flags, &rate);
1635 }
1636
1637 #if CONFIG_IO_ACCOUNTING
1638 uint32_t flags = IOMON_ENABLE;
1639 task_io_monitor_ctl(new_task, &flags);
1640 #endif /* CONFIG_IO_ACCOUNTING */
1641
1642 machine_task_init(new_task, parent_task, inherit_memory);
1643
1644 new_task->task_debug = NULL;
1645
1646 #if DEVELOPMENT || DEBUG
1647 new_task->task_unnested = FALSE;
1648 new_task->task_disconnected_count = 0;
1649 #endif
1650 queue_init(&new_task->semaphore_list);
1651 new_task->semaphores_owned = 0;
1652
1653 new_task->vtimers = 0;
1654
1655 new_task->shared_region = NULL;
1656
1657 new_task->affinity_space = NULL;
1658
1659 #if CONFIG_CPU_COUNTERS
1660 new_task->t_kpc = 0;
1661 #endif /* CONFIG_CPU_COUNTERS */
1662
1663 new_task->pidsuspended = FALSE;
1664 new_task->frozen = FALSE;
1665 new_task->changing_freeze_state = FALSE;
1666 new_task->rusage_cpu_flags = 0;
1667 new_task->rusage_cpu_percentage = 0;
1668 new_task->rusage_cpu_interval = 0;
1669 new_task->rusage_cpu_deadline = 0;
1670 new_task->rusage_cpu_callt = NULL;
1671 #if MACH_ASSERT
1672 new_task->suspends_outstanding = 0;
1673 #endif
1674 recount_task_init(&new_task->tk_recount);
1675
1676 #if HYPERVISOR
1677 new_task->hv_task_target = NULL;
1678 #endif /* HYPERVISOR */
1679
1680 #if CONFIG_TASKWATCH
1681 queue_init(&new_task->task_watchers);
1682 new_task->num_taskwatchers = 0;
1683 new_task->watchapplying = 0;
1684 #endif /* CONFIG_TASKWATCH */
1685
1686 new_task->mem_notify_reserved = 0;
1687 new_task->memlimit_attrs_reserved = 0;
1688
1689 new_task->requested_policy = default_task_requested_policy;
1690 new_task->effective_policy = default_task_effective_policy;
1691
1692 new_task->task_shared_region_slide = -1;
1693
1694 if (parent_task != NULL) {
1695 task_ro_data.task_tokens.sec_token = *task_get_sec_token(parent_task);
1696 task_ro_data.task_tokens.audit_token = *task_get_audit_token(parent_task);
1697
1698 /* only inherit the option bits, no effect until task_set_immovable_pinned() */
1699 task_ro_data.task_control_port_options = task_get_control_port_options(parent_task);
1700
1701 task_ro_data.t_flags_ro |= parent_t_flags_ro & TFRO_FILTER_MSG;
1702 #if CONFIG_MACF
1703 if (!(t_flags & TF_CORPSE_FORK)) {
1704 task_ro_data.task_filters.mach_trap_filter_mask = task_get_mach_trap_filter_mask(parent_task);
1705 task_ro_data.task_filters.mach_kobj_filter_mask = task_get_mach_kobj_filter_mask(parent_task);
1706 }
1707 #endif
1708 } else {
1709 task_ro_data.task_tokens.sec_token = KERNEL_SECURITY_TOKEN;
1710 task_ro_data.task_tokens.audit_token = KERNEL_AUDIT_TOKEN;
1711
1712 task_ro_data.task_control_port_options = TASK_CONTROL_PORT_OPTIONS_NONE;
1713 }
1714
1715 /* must set before task_importance_init_from_parent: */
1716 if (proc_ro != NULL) {
1717 new_task->bsd_info_ro = proc_ro_ref_task(proc_ro, new_task, &task_ro_data);
1718 } else {
1719 new_task->bsd_info_ro = proc_ro_alloc(NULL, NULL, new_task, &task_ro_data);
1720 }
1721
1722 ipc_task_init(new_task, parent_task);
1723
1724 task_importance_init_from_parent(new_task, parent_task);
1725
1726 new_task->corpse_vmobject_list = NULL;
1727
1728 if (parent_task != TASK_NULL) {
1729 /* inherit the parent's shared region */
1730 shared_region = vm_shared_region_get(parent_task);
1731 if (shared_region != NULL) {
1732 vm_shared_region_set(new_task, shared_region);
1733 }
1734
1735 #if __has_feature(ptrauth_calls)
1736 /* use parent's shared_region_id */
1737 char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL);
1738 if (shared_region_id != NULL) {
1739 shared_region_key_alloc(shared_region_id, FALSE, 0); /* get a reference */
1740 }
1741 task_set_shared_region_id(new_task, shared_region_id);
1742 #endif /* __has_feature(ptrauth_calls) */
1743
1744 if (task_has_64Bit_addr(parent_task)) {
1745 task_set_64Bit_addr(new_task);
1746 }
1747
1748 if (task_has_64Bit_data(parent_task)) {
1749 task_set_64Bit_data(new_task);
1750 }
1751
1752 if (inherit_memory) {
1753 new_task->all_image_info_addr = parent_task->all_image_info_addr;
1754 new_task->all_image_info_size = parent_task->all_image_info_size;
1755 if (parent_task->t_flags & TF_DYLD_ALL_IMAGE_FINAL) {
1756 new_task->t_flags |= TF_DYLD_ALL_IMAGE_FINAL;
1757 }
1758 }
1759 new_task->mach_header_vm_address = 0;
1760
1761 if (inherit_memory && parent_task->affinity_space) {
1762 task_affinity_create(parent_task, new_task);
1763 }
1764
1765 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1766
1767 new_task->task_exc_guard = parent_task->task_exc_guard;
1768 if (parent_task->t_flags & TF_NO_SMT) {
1769 new_task->t_flags |= TF_NO_SMT;
1770 }
1771
1772 if (parent_task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE) {
1773 new_task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
1774 }
1775
1776 if (parent_task->t_flags & TF_TECS) {
1777 new_task->t_flags |= TF_TECS;
1778 }
1779
1780 #if defined(__x86_64__)
1781 if (parent_task->t_flags & TF_INSN_COPY_OPTOUT) {
1782 new_task->t_flags |= TF_INSN_COPY_OPTOUT;
1783 }
1784 #endif
1785
1786 new_task->priority = BASEPRI_DEFAULT;
1787 new_task->max_priority = MAXPRI_USER;
1788 } else {
1789 #ifdef __LP64__
1790 if (is_64bit) {
1791 task_set_64Bit_addr(new_task);
1792 }
1793 #endif
1794
1795 if (is_64bit_data) {
1796 task_set_64Bit_data(new_task);
1797 }
1798
1799 new_task->all_image_info_addr = (mach_vm_address_t)0;
1800 new_task->all_image_info_size = (mach_vm_size_t)0;
1801
1802 new_task->pset_hint = PROCESSOR_SET_NULL;
1803
1804 new_task->task_exc_guard = TASK_EXC_GUARD_NONE;
1805
1806 if (new_task == kernel_task) {
1807 new_task->priority = BASEPRI_KERNEL;
1808 new_task->max_priority = MAXPRI_KERNEL;
1809 } else {
1810 new_task->priority = BASEPRI_DEFAULT;
1811 new_task->max_priority = MAXPRI_USER;
1812 }
1813 }
1814
1815 bzero(new_task->coalition, sizeof(new_task->coalition));
1816 for (int i = 0; i < COALITION_NUM_TYPES; i++) {
1817 queue_chain_init(new_task->task_coalition[i]);
1818 }
1819
1820 /* Allocate I/O Statistics */
1821 new_task->task_io_stats = kalloc_data(sizeof(struct io_stat_info),
1822 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1823
1824 bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1825 bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
1826
1827 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1828
1829 counter_alloc(&(new_task->pageins));
1830 counter_alloc(&(new_task->cow_faults));
1831 counter_alloc(&(new_task->messages_sent));
1832 counter_alloc(&(new_task->messages_received));
1833
1834 /* Copy resource accounting info from the parent for a corpse-forked task. */
1835 if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1836 task_rollup_accounting_info(new_task, parent_task);
1837 task_store_owned_vmobject_info(new_task, parent_task);
1838 } else {
1839 /* Initialize to zero for standard fork/spawn case */
1840 new_task->total_runnable_time = 0;
1841 new_task->syscalls_mach = 0;
1842 new_task->syscalls_unix = 0;
1843 new_task->c_switch = 0;
1844 new_task->p_switch = 0;
1845 new_task->ps_switch = 0;
1846 new_task->decompressions = 0;
1847 new_task->low_mem_notified_warn = 0;
1848 new_task->low_mem_notified_critical = 0;
1849 new_task->purged_memory_warn = 0;
1850 new_task->purged_memory_critical = 0;
1851 new_task->low_mem_privileged_listener = 0;
1852 new_task->memlimit_is_active = 0;
1853 new_task->memlimit_is_fatal = 0;
1854 new_task->memlimit_active_exc_resource = 0;
1855 new_task->memlimit_inactive_exc_resource = 0;
1856 new_task->task_timer_wakeups_bin_1 = 0;
1857 new_task->task_timer_wakeups_bin_2 = 0;
1858 new_task->task_gpu_ns = 0;
1859 new_task->task_writes_counters_internal.task_immediate_writes = 0;
1860 new_task->task_writes_counters_internal.task_deferred_writes = 0;
1861 new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1862 new_task->task_writes_counters_internal.task_metadata_writes = 0;
1863 new_task->task_writes_counters_external.task_immediate_writes = 0;
1864 new_task->task_writes_counters_external.task_deferred_writes = 0;
1865 new_task->task_writes_counters_external.task_invalidated_writes = 0;
1866 new_task->task_writes_counters_external.task_metadata_writes = 0;
1867 #if CONFIG_PHYS_WRITE_ACCT
1868 new_task->task_fs_metadata_writes = 0;
1869 #endif /* CONFIG_PHYS_WRITE_ACCT */
1870 }
1871
1872
1873 new_task->donates_own_pages = FALSE;
1874 #if CONFIG_COALITIONS
1875 if (!(t_flags & TF_CORPSE_FORK)) {
1876 /* TODO: there is no graceful failure path here... */
1877 if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1878 coalitions_adopt_task(parent_coalitions, new_task);
1879 if (parent_coalitions[COALITION_TYPE_JETSAM]) {
1880 new_task->donates_own_pages = coalition_is_swappable(parent_coalitions[COALITION_TYPE_JETSAM]);
1881 }
1882 } else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1883 /*
1884 * all tasks at least have a resource coalition, so
1885 * if the parent has one then inherit all coalitions
1886 * the parent is a part of
1887 */
1888 coalitions_adopt_task(parent_task->coalition, new_task);
1889 if (parent_task->coalition[COALITION_TYPE_JETSAM]) {
1890 new_task->donates_own_pages = coalition_is_swappable(parent_task->coalition[COALITION_TYPE_JETSAM]);
1891 }
1892 } else {
1893 /* TODO: assert that new_task will be PID 1 (launchd) */
1894 coalitions_adopt_init_task(new_task);
1895 }
1896 /*
1897 * on exec, we need to transfer the coalition roles from the
1898 * parent task to the exec copy task.
1899 */
1900 if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1901 int coal_roles[COALITION_NUM_TYPES];
1902 task_coalition_roles(parent_task, coal_roles);
1903 (void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1904 }
1905 } else {
1906 coalitions_adopt_corpse_task(new_task);
1907 }
1908
1909 if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1910 panic("created task is not a member of a resource coalition");
1911 }
1912 task_set_coalition_member(new_task);
1913 #endif /* CONFIG_COALITIONS */
1914
1915 if (parent_task != TASK_NULL) {
1916 /* task_policy_create queries the adopted coalition */
1917 task_policy_create(new_task, parent_task);
1918 }
1919
1920 new_task->dispatchqueue_offset = 0;
1921 if (parent_task != NULL) {
1922 new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1923 }
1924
1925 new_task->task_can_transfer_memory_ownership = FALSE;
1926 new_task->task_volatile_objects = 0;
1927 new_task->task_nonvolatile_objects = 0;
1928 new_task->task_objects_disowning = FALSE;
1929 new_task->task_objects_disowned = FALSE;
1930 new_task->task_owned_objects = 0;
1931 queue_init(&new_task->task_objq);
1932
1933 #if CONFIG_FREEZE
1934 queue_init(&new_task->task_frozen_cseg_q);
1935 #endif /* CONFIG_FREEZE */
1936
1937 task_objq_lock_init(new_task);
1938
1939 #if __arm64__
1940 new_task->task_legacy_footprint = FALSE;
1941 new_task->task_extra_footprint_limit = FALSE;
1942 new_task->task_ios13extended_footprint_limit = FALSE;
1943 #endif /* __arm64__ */
1944 new_task->task_region_footprint = FALSE;
1945 new_task->task_has_crossed_thread_limit = FALSE;
1946 new_task->task_thread_limit = 0;
1947 #if CONFIG_SECLUDED_MEMORY
1948 new_task->task_can_use_secluded_mem = FALSE;
1949 new_task->task_could_use_secluded_mem = FALSE;
1950 new_task->task_could_also_use_secluded_mem = FALSE;
1951 new_task->task_suppressed_secluded = FALSE;
1952 #endif /* CONFIG_SECLUDED_MEMORY */
1953
1954
1955 /*
1956 * t_flags is set up above. But since we don't
1957 * support darkwake mode being set that way
1958 * currently, we clear it out here explicitly.
1959 */
1960 new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1961
1962 queue_init(&new_task->io_user_clients);
1963 new_task->loadTag = 0;
1964
1965 lck_mtx_lock(&tasks_threads_lock);
1966 queue_enter(&tasks, new_task, task_t, tasks);
1967 tasks_count++;
1968 if (tasks_suspend_state) {
1969 task_suspend_internal(new_task);
1970 }
1971 lck_mtx_unlock(&tasks_threads_lock);
1972 task_ref_hold_proc_task_struct(new_task);
1973
1974 return KERN_SUCCESS;
1975 }
1976
1977 /*
1978 * task_rollup_accounting_info
1979 *
1980 * Roll up accounting stats. Used to roll up stats
1981 * for an exec copy task or a corpse fork.
1982 */
1983 void
1984 task_rollup_accounting_info(task_t to_task, task_t from_task)
1985 {
1986 assert(from_task != to_task);
1987
1988 recount_task_copy(&to_task->tk_recount, &from_task->tk_recount);
1989 to_task->total_runnable_time = from_task->total_runnable_time;
1990 counter_add(&to_task->faults, counter_load(&from_task->faults));
1991 counter_add(&to_task->pageins, counter_load(&from_task->pageins));
1992 counter_add(&to_task->cow_faults, counter_load(&from_task->cow_faults));
1993 counter_add(&to_task->messages_sent, counter_load(&from_task->messages_sent));
1994 counter_add(&to_task->messages_received, counter_load(&from_task->messages_received));
1995 to_task->decompressions = from_task->decompressions;
1996 to_task->syscalls_mach = from_task->syscalls_mach;
1997 to_task->syscalls_unix = from_task->syscalls_unix;
1998 to_task->c_switch = from_task->c_switch;
1999 to_task->p_switch = from_task->p_switch;
2000 to_task->ps_switch = from_task->ps_switch;
2001 to_task->extmod_statistics = from_task->extmod_statistics;
2002 to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
2003 to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
2004 to_task->purged_memory_warn = from_task->purged_memory_warn;
2005 to_task->purged_memory_critical = from_task->purged_memory_critical;
2006 to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
2007 *to_task->task_io_stats = *from_task->task_io_stats;
2008 to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
2009 to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
2010 to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
2011 to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
2012 to_task->task_gpu_ns = from_task->task_gpu_ns;
2013 to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
2014 to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
2015 to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
2016 to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
2017 to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
2018 to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
2019 to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
2020 to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
2021 #if CONFIG_PHYS_WRITE_ACCT
2022 to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes;
2023 #endif /* CONFIG_PHYS_WRITE_ACCT */
2024
2025 #if CONFIG_MEMORYSTATUS
2026 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.memorystatus_dirty_time);
2027 #endif /* CONFIG_MEMORYSTATUS */
2028
2029 /* Skip ledger roll up for memory accounting entries */
2030 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
2031 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
2032 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
2033 #if CONFIG_SCHED_SFI
2034 for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
2035 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
2036 }
2037 #endif
2038 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
2039 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
2040 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
2041 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
2042 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
2043 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
2044 }
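/*
 * Usage sketch (illustrative only): task_create_internal() above rolls a
 * parent's accounting into a corpse-forked child right after construction:
 *
 *	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
 *		task_rollup_accounting_info(new_task, parent_task);
 *	}
 */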
2045
2046 /*
2047 * task_deallocate_internal:
2048 *
2049 * Drop a reference on a task.
2050 * Don't call this directly.
2051 */
2052 extern void task_deallocate_internal(task_t task, os_ref_count_t refs);
2053 void
2054 task_deallocate_internal(
2055 task_t task,
2056 os_ref_count_t refs)
2057 {
2058 ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
2059
2060 if (task == TASK_NULL) {
2061 return;
2062 }
2063
2064 #if IMPORTANCE_INHERITANCE
2065 if (refs == 1) {
2066 /*
2067 * If last ref potentially comes from the task's importance,
2068 * disconnect it. But more task refs may be added before
2069 * that completes, so wait for the reference to go to zero
2070 * naturally (it may happen on a recursive task_deallocate()
2071 * from the ipc_importance_disconnect_task() call).
2072 */
2073 if (IIT_NULL != task->task_imp_base) {
2074 ipc_importance_disconnect_task(task);
2075 }
2076 return;
2077 }
2078 #endif /* IMPORTANCE_INHERITANCE */
2079
2080 if (refs > 0) {
2081 return;
2082 }
2083
2084 /*
2085 * The task should be dead at this point. Ensure other resources,
2086 * like threads, are gone before we trash the world.
2087 */
2088 assert(queue_empty(&task->threads));
2089 assert(get_bsdtask_info(task) == NULL);
2090 assert(!is_active(task->itk_space));
2091 assert(!task->active);
2092 assert(task->active_thread_count == 0);
2093 assert(!task_get_game_mode(task));
2094 assert(!task_get_carplay_mode(task));
2095
2096 lck_mtx_lock(&tasks_threads_lock);
2097 assert(terminated_tasks_count > 0);
2098 queue_remove(&terminated_tasks, task, task_t, tasks);
2099 terminated_tasks_count--;
2100 lck_mtx_unlock(&tasks_threads_lock);
2101
2102 /*
2103 * remove the reference on bank context
2104 */
2105 task_bank_reset(task);
2106
2107 kfree_data(task->task_io_stats, sizeof(struct io_stat_info));
2108
2109 /*
2110 * Give the machine dependent code a chance
2111 * to perform cleanup before ripping apart
2112 * the task.
2113 */
2114 machine_task_terminate(task);
2115
2116 ipc_task_terminate(task);
2117
2118 /* let IOKit know (phase 2) */
2119 iokit_task_terminate(task, 2);
2120
2121 /* Unregister task from userspace coredumps on panic */
2122 kern_unregister_userspace_coredump(task);
2123
2124 if (task->affinity_space) {
2125 task_affinity_deallocate(task);
2126 }
2127
2128 #if MACH_ASSERT
2129 if (task->ledger != NULL &&
2130 task->map != NULL &&
2131 task->map->pmap != NULL &&
2132 task->map->pmap->ledger != NULL) {
2133 assert(task->ledger == task->map->pmap->ledger);
2134 }
2135 #endif /* MACH_ASSERT */
2136
2137 vm_owned_objects_disown(task);
2138 assert(task->task_objects_disowned);
2139 if (task->task_owned_objects != 0) {
2140 panic("task_deallocate(%p): "
2141 "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
2142 task,
2143 task->task_volatile_objects,
2144 task->task_nonvolatile_objects,
2145 task->task_owned_objects);
2146 }
2147
2148 #if CONFIG_DEFERRED_RECLAIM
2149 if (task->deferred_reclamation_metadata != NULL) {
2150 vm_deferred_reclamation_buffer_deallocate(task->deferred_reclamation_metadata);
2151 task->deferred_reclamation_metadata = NULL;
2152 }
2153 #endif /* CONFIG_DEFERRED_RECLAIM */
2154
2155 vm_map_deallocate(task->map);
2156 if (task->is_large_corpse) {
2157 assert(large_corpse_count > 0);
2158 OSDecrementAtomic(&large_corpse_count);
2159 task->is_large_corpse = false;
2160 }
2161 is_release(task->itk_space);
2162
2163 if (task->t_rr_ranges) {
2164 restartable_ranges_release(task->t_rr_ranges);
2165 }
2166
2167 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2168 &interrupt_wakeups, &debit);
2169 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2170 &platform_idle_wakeups, &debit);
2171
2172 struct recount_times_mach sum = { 0 };
2173 struct recount_times_mach p_only = { 0 };
2174 recount_task_times_perf_only(task, &sum, &p_only);
2175 #if CONFIG_PERVASIVE_ENERGY
2176 uint64_t energy = recount_task_energy_nj(task);
2177 #endif /* CONFIG_PERVASIVE_ENERGY */
2178 recount_task_deinit(&task->tk_recount);
2179
2180 /* Accumulate statistics for dead tasks */
2181 lck_spin_lock(&dead_task_statistics_lock);
2182 dead_task_statistics.total_user_time += sum.rtm_user;
2183 dead_task_statistics.total_system_time += sum.rtm_system;
2184
2185 dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
2186 dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
2187
2188 dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
2189 dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
2190 dead_task_statistics.total_ptime += p_only.rtm_user + p_only.rtm_system;
2191 dead_task_statistics.total_pset_switches += task->ps_switch;
2192 dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
2193 #if CONFIG_PERVASIVE_ENERGY
2194 dead_task_statistics.task_energy += energy;
2195 #endif /* CONFIG_PERVASIVE_ENERGY */
2196
2197 lck_spin_unlock(&dead_task_statistics_lock);
2198 lck_mtx_destroy(&task->lock, &task_lck_grp);
2199
2200 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
2201 &debit)) {
2202 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
2203 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
2204 }
2205 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
2206 &debit)) {
2207 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
2208 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
2209 }
2210 ledger_dereference(task->ledger);
2211
2212 counter_free(&task->faults);
2213 counter_free(&task->pageins);
2214 counter_free(&task->cow_faults);
2215 counter_free(&task->messages_sent);
2216 counter_free(&task->messages_received);
2217
2218 #if CONFIG_COALITIONS
2219 task_release_coalitions(task);
2220 #endif /* CONFIG_COALITIONS */
2221
2222 bzero(task->coalition, sizeof(task->coalition));
2223
2224 #if MACH_BSD
2225 /* clean up collected information since last reference to task is gone */
2226 if (task->corpse_info) {
2227 void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
2228 task_crashinfo_destroy(task->corpse_info);
2229 task->corpse_info = NULL;
2230 kfree_data(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
2231 }
2232 #endif
2233
2234 #if CONFIG_MACF
2235 if (get_task_crash_label(task)) {
2236 mac_exc_free_label(get_task_crash_label(task));
2237 set_task_crash_label(task, NULL);
2238 }
2239 #endif
2240
2241 assert(queue_empty(&task->task_objq));
2242 task_objq_lock_destroy(task);
2243
2244 if (task->corpse_vmobject_list) {
2245 kfree_data(task->corpse_vmobject_list,
2246 (vm_size_t)task->corpse_vmobject_list_size);
2247 }
2248
2249 task_ref_count_fini(task);
2250 proc_ro_erase_task(task->bsd_info_ro);
2251 task_release_proc_task_struct(task, task->bsd_info_ro);
2252 }
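/*
 * Hedged sketch of the caller contract implied by the extern declaration
 * above: task_deallocate() drops one reference and passes the remaining
 * count along, so the teardown in task_deallocate_internal() only runs once
 * refs reaches 0. The helper name below is illustrative, not the actual
 * implementation:
 *
 *	os_ref_count_t refs = task_release_ref(task);	// illustrative name
 *	task_deallocate_internal(task, refs);
 */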
2253
2254 /*
2255 * task_name_deallocate_mig:
2256 *
2257 * Drop a reference on a task name.
2258 */
2259 void
2260 task_name_deallocate_mig(
2261 task_name_t task_name)
2262 {
2263 return task_deallocate_grp((task_t)task_name, TASK_GRP_MIG);
2264 }
2265
2266 /*
2267 * task_policy_set_deallocate_mig:
2268 *
2269 * Drop a reference on a task type.
2270 */
2271 void
2272 task_policy_set_deallocate_mig(task_policy_set_t task_policy_set)
2273 {
2274 return task_deallocate_grp((task_t)task_policy_set, TASK_GRP_MIG);
2275 }
2276
2277 /*
2278 * task_policy_get_deallocate_mig:
2279 *
2280 * Drop a reference on a task type.
2281 */
2282 void
2283 task_policy_get_deallocate_mig(task_policy_get_t task_policy_get)
2284 {
2285 return task_deallocate_grp((task_t)task_policy_get, TASK_GRP_MIG);
2286 }
2287
2288 /*
2289 * task_inspect_deallocate_mig:
2290 *
2291 * Drop a task inspection reference.
2292 */
2293 void
2294 task_inspect_deallocate_mig(
2295 task_inspect_t task_inspect)
2296 {
2297 return task_deallocate_grp((task_t)task_inspect, TASK_GRP_MIG);
2298 }
2299
2300 /*
2301 * task_read_deallocate_mig:
2302 *
2303 * Drop a reference on task read port.
2304 */
2305 void
2306 task_read_deallocate_mig(
2307 task_read_t task_read)
2308 {
2309 return task_deallocate_grp((task_t)task_read, TASK_GRP_MIG);
2310 }
2311
2312 /*
2313 * task_suspension_token_deallocate:
2314 *
2315 * Drop a reference on a task suspension token.
2316 */
2317 void
2318 task_suspension_token_deallocate(
2319 task_suspension_token_t token)
2320 {
2321 return task_deallocate((task_t)token);
2322 }
2323
2324 void
2325 task_suspension_token_deallocate_grp(
2326 task_suspension_token_t token,
2327 task_grp_t grp)
2328 {
2329 return task_deallocate_grp((task_t)token, grp);
2330 }
2331
2332 /*
2333 * task_collect_crash_info:
2334 *
2335 * Collect crash info from BSD- and Mach-based data.
2336 */
2337 kern_return_t
2338 task_collect_crash_info(
2339 task_t task,
2340 #ifdef CONFIG_MACF
2341 struct label *crash_label,
2342 #endif
2343 int is_corpse_fork)
2344 {
2345 kern_return_t kr = KERN_SUCCESS;
2346
2347 kcdata_descriptor_t crash_data = NULL;
2348 kcdata_descriptor_t crash_data_release = NULL;
2349 mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
2350 mach_vm_offset_t crash_data_ptr = 0;
2351 void *crash_data_kernel = NULL;
2352 void *crash_data_kernel_release = NULL;
2353 #if CONFIG_MACF
2354 struct label *label, *free_label;
2355 #endif
2356
2357 if (!corpses_enabled()) {
2358 return KERN_NOT_SUPPORTED;
2359 }
2360
2361 #if CONFIG_MACF
2362 free_label = label = mac_exc_create_label(NULL);
2363 #endif
2364
2365 task_lock(task);
2366
2367 assert(is_corpse_fork || get_bsdtask_info(task) != NULL);
2368 if (task->corpse_info == NULL && (is_corpse_fork || get_bsdtask_info(task) != NULL)) {
2369 #if CONFIG_MACF
2370 /* Set the crash label, used by the exception delivery mac hook */
2371 free_label = get_task_crash_label(task); // Most likely NULL.
2372 set_task_crash_label(task, label);
2373 mac_exc_update_task_crash_label(task, crash_label);
2374 #endif
2375 task_unlock(task);
2376
2377 crash_data_kernel = kalloc_data(CORPSEINFO_ALLOCATION_SIZE,
2378 Z_WAITOK | Z_ZERO);
2379 if (crash_data_kernel == NULL) {
2380 kr = KERN_RESOURCE_SHORTAGE;
2381 goto out_no_lock;
2382 }
2383 crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
2384
2385 /* Do not get a corpse ref for corpse fork */
2386 crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
2387 is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2388 KCFLAG_USE_MEMCOPY);
2389 if (crash_data) {
2390 task_lock(task);
2391 crash_data_release = task->corpse_info;
2392 crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2393 task->corpse_info = crash_data;
2394
2395 task_unlock(task);
2396 kr = KERN_SUCCESS;
2397 } else {
2398 kfree_data(crash_data_kernel,
2399 CORPSEINFO_ALLOCATION_SIZE);
2400 kr = KERN_FAILURE;
2401 }
2402
2403 if (crash_data_release != NULL) {
2404 task_crashinfo_destroy(crash_data_release);
2405 }
2406 kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2407 } else {
2408 task_unlock(task);
2409 }
2410
2411 out_no_lock:
2412 #if CONFIG_MACF
2413 if (free_label != NULL) {
2414 mac_exc_free_label(free_label);
2415 }
2416 #endif
2417 return kr;
2418 }
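/*
 * Usage sketch (illustrative only): task_mark_corpse() below calls this with
 * a freshly created MACF crash label for the current task:
 *
 *	kr = task_collect_crash_info(task,
 *	#if CONFIG_MACF
 *	    crash_label,
 *	#endif
 *	    FALSE);
 */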
2419
2420 /*
2421 * task_deliver_crash_notification:
2422 *
2423 * Makes outcall to registered host port for a corpse.
2424 */
2425 kern_return_t
2426 task_deliver_crash_notification(
2427 task_t corpse, /* corpse or corpse fork */
2428 thread_t thread,
2429 exception_type_t etype,
2430 mach_exception_subcode_t subcode)
2431 {
2432 kcdata_descriptor_t crash_info = corpse->corpse_info;
2433 thread_t th_iter = NULL;
2434 kern_return_t kr = KERN_SUCCESS;
2435 wait_interrupt_t wsave;
2436 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
2437 ipc_port_t corpse_port;
2438
2439 if (crash_info == NULL) {
2440 return KERN_FAILURE;
2441 }
2442
2443 assert(task_is_a_corpse(corpse));
2444
2445 task_lock(corpse);
2446
2447 /*
2448 * Always populate code[0] as the effective exception type for EXC_CORPSE_NOTIFY.
2449 * Crash reporters should derive whether it's fatal from the corpse blob.
2450 */
2451 code[0] = etype;
2452 code[1] = subcode;
2453
2454 queue_iterate(&corpse->threads, th_iter, thread_t, task_threads)
2455 {
2456 if (th_iter->corpse_dup == FALSE) {
2457 ipc_thread_reset(th_iter);
2458 }
2459 }
2460 task_unlock(corpse);
2461
2462 /* Arm the no-sender notification for taskport */
2463 task_reference(corpse);
2464 corpse_port = convert_corpse_to_port_and_nsrequest(corpse);
2465
2466 wsave = thread_interrupt_level(THREAD_UNINT);
2467 kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
2468 if (kr != KERN_SUCCESS) {
2469 printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(corpse));
2470 }
2471
2472 (void)thread_interrupt_level(wsave);
2473
2474 /*
2475 * Drop the send right on corpse port, will fire the
2476 * no-sender notification if exception deliver failed.
2477 */
2478 ipc_port_release_send(corpse_port);
2479 return kr;
2480 }
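/*
 * Hedged note: a receiver of EXC_CORPSE_NOTIFY observes the values populated
 * above as code[0] == etype and code[1] == subcode; whether the crash was
 * fatal must be derived from the corpse's kcdata blob, not from these codes.
 */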
2481
2482 /*
2483 * task_terminate:
2484 *
2485 * Terminate the specified task. See comments on thread_terminate
2486 * (kern/thread.c) about problems with terminating the "current task."
2487 */
2488
2489 kern_return_t
2490 task_terminate(
2491 task_t task)
2492 {
2493 if (task == TASK_NULL) {
2494 return KERN_INVALID_ARGUMENT;
2495 }
2496
2497 if (get_bsdtask_info(task)) {
2498 return KERN_FAILURE;
2499 }
2500
2501 return task_terminate_internal(task);
2502 }
2503
2504 #if MACH_ASSERT
2505 extern int proc_pid(struct proc *);
2506 extern void proc_name_kdp(struct proc *p, char *buf, int size);
2507 #endif /* MACH_ASSERT */
2508
2509 static void
2510 __unused task_partial_reap(task_t task, __unused int pid)
2511 {
2512 unsigned int reclaimed_resident = 0;
2513 unsigned int reclaimed_compressed = 0;
2514 uint64_t task_page_count;
2515
2516 task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2517
2518 KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_START,
2519 pid, task_page_count);
2520
2521 vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2522
2523 KDBG(VMDBG_CODE(DBG_VM_MAP_PARTIAL_REAP) | DBG_FUNC_END,
2524 pid, reclaimed_resident, reclaimed_compressed);
2525 }
2526
2527 /*
2528 * task_mark_corpse:
2529 *
2530 * Mark the task as a corpse. Called by crashing thread.
2531 */
2532 kern_return_t
2533 task_mark_corpse(task_t task)
2534 {
2535 kern_return_t kr = KERN_SUCCESS;
2536 thread_t self_thread;
2537 (void) self_thread;
2538 wait_interrupt_t wsave;
2539 #if CONFIG_MACF
2540 struct label *crash_label = NULL;
2541 #endif
2542
2543 assert(task != kernel_task);
2544 assert(task == current_task());
2545 assert(!task_is_a_corpse(task));
2546
2547 #if CONFIG_MACF
2548 crash_label = mac_exc_create_label_for_proc((struct proc*)get_bsdtask_info(task));
2549 #endif
2550
2551 kr = task_collect_crash_info(task,
2552 #if CONFIG_MACF
2553 crash_label,
2554 #endif
2555 FALSE);
2556 if (kr != KERN_SUCCESS) {
2557 goto out;
2558 }
2559
2560 self_thread = current_thread();
2561
2562 wsave = thread_interrupt_level(THREAD_UNINT);
2563 task_lock(task);
2564
2565 /*
2566 * Check if any other thread called task_terminate_internal
2567 * and made the task inactive before we could mark it for
2568 * corpse pending report. Bail out if the task is inactive.
2569 */
2570 if (!task->active) {
2571 kcdata_descriptor_t crash_data_release = task->corpse_info;
2572 void *crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
2573
2574 task->corpse_info = NULL;
2575 task_unlock(task);
2576
2577 if (crash_data_release != NULL) {
2578 task_crashinfo_destroy(crash_data_release);
2579 }
2580 kfree_data(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
2581 return KERN_TERMINATED;
2582 }
2583
2584 task_set_corpse_pending_report(task);
2585 task_set_corpse(task);
2586 task->crashed_thread_id = thread_tid(self_thread);
2587
2588 kr = task_start_halt_locked(task, TRUE);
2589 assert(kr == KERN_SUCCESS);
2590
2591 task_set_uniqueid(task);
2592
2593 task_unlock(task);
2594
2595 /*
2596 * ipc_task_reset() moved to last thread_terminate_self(): rdar://75737960.
2597 * disable old ports here instead.
2598 *
2599 * The vm_map and ipc_space must exist until this function returns,
2600 * convert_port_to_{map,space}_with_flavor relies on this behavior.
2601 */
2602 ipc_task_disable(task);
2603
2604 /* let IOKit know (phase 1) */
2605 iokit_task_terminate(task, 1);
2606
2607 /* terminate the ipc space */
2608 ipc_space_terminate(task->itk_space);
2609
2610 /* Add it to global corpse task list */
2611 task_add_to_corpse_task_list(task);
2612
2613 thread_terminate_internal(self_thread);
2614
2615 (void) thread_interrupt_level(wsave);
2616 assert(task->halting == TRUE);
2617
2618 out:
2619 #if CONFIG_MACF
2620 mac_exc_free_label(crash_label);
2621 #endif
2622 return kr;
2623 }
2624
2625 /*
2626 * task_set_uniqueid
2627 *
2628 * Set task uniqueid to systemwide unique 64 bit value
2629 */
2630 void
2631 task_set_uniqueid(task_t task)
2632 {
2633 task->task_uniqueid = OSIncrementAtomic64(&next_taskuniqueid);
2634 }
2635
2636 /*
2637 * task_clear_corpse
2638 *
2639 * Clears the corpse pending bit on task.
2640 * Removes inspection bit on the threads.
2641 */
2642 void
2643 task_clear_corpse(task_t task)
2644 {
2645 thread_t th_iter = NULL;
2646
2647 task_lock(task);
2648 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2649 {
2650 thread_mtx_lock(th_iter);
2651 th_iter->inspection = FALSE;
2652 ipc_thread_disable(th_iter);
2653 thread_mtx_unlock(th_iter);
2654 }
2655
2656 thread_terminate_crashed_threads();
2657 /* remove the pending corpse report flag */
2658 task_clear_corpse_pending_report(task);
2659
2660 task_unlock(task);
2661 }
2662
2663 /*
2664 * task_port_no_senders
2665 *
2666 * Called whenever the Mach port system detects no-senders on
2667 * the task port of a corpse.
2668 * Each notification that comes in should terminate the task (corpse).
2669 */
2670 static void
2671 task_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
2672 {
2673 task_t task = ipc_kobject_get_locked(port, IKOT_TASK_CONTROL);
2674
2675 assert(task != TASK_NULL);
2676 assert(task_is_a_corpse(task));
2677
2678 /* Remove the task from global corpse task list */
2679 task_remove_from_corpse_task_list(task);
2680
2681 task_clear_corpse(task);
2682 vm_map_unset_corpse_source(task->map);
2683 task_terminate_internal(task);
2684 }
2685
2686 /*
2687 * task_port_with_flavor_no_senders
2688 *
2689 * Called whenever the Mach port system detects no-senders on
2690 * the task inspect or read port. These ports are allocated lazily and
2691 * should be deallocated here when there are no senders remaining.
2692 */
2693 static void
2694 task_port_with_flavor_no_senders(
2695 ipc_port_t port,
2696 mach_port_mscount_t mscount __unused)
2697 {
2698 task_t task;
2699 mach_task_flavor_t flavor;
2700 ipc_kobject_type_t kotype;
2701
2702 ip_mq_lock(port);
2703 if (port->ip_srights > 0) {
2704 ip_mq_unlock(port);
2705 return;
2706 }
2707 kotype = ip_kotype(port);
2708 assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype));
2709 task = ipc_kobject_get_locked(port, kotype);
2710 if (task != TASK_NULL) {
2711 task_reference(task);
2712 }
2713 ip_mq_unlock(port);
2714
2715 if (task == TASK_NULL) {
2716 /* The task is exiting or disabled; it will eventually deallocate the port */
2717 return;
2718 }
2719
2720 if (kotype == IKOT_TASK_READ) {
2721 flavor = TASK_FLAVOR_READ;
2722 } else {
2723 flavor = TASK_FLAVOR_INSPECT;
2724 }
2725
2726 itk_lock(task);
2727 ip_mq_lock(port);
2728
2729 /*
2730 * If the port is no longer active, then ipc_task_terminate() ran
2731 * and destroyed the kobject already. Just deallocate the task
2732 * ref we took and go away.
2733 *
2734 * It is also possible that several nsrequests are in flight,
2735 * only one shall NULL-out the port entry, and this is the one
2736 * that gets to dealloc the port.
2737 *
2738 * Check for a stale no-senders notification. A call to any function
2739 * that vends out send rights to this port could resurrect it between
2740 * this notification being generated and actually being handled here.
2741 */
2742 if (!ip_active(port) ||
2743 task->itk_task_ports[flavor] != port ||
2744 port->ip_srights > 0) {
2745 ip_mq_unlock(port);
2746 itk_unlock(task);
2747 task_deallocate(task);
2748 return;
2749 }
2750
2751 assert(task->itk_task_ports[flavor] == port);
2752 task->itk_task_ports[flavor] = IP_NULL;
2753 itk_unlock(task);
2754
2755 ipc_kobject_dealloc_port_and_unlock(port, 0, kotype);
2756
2757 task_deallocate(task);
2758 }
2759
2760 /*
2761 * task_wait_till_threads_terminate_locked
2762 *
2763 * Wait till all the threads in the task are terminated.
2764 * Might release the task lock and re-acquire it.
2765 */
2766 void
2767 task_wait_till_threads_terminate_locked(task_t task)
2768 {
2769 /* wait for all the threads in the task to terminate */
2770 while (task->active_thread_count != 0) {
2771 assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2772 task_unlock(task);
2773 thread_block(THREAD_CONTINUE_NULL);
2774
2775 task_lock(task);
2776 }
2777 }
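/*
 * Hedged sketch: the wait above pairs with a wakeup issued on the same event
 * by the thread-termination path once active_thread_count drops to zero,
 * along the lines of (call site not shown in this excerpt):
 *
 *	if (--task->active_thread_count == 0) {
 *		thread_wakeup((event_t)&task->active_thread_count);
 *	}
 */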
2778
2779 /*
2780 * task_duplicate_map_and_threads
2781 *
2782 * Copy the vmmap of the source task.
2783 * Copy active threads from the source task to the destination task.
2784 * The source task is suspended for the duration of the copy.
2785 */
2786 kern_return_t
2787 task_duplicate_map_and_threads(
2788 task_t task,
2789 void *p,
2790 task_t new_task,
2791 thread_t *thread_ret,
2792 uint64_t **udata_buffer,
2793 int *size,
2794 int *num_udata,
2795 bool for_exception)
2796 {
2797 kern_return_t kr = KERN_SUCCESS;
2798 int active;
2799 thread_t thread, self, thread_return = THREAD_NULL;
2800 thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
2801 thread_t *thread_array;
2802 uint32_t active_thread_count = 0, array_count = 0, i;
2803 vm_map_t oldmap;
2804 uint64_t *buffer = NULL;
2805 int buf_size = 0;
2806 int est_knotes = 0, num_knotes = 0;
2807
2808 self = current_thread();
2809
2810 /*
2811 * Suspend the task to copy thread state, use the internal
2812 * variant so that no user-space process can resume
2813 * the task from under us
2814 */
2815 kr = task_suspend_internal(task);
2816 if (kr != KERN_SUCCESS) {
2817 return kr;
2818 }
2819
2820 if (task->map->disable_vmentry_reuse == TRUE) {
2821 /*
2822 * GuardMalloc (or some other debugging tool) is most likely
2823 * in use on this task and has run into its VM entry limit.
2824 * Making a corpse would then encounter a lot of VM entries
2825 * that need COW.
2826 *
2827 * Skip it.
2828 */
2829 #if DEVELOPMENT || DEBUG
2830 memorystatus_abort_vm_map_fork(task);
2831 #endif
2832 ktriage_record(thread_tid(self), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_CORPSE, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_CORPSE_FAIL_LIBGMALLOC), 0 /* arg */);
2833 task_resume_internal(task);
2834 return KERN_FAILURE;
2835 }
2836
2837 /* Check with VM if vm_map_fork is allowed for this task */
2838 bool is_large = false;
2839 if (memorystatus_allowed_vm_map_fork(task, &is_large)) {
2840 /* Set up the new task's vm_map: switch to a COW fork of the source task's map */
2841 oldmap = new_task->map;
2842 new_task->map = vm_map_fork(new_task->ledger,
2843 task->map,
2844 (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2845 VM_MAP_FORK_PRESERVE_PURGEABLE |
2846 VM_MAP_FORK_CORPSE_FOOTPRINT |
2847 VM_MAP_FORK_SHARE_IF_OWNED));
2848 if (new_task->map) {
2849 new_task->is_large_corpse = is_large;
2850 vm_map_deallocate(oldmap);
2851
2852 /* copy ledgers that impact the memory footprint */
2853 vm_map_copy_footprint_ledgers(task, new_task);
2854
2855 /* Get all the udata pointers from kqueue */
2856 est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2857 if (est_knotes > 0) {
2858 buf_size = (est_knotes + 32) * sizeof(uint64_t);
2859 buffer = kalloc_data(buf_size, Z_WAITOK);
2860 num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2861 if (num_knotes > est_knotes + 32) {
2862 num_knotes = est_knotes + 32;
2863 }
2864 }
2865 } else {
2866 if (is_large) {
2867 assert(large_corpse_count > 0);
2868 OSDecrementAtomic(&large_corpse_count);
2869 }
2870 new_task->map = oldmap;
2871 #if DEVELOPMENT || DEBUG
2872 memorystatus_abort_vm_map_fork(task);
2873 #endif
2874 task_resume_internal(task);
2875 return KERN_NO_SPACE;
2876 }
2877 } else if (!for_exception) {
2878 #if DEVELOPMENT || DEBUG
2879 memorystatus_abort_vm_map_fork(task);
2880 #endif
2881 task_resume_internal(task);
2882 return KERN_NO_SPACE;
2883 }
2884
2885 active_thread_count = task->active_thread_count;
2886 if (active_thread_count == 0) {
2887 kfree_data(buffer, buf_size);
2888 task_resume_internal(task);
2889 return KERN_FAILURE;
2890 }
2891
2892 thread_array = kalloc_type(thread_t, active_thread_count, Z_WAITOK);
2893
2894 /* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */
2895 task_lock(task);
2896 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2897 /* Skip inactive threads */
2898 active = thread->active;
2899 if (!active) {
2900 continue;
2901 }
2902
2903 if (array_count >= active_thread_count) {
2904 break;
2905 }
2906
2907 thread_array[array_count++] = thread;
2908 thread_reference(thread);
2909 }
2910 task_unlock(task);
2911
2912 for (i = 0; i < array_count; i++) {
2913 kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2914 if (kr != KERN_SUCCESS) {
2915 break;
2916 }
2917
2918 /* Equivalent of current thread in corpse */
2919 if (thread_array[i] == self) {
2920 thread_return = new_thread;
2921 new_task->crashed_thread_id = thread_tid(new_thread);
2922 } else if (first_thread == NULL) {
2923 first_thread = new_thread;
2924 } else {
2925 /* drop the extra ref returned by thread_create_with_continuation */
2926 thread_deallocate(new_thread);
2927 }
2928
2929 kr = thread_dup2(thread_array[i], new_thread);
2930 if (kr != KERN_SUCCESS) {
2931 thread_mtx_lock(new_thread);
2932 new_thread->corpse_dup = TRUE;
2933 thread_mtx_unlock(new_thread);
2934 continue;
2935 }
2936
2937 /* Copy thread name */
2938 bsd_copythreadname(get_bsdthread_info(new_thread),
2939 get_bsdthread_info(thread_array[i]));
2940 new_thread->thread_tag = thread_array[i]->thread_tag &
2941 ~THREAD_TAG_USER_JOIN;
2942 thread_copy_resource_info(new_thread, thread_array[i]);
2943 }
2944
2945 /* return the first thread if we couldn't find the equivalent of current */
2946 if (thread_return == THREAD_NULL) {
2947 thread_return = first_thread;
2948 } else if (first_thread != THREAD_NULL) {
2949 /* drop the extra ref returned by thread_create_with_continuation */
2950 thread_deallocate(first_thread);
2951 }
2952
2953 task_resume_internal(task);
2954
2955 for (i = 0; i < array_count; i++) {
2956 thread_deallocate(thread_array[i]);
2957 }
2958 kfree_type(thread_t, active_thread_count, thread_array);
2959
2960 if (kr == KERN_SUCCESS) {
2961 *thread_ret = thread_return;
2962 *udata_buffer = buffer;
2963 *size = buf_size;
2964 *num_udata = num_knotes;
2965 } else {
2966 if (thread_return != THREAD_NULL) {
2967 thread_deallocate(thread_return);
2968 }
2969 kfree_data(buffer, buf_size);
2970 }
2971
2972 return kr;
2973 }
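/*
 * Usage sketch (hedged; local names are illustrative): the corpse-generation
 * path hands this routine the crashing task and the freshly created corpse
 * task, then attaches the returned thread and kqueue uptr buffer to the
 * corpse's crash info:
 *
 *	kr = task_duplicate_map_and_threads(task, p, new_task, &thread,
 *	    &udata_buffer, &size, &num_udata, for_exception);
 */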
2974
2975 #if CONFIG_SECLUDED_MEMORY
2976 extern void task_set_can_use_secluded_mem_locked(
2977 task_t task,
2978 boolean_t can_use_secluded_mem);
2979 #endif /* CONFIG_SECLUDED_MEMORY */
2980
2981 #if MACH_ASSERT
2982 int debug4k_panic_on_terminate = 0;
2983 #endif /* MACH_ASSERT */
2984 kern_return_t
2985 task_terminate_internal(
2986 task_t task)
2987 {
2988 thread_t thread, self;
2989 task_t self_task;
2990 boolean_t interrupt_save;
2991 int pid = 0;
2992
2993 assert(task != kernel_task);
2994
2995 self = current_thread();
2996 self_task = current_task();
2997
2998 /*
2999 * Get the task locked and make sure that we are not racing
3000 * with someone else trying to terminate us.
3001 */
3002 if (task == self_task) {
3003 task_lock(task);
3004 } else if (task < self_task) {
3005 task_lock(task);
3006 task_lock(self_task);
3007 } else {
3008 task_lock(self_task);
3009 task_lock(task);
3010 }
3011
3012 #if CONFIG_SECLUDED_MEMORY
3013 if (task->task_can_use_secluded_mem) {
3014 task_set_can_use_secluded_mem_locked(task, FALSE);
3015 }
3016 task->task_could_use_secluded_mem = FALSE;
3017 task->task_could_also_use_secluded_mem = FALSE;
3018
3019 if (task->task_suppressed_secluded) {
3020 stop_secluded_suppression(task);
3021 }
3022 #endif /* CONFIG_SECLUDED_MEMORY */
3023
3024 if (!task->active) {
3025 /*
3026 * Task is already being terminated.
3027 * Just return an error. If we are dying, this will
3028 * just get us to our AST special handler and that
3029 * will get us to finalize the termination of ourselves.
3030 */
3031 task_unlock(task);
3032 if (self_task != task) {
3033 task_unlock(self_task);
3034 }
3035
3036 return KERN_FAILURE;
3037 }
3038
3039 if (task_corpse_pending_report(task)) {
3040 /*
3041 * Task is marked for reporting as corpse.
3042 * Just return an error. This will
3043 * take us to our AST special handler, which will
3044 * finish the path to death.
3045 */
3046 task_unlock(task);
3047 if (self_task != task) {
3048 task_unlock(self_task);
3049 }
3050
3051 return KERN_FAILURE;
3052 }
3053
3054 if (self_task != task) {
3055 task_unlock(self_task);
3056 }
3057
3058 /*
3059 * Make sure the current thread does not get aborted out of
3060 * the waits inside these operations.
3061 */
3062 interrupt_save = thread_interrupt_level(THREAD_UNINT);
3063
3064 /*
3065 * Indicate that we want all the threads to stop executing
3066 * at user space by holding the task (we would have held
3067 * each thread independently in thread_terminate_internal -
3068 * but this way we may be more likely to already find it
3069 * held there). Mark the task inactive, and prevent
3070 * further task operations via the task port.
3071 *
3072 * The vm_map and ipc_space must exist until this function returns,
3073 * convert_port_to_{map,space}_with_flavor relies on this behavior.
3074 */
3075 bool first_suspension __unused = task_hold_locked(task);
3076 task->active = FALSE;
3077 ipc_task_disable(task);
3078
3079 #if CONFIG_EXCLAVES
3080 //rdar://139307390, first suspension might not have done conclave suspend.
3081 first_suspension = true;
3082 if (first_suspension) {
3083 task_unlock(task);
3084 task_suspend_conclave(task);
3085 task_lock(task);
3086 }
3087 #endif /* CONFIG_EXCLAVES */
3088
3089
3090 /*
3091 * Terminate each thread in the task.
3092 */
3093 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3094 thread_terminate_internal(thread);
3095 }
3096
3097 #ifdef MACH_BSD
3098 void *bsd_info = get_bsdtask_info(task);
3099 if (bsd_info != NULL) {
3100 pid = proc_pid(bsd_info);
3101 }
3102 #endif /* MACH_BSD */
3103
3104 task_unlock(task);
3105
3106 #if CONFIG_EXCLAVES
3107 task_stop_conclave(task, false);
3108 #endif /* CONFIG_EXCLAVES */
3109
3110 proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
3111 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3112
3113 /* Early object reap phase */
3114
3115 // PR-17045188: Revisit implementation
3116 // task_partial_reap(task, pid);
3117
3118 #if CONFIG_TASKWATCH
3119 /*
3120 * remove all task watchers
3121 */
3122 task_removewatchers(task);
3123
3124 #endif /* CONFIG_TASKWATCH */
3125
3126 /*
3127 * Destroy all synchronizers owned by the task.
3128 */
3129 task_synchronizer_destroy_all(task);
3130
3131 /*
3132 * Clear the watchport boost on the task.
3133 */
3134 task_remove_turnstile_watchports(task);
3135
3136 /* let iokit know 1 */
3137 iokit_task_terminate(task, 1);
3138
3139 /*
3140 * Destroy the IPC space, leaving just a reference for it.
3141 */
3142 ipc_space_terminate(task->itk_space);
3143
3144 #if 00
3145 /* if some ledgers go negative on tear-down again... */
3146 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3147 task_ledgers.phys_footprint);
3148 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3149 task_ledgers.internal);
3150 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3151 task_ledgers.iokit_mapped);
3152 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3153 task_ledgers.alternate_accounting);
3154 ledger_disable_panic_on_negative(task->map->pmap->ledger,
3155 task_ledgers.alternate_accounting_compressed);
3156 #endif
3157
3158 #if CONFIG_DEFERRED_RECLAIM
3159 /*
3160 * Remove this task's reclaim buffer from the global queues.
3161 */
3162 if (task->deferred_reclamation_metadata != NULL) {
3163 vm_deferred_reclamation_buffer_uninstall(task->deferred_reclamation_metadata);
3164 }
3165 #endif /* CONFIG_DEFERRED_RECLAIM */
3166
3167 /*
3168 * If the current thread is a member of the task
3169 * being terminated, then the last reference to
3170 * the task will not be dropped until the thread
3171 * is finally reaped. To avoid incurring the
3172 * expense of removing the address space regions
3173 * at reap time, we do it explicitly here.
3174 */
3175
3176 #if MACH_ASSERT
3177 /*
3178 * Identify the pmap's process, in case the pmap ledgers drift
3179 * and we have to report it.
3180 */
3181 char procname[17];
3182 void *proc = get_bsdtask_info(task);
3183 if (proc) {
3184 pid = proc_pid(proc);
3185 proc_name_kdp(proc, procname, sizeof(procname));
3186 } else {
3187 pid = 0;
3188 strlcpy(procname, "<unknown>", sizeof(procname));
3189 }
3190 pmap_set_process(task->map->pmap, pid, procname);
3191 if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) {
3192 DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname);
3193 if (debug4k_panic_on_terminate) {
3194 panic("DEBUG4K: %s:%d %d[%s] map %p", __FUNCTION__, __LINE__, pid, procname, task->map);
3195 }
3196 }
3197 #endif /* MACH_ASSERT */
3198
3199 vm_map_terminate(task->map);
3200
3201 /* release our shared region */
3202 vm_shared_region_set(task, NULL);
3203
3204 #if __has_feature(ptrauth_calls)
3205 task_set_shared_region_id(task, NULL);
3206 #endif /* __has_feature(ptrauth_calls) */
3207
3208 lck_mtx_lock(&tasks_threads_lock);
3209 queue_remove(&tasks, task, task_t, tasks);
3210 queue_enter(&terminated_tasks, task, task_t, tasks);
3211 tasks_count--;
3212 terminated_tasks_count++;
3213 lck_mtx_unlock(&tasks_threads_lock);
3214
3215 /*
3216 * We no longer need to guard against being aborted, so restore
3217 * the previous interruptible state.
3218 */
3219 thread_interrupt_level(interrupt_save);
3220
3221 #if CONFIG_CPU_COUNTERS
3222 /* force the task to release all ctrs */
3223 if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
3224 kpc_force_all_ctrs(task, 0);
3225 }
3226 #endif /* CONFIG_CPU_COUNTERS */
3227
3228 #if CONFIG_COALITIONS
3229 /*
3230 * Leave the coalition for corpse task or task that
3231 * never had any active threads (e.g. fork, exec failure).
3232 * For task with active threads, the task will be removed
3233 * from coalition by last terminating thread.
3234 */
3235 if (task->active_thread_count == 0) {
3236 coalitions_remove_task(task);
3237 }
3238 #endif
3239
3240 #if CONFIG_FREEZE
3241 extern int vm_compressor_available;
3242 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) {
3243 task_disown_frozen_csegs(task);
3244 assert(queue_empty(&task->task_frozen_cseg_q));
3245 }
3246 #endif /* CONFIG_FREEZE */
3247
3248
3249 /*
3250 * Get rid of the task active reference on itself.
3251 */
3252 task_deallocate_grp(task, TASK_GRP_INTERNAL);
3253
3254 return KERN_SUCCESS;
3255 }
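/*
 * Illustrative sketch (comment only, not compiled): any hypothetical caller
 * that needs to hold two task locks at once should follow the same
 * address-ordered convention used at the top of task_terminate_internal()
 * above, to avoid lock-order deadlocks:
 *
 *	if (task_a == task_b) {
 *		task_lock(task_a);
 *	} else if (task_a < task_b) {
 *		task_lock(task_a);
 *		task_lock(task_b);
 *	} else {
 *		task_lock(task_b);
 *		task_lock(task_a);
 *	}
 */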
3256
3257 void
3258 tasks_system_suspend(boolean_t suspend)
3259 {
3260 task_t task;
3261
3262 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SUSPEND_USERSPACE) |
3263 (suspend ? DBG_FUNC_START : DBG_FUNC_END));
3264
3265 lck_mtx_lock(&tasks_threads_lock);
3266 assert(tasks_suspend_state != suspend);
3267 tasks_suspend_state = suspend;
3268 queue_iterate(&tasks, task, task_t, tasks) {
3269 if (task == kernel_task) {
3270 continue;
3271 }
3272 suspend ? task_suspend_internal(task) : task_resume_internal(task);
3273 }
3274 lck_mtx_unlock(&tasks_threads_lock);
3275 }
3276
3277 /*
3278 * task_start_halt:
3279 *
3280 * Shut the current task down (except for the current thread) in
3281 * preparation for dramatic changes to the task (probably exec).
3282 * We hold the task and mark all other threads in the task for
3283 * termination.
3284 */
3285 kern_return_t
3286 task_start_halt(task_t task)
3287 {
3288 kern_return_t kr = KERN_SUCCESS;
3289 task_lock(task);
3290 kr = task_start_halt_locked(task, FALSE);
3291 task_unlock(task);
3292 return kr;
3293 }
3294
3295 static kern_return_t
3296 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
3297 {
3298 thread_t thread, self;
3299 uint64_t dispatchqueue_offset;
3300
3301 assert(task != kernel_task);
3302
3303 self = current_thread();
3304
3305 if (task != get_threadtask(self) && !task_is_a_corpse_fork(task)) {
3306 return KERN_INVALID_ARGUMENT;
3307 }
3308
3309 if (!should_mark_corpse &&
3310 (task->halting || !task->active || !self->active)) {
3311 /*
3312 * Task or current thread is already being terminated.
3313 * Hurry up and return out of the current kernel context
3314 * so that we run our AST special handler to terminate
3315 * ourselves. If should_mark_corpse is set, corpse
3316 * creation might have raced with exec; let the corpse
3317 * creation continue. Once the current thread reaches its AST,
3318 * the thread in exec will be woken up from task_complete_halt.
3319 * Exec will fail because the proc was marked for exit.
3320 * Once the thread in exec reaches AST, it will call proc_exit
3321 * and deliver the EXC_CORPSE_NOTIFY.
3322 */
3323 return KERN_FAILURE;
3324 }
3325
3326 /* Thread creation will fail after this point of no return. */
3327 task->halting = TRUE;
3328
3329 /*
3330 * Mark all the threads to keep them from starting any more
3331 * user-level execution. The thread_terminate_internal code
3332 * would do this on a thread by thread basis anyway, but this
3333 * gives us a better chance of not having to wait there.
3334 */
3335 bool first_suspension __unused = task_hold_locked(task);
3336
3337 #if CONFIG_EXCLAVES
3338 if (should_mark_corpse) {
3339 void *crash_info_ptr = task_get_corpseinfo(task);
3340 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3341 if (crash_info_ptr != NULL && thread->th_exclaves_ipc_ctx.ipcb != NULL) {
3342 struct thread_crash_exclaves_info info = { 0 };
3343
3344 info.tcei_flags = kExclaveRPCActive;
3345 info.tcei_scid = thread->th_exclaves_ipc_ctx.scid;
3346 info.tcei_thread_id = thread->thread_id;
3347
3348 kcdata_push_data(crash_info_ptr,
3349 STACKSHOT_KCTYPE_KERN_EXCLAVES_CRASH_THREADINFO,
3350 sizeof(struct thread_crash_exclaves_info), &info);
3351 }
3352 }
3353 }
3354 //rdar://139307390, first suspension might not have done conclave suspend.
3355 first_suspension = true;
3356 if (first_suspension || should_mark_corpse) {
3357 task_unlock(task);
3358 if (first_suspension) {
3359 task_suspend_conclave(task);
3360 }
3361
3362 if (should_mark_corpse) {
3363 task_stop_conclave(task, true);
3364 }
3365 task_lock(task);
3366 }
3367 #endif /* CONFIG_EXCLAVES */
3368
3369 dispatchqueue_offset = get_dispatchqueue_offset_from_proc(get_bsdtask_info(task));
3370 /*
3371 * Terminate all the other threads in the task.
3372 */
3373 queue_iterate(&task->threads, thread, thread_t, task_threads)
3374 {
3375 /*
3376 * Remove priority throttles so that threads terminate in a timely manner. This has
3377 * to be done after task_hold_locked() traps all threads to AST, but before
3378 * threads are marked inactive in thread_terminate_internal(). Takes thread
3379 * mutex lock.
3380 *
3381 * We need the task_is_a_corpse() check so that we don't accidentally update policy
3382 * for tasks that are doing posix_spawn().
3383 *
3384 * See: thread_policy_update_tasklocked().
3385 */
3386 if (task_is_a_corpse(task)) {
3387 proc_set_thread_policy(thread, TASK_POLICY_ATTRIBUTE,
3388 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
3389 }
3390
3391 if (should_mark_corpse) {
3392 thread_mtx_lock(thread);
3393 thread->inspection = TRUE;
3394 thread_mtx_unlock(thread);
3395 }
3396 if (thread != self) {
3397 thread_terminate_internal(thread);
3398 }
3399 }
3400 task->dispatchqueue_offset = dispatchqueue_offset;
3401
3402 task_release_locked(task);
3403
3404 return KERN_SUCCESS;
3405 }
3406
3407
3408 /*
3409 * task_complete_halt:
3410 *
3411 * Complete task halt by waiting for threads to terminate, then clean
3412 * up task resources (VM, port namespace, etc...) and then let the
3413 * current thread go in the (practically empty) task context.
3414 *
3415 * Note: task->halting flag is not cleared in order to avoid creation
3416 * of new thread in old exec'ed task.
3417 */
3418 void
3419 task_complete_halt(task_t task)
3420 {
3421 task_lock(task);
3422 assert(task->halting);
3423 assert(task == current_task());
3424
3425 /*
3426 * Wait for the other threads to get shut down.
3427 * When the last other thread is reaped, we'll be
3428 * woken up.
3429 */
3430 if (task->thread_count > 1) {
3431 assert_wait((event_t)&task->halting, THREAD_UNINT);
3432 task_unlock(task);
3433 thread_block(THREAD_CONTINUE_NULL);
3434 } else {
3435 task_unlock(task);
3436 }
3437
3438 #if CONFIG_DEFERRED_RECLAIM
3439 if (task->deferred_reclamation_metadata) {
3440 vm_deferred_reclamation_buffer_uninstall(
3441 task->deferred_reclamation_metadata);
3442 vm_deferred_reclamation_buffer_deallocate(
3443 task->deferred_reclamation_metadata);
3444 task->deferred_reclamation_metadata = NULL;
3445 }
3446 #endif /* CONFIG_DEFERRED_RECLAIM */
3447
3448 /*
3449 * Give the machine dependent code a chance
3450 * to perform cleanup of task-level resources
3451 * associated with the current thread before
3452 * ripping apart the task.
3453 */
3454 machine_task_terminate(task);
3455
3456 /*
3457 * Destroy all synchronizers owned by the task.
3458 */
3459 task_synchronizer_destroy_all(task);
3460
3461 /* let iokit know 1 */
3462 iokit_task_terminate(task, 1);
3463
3464 /*
3465 * Terminate the IPC space. A long time ago,
3466 * this used to be ipc_space_clean() which would
3467 * keep the space active but hollow it.
3468 *
3469 * We really do not need those semantics now that
3470 * tasks die with exec.
3471 */
3472 ipc_space_terminate(task->itk_space);
3473
3474 /*
3475 * Clean out the address space, as we are going to be
3476 * getting a new one.
3477 */
3478 vm_map_terminate(task->map);
3479
3480 /*
3481 * Kick out any IOKitUser handles to the task. At best they're stale,
3482 * at worst someone is racing a SUID exec.
3483 */
3484 /* let iokit know 2 */
3485 iokit_task_terminate(task, 2);
3486 }
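/*
 * Illustrative pairing (sketch only; the actual caller lives in the exec
 * path outside this file): the two halt phases bracket the point of no
 * return, with the surviving thread finishing the job:
 *
 *	if (task_start_halt(task) == KERN_SUCCESS) {
 *		... commit to the new image ...
 *		task_complete_halt(task);	// current thread is the survivor
 *	}
 */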
3487
3488 #ifdef CONFIG_TASK_SUSPEND_STATS
3489
3490 static void
3491 _task_mark_suspend_source(task_t task)
3492 {
3493 int idx;
3494 task_suspend_stats_t stats;
3495 task_suspend_source_t source;
3496 task_lock_assert_owned(task);
3497 stats = &task->t_suspend_stats;
3498
3499 idx = stats->tss_count % TASK_SUSPEND_SOURCES_MAX;
3500 source = &task->t_suspend_sources[idx];
3501 bzero(source, sizeof(*source));
3502
3503 source->tss_time = mach_absolute_time();
3504 source->tss_tid = current_thread()->thread_id;
3505 source->tss_pid = task_pid(current_task());
3506 strlcpy(source->tss_procname, task_best_name(current_task()),
3507 sizeof(source->tss_procname));
3508
3509 stats->tss_count++;
3510 }
3511
3512 static inline void
3513 _task_mark_suspend_start(task_t task)
3514 {
3515 task_lock_assert_owned(task);
3516 task->t_suspend_stats.tss_last_start = mach_absolute_time();
3517 }
3518
3519 static inline void
3520 _task_mark_suspend_end(task_t task)
3521 {
3522 task_lock_assert_owned(task);
3523 task->t_suspend_stats.tss_last_end = mach_absolute_time();
3524 task->t_suspend_stats.tss_duration += (task->t_suspend_stats.tss_last_end -
3525 task->t_suspend_stats.tss_last_start);
3526 }
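/*
 * Sketch (illustrative only): tss_duration accumulates mach_absolute_time()
 * deltas, so a hypothetical consumer would convert it to nanoseconds before
 * reporting it:
 *
 *	uint64_t suspended_ns;
 *	absolutetime_to_nanoseconds(task->t_suspend_stats.tss_duration,
 *	    &suspended_ns);
 */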
3527
3528 static kern_return_t
3529 _task_get_suspend_stats_locked(task_t task, task_suspend_stats_t stats)
3530 {
3531 if (task == TASK_NULL || stats == NULL) {
3532 return KERN_INVALID_ARGUMENT;
3533 }
3534 task_lock_assert_owned(task);
3535 memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3536 return KERN_SUCCESS;
3537 }
3538
3539 static kern_return_t
3540 _task_get_suspend_sources_locked(task_t task, task_suspend_source_t sources)
3541 {
3542 if (task == TASK_NULL || sources == NULL) {
3543 return KERN_INVALID_ARGUMENT;
3544 }
3545 task_lock_assert_owned(task);
3546 memcpy(sources, task->t_suspend_sources,
3547 sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3548 return KERN_SUCCESS;
3549 }
3550
3551 #endif /* CONFIG_TASK_SUSPEND_STATS */
3552
3553 kern_return_t
3554 task_get_suspend_stats(task_t task, task_suspend_stats_t stats)
3555 {
3556 #ifdef CONFIG_TASK_SUSPEND_STATS
3557 kern_return_t kr;
3558 if (task == TASK_NULL || stats == NULL) {
3559 return KERN_INVALID_ARGUMENT;
3560 }
3561 task_lock(task);
3562 kr = _task_get_suspend_stats_locked(task, stats);
3563 task_unlock(task);
3564 return kr;
3565 #else /* CONFIG_TASK_SUSPEND_STATS */
3566 (void)task;
3567 (void)stats;
3568 return KERN_NOT_SUPPORTED;
3569 #endif
3570 }
3571
3572 kern_return_t
3573 task_get_suspend_stats_kdp(task_t task, task_suspend_stats_t stats)
3574 {
3575 #ifdef CONFIG_TASK_SUSPEND_STATS
3576 if (task == TASK_NULL || stats == NULL) {
3577 return KERN_INVALID_ARGUMENT;
3578 }
3579 memcpy(stats, &task->t_suspend_stats, sizeof(task->t_suspend_stats));
3580 return KERN_SUCCESS;
3581 #else /* CONFIG_TASK_SUSPEND_STATS */
3582 #pragma unused(task, stats)
3583 return KERN_NOT_SUPPORTED;
3584 #endif /* CONFIG_TASK_SUSPEND_STATS */
3585 }
3586
3587 kern_return_t
3588 task_get_suspend_sources(task_t task, task_suspend_source_array_t sources)
3589 {
3590 #ifdef CONFIG_TASK_SUSPEND_STATS
3591 kern_return_t kr;
3592 if (task == TASK_NULL || sources == NULL) {
3593 return KERN_INVALID_ARGUMENT;
3594 }
3595 task_lock(task);
3596 kr = _task_get_suspend_sources_locked(task, sources);
3597 task_unlock(task);
3598 return kr;
3599 #else /* CONFIG_TASK_SUSPEND_STATS */
3600 (void)task;
3601 (void)sources;
3602 return KERN_NOT_SUPPORTED;
3603 #endif
3604 }
3605
3606 kern_return_t
3607 task_get_suspend_sources_kdp(task_t task, task_suspend_source_array_t sources)
3608 {
3609 #ifdef CONFIG_TASK_SUSPEND_STATS
3610 if (task == TASK_NULL || sources == NULL) {
3611 return KERN_INVALID_ARGUMENT;
3612 }
3613 memcpy(sources, task->t_suspend_sources,
3614 sizeof(struct task_suspend_source_s) * TASK_SUSPEND_SOURCES_MAX);
3615 return KERN_SUCCESS;
3616 #else /* CONFIG_TASK_SUSPEND_STATS */
3617 #pragma unused(task, sources)
3618 return KERN_NOT_SUPPORTED;
3619 #endif
3620 }
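/*
 * Usage sketch (illustrative; the struct tag below is assumed from the
 * task_suspend_stats_t typedef and may differ): an in-kernel consumer copies
 * the stats out under the task lock via the wrapper above:
 *
 *	struct task_suspend_stats_s stats;
 *	if (task_get_suspend_stats(task, &stats) == KERN_SUCCESS) {
 *		... stats.tss_count, stats.tss_duration ...
 *	}
 */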
3621
3622 /*
3623 * task_hold_locked:
3624 *
3625 * Suspend execution of the specified task.
3626 * This is a recursive-style suspension of the task, a count of
3627 * suspends is maintained.
3628 *
3629 * CONDITIONS: the task is locked and active.
3630 * Returns true if this was first suspension
3631 */
3632 bool
3633 task_hold_locked(
3634 task_t task)
3635 {
3636 thread_t thread;
3637 void *bsd_info = get_bsdtask_info(task);
3638
3639 assert(task->active);
3640
3641 if (task->suspend_count++ > 0) {
3642 return false;
3643 }
3644
3645 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_SUSPEND),
3646 task_pid(task), task->user_stop_count, task->pidsuspended);
3647
3648 if (bsd_info) {
3649 workq_proc_suspended(bsd_info);
3650 }
3651
3652 /*
3653 * Iterate through all the threads and hold them.
3654 */
3655 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3656 thread_mtx_lock(thread);
3657 thread_hold(thread);
3658 thread_mtx_unlock(thread);
3659 }
3660
3661 #ifdef CONFIG_TASK_SUSPEND_STATS
3662 _task_mark_suspend_start(task);
3663 #endif
3664 return true;
3665 }
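/*
 * Sketch (not compiled): holds are counted, so each task_hold_locked() must
 * be balanced by exactly one task_release_locked() while the task lock is
 * held; only the first hold and the last release actually touch the threads:
 *
 *	task_lock(task);
 *	(void)task_hold_locked(task);	// suspend_count 0 -> 1: threads held
 *	...
 *	task_release_locked(task);	// suspend_count 1 -> 0: threads released
 *	task_unlock(task);
 */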
3666
3667 /*
3668 * task_hold_and_wait
3669 *
3670 * Same as the internal routine above, except that it must lock
3671 * and verify that the task is active. This differs from task_suspend
3672 * in that it places a kernel hold on the task rather than just a
3673 * user-level hold. This keeps users from over-resuming the task and setting
3674 * it running out from under the kernel.
3675 *
3676 * CONDITIONS: the caller holds a reference on the task
3677 */
3678 kern_return_t
3679 task_hold_and_wait(
3680 task_t task,
3681 bool suspend_conclave __unused)
3682 {
3683 if (task == TASK_NULL) {
3684 return KERN_INVALID_ARGUMENT;
3685 }
3686
3687 task_lock(task);
3688 if (!task->active) {
3689 task_unlock(task);
3690 return KERN_FAILURE;
3691 }
3692
3693 #ifdef CONFIG_TASK_SUSPEND_STATS
3694 _task_mark_suspend_source(task);
3695 #endif /* CONFIG_TASK_SUSPEND_STATS */
3696
3697 bool first_suspension __unused = task_hold_locked(task);
3698
3699 #if CONFIG_EXCLAVES
3700 //rdar://139307390, first suspension might not have done conclave suspend.
3701 first_suspension = true;
3702 if (suspend_conclave && first_suspension) {
3703 task_unlock(task);
3704 task_suspend_conclave(task);
3705 task_lock(task);
3706 /*
3707 * If task terminated/resumed before we could wait on threads, then
3708 * it is a race we lost and we could treat that as termination/resume
3709 * happened after the wait and return SUCCESS.
3710 */
3711 if (!task->active || task->suspend_count <= 0) {
3712 task_unlock(task);
3713 return KERN_SUCCESS;
3714 }
3715 }
3716 #endif /* CONFIG_EXCLAVES */
3717
3718 task_wait_locked(task, FALSE);
3719 task_unlock(task);
3720
3721 return KERN_SUCCESS;
3722 }
3723
3724 /*
3725 * task_wait_locked:
3726 *
3727 * Wait for all threads in task to stop.
3728 *
3729 * Conditions:
3730 * Called with task locked, active, and held.
3731 */
3732 void
3733 task_wait_locked(
3734 task_t task,
3735 boolean_t until_not_runnable)
3736 {
3737 thread_t thread, self;
3738
3739 assert(task->active);
3740 assert(task->suspend_count > 0);
3741
3742 self = current_thread();
3743
3744 /*
3745 * Iterate through all the threads and wait for them to
3746 * stop. Do not wait for the current thread if it is within
3747 * the task.
3748 */
3749 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3750 if (thread != self) {
3751 thread_wait(thread, until_not_runnable);
3752 }
3753 }
3754 }
3755
3756 boolean_t
3757 task_is_app_suspended(task_t task)
3758 {
3759 return task->pidsuspended;
3760 }
3761
3762 /*
3763 * task_release_locked:
3764 *
3765 * Release a kernel hold on a task.
3766 *
3767 * CONDITIONS: the task is locked and active
3768 */
3769 void
3770 task_release_locked(
3771 task_t task)
3772 {
3773 thread_t thread;
3774 void *bsd_info = get_bsdtask_info(task);
3775
3776 assert(task->active);
3777 assert(task->suspend_count > 0);
3778
3779 if (--task->suspend_count > 0) {
3780 return;
3781 }
3782
3783 if (bsd_info) {
3784 workq_proc_resumed(bsd_info);
3785 }
3786
3787 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3788 thread_mtx_lock(thread);
3789 thread_release(thread);
3790 thread_mtx_unlock(thread);
3791 }
3792
3793 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_TASK_RESUME) | DBG_FUNC_NONE, task_pid(task));
3794
3795 #if CONFIG_TASK_SUSPEND_STATS
3796 _task_mark_suspend_end(task);
3797 #endif
3798
3799 //rdar://139307390.
3800 #if 0
3801 #if CONFIG_EXCLAVES
3802 task_unlock(task);
3803 task_resume_conclave(task);
3804 task_lock(task);
3805 #endif /* CONFIG_EXCLAVES */
3806 #endif
3807 }
3808
3809 /*
3810 * task_release:
3811 *
3812 * Same as the internal routine above, except that it must lock
3813 * and verify that the task is active.
3814 *
3815 * CONDITIONS: The caller holds a reference to the task
3816 */
3817 kern_return_t
3818 task_release(
3819 task_t task)
3820 {
3821 if (task == TASK_NULL) {
3822 return KERN_INVALID_ARGUMENT;
3823 }
3824
3825 task_lock(task);
3826
3827 if (!task->active) {
3828 task_unlock(task);
3829
3830 return KERN_FAILURE;
3831 }
3832
3833 task_release_locked(task);
3834 task_unlock(task);
3835
3836 return KERN_SUCCESS;
3837 }
3838
3839 static kern_return_t
3840 task_threads_internal(
3841 task_t task,
3842 thread_act_array_t *threads_out,
3843 mach_msg_type_number_t *countp,
3844 mach_thread_flavor_t flavor)
3845 {
3846 mach_msg_type_number_t actual, count, count_needed;
3847 thread_act_array_t thread_list;
3848 thread_t thread;
3849 unsigned int i;
3850
3851 count = 0;
3852 thread_list = NULL;
3853
3854 if (task == TASK_NULL) {
3855 return KERN_INVALID_ARGUMENT;
3856 }
3857
3858 assert(flavor <= THREAD_FLAVOR_INSPECT);
3859
3860 for (;;) {
3861 task_lock(task);
3862 if (!task->active) {
3863 task_unlock(task);
3864
3865 mach_port_array_free(thread_list, count);
3866 return KERN_FAILURE;
3867 }
3868
3869 count_needed = actual = task->thread_count;
3870 if (count_needed <= count) {
3871 break;
3872 }
3873
3874 /* unlock the task and allocate more memory */
3875 task_unlock(task);
3876
3877 mach_port_array_free(thread_list, count);
3878 count = count_needed;
3879 thread_list = mach_port_array_alloc(count, Z_WAITOK);
3880
3881 if (thread_list == NULL) {
3882 return KERN_RESOURCE_SHORTAGE;
3883 }
3884 }
3885
3886 i = 0;
3887 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3888 assert(i < actual);
3889 thread_reference(thread);
3890 ((thread_t *)thread_list)[i++] = thread;
3891 }
3892
3893 count_needed = actual;
3894
3895 /* can unlock task now that we've got the thread refs */
3896 task_unlock(task);
3897
3898 if (actual == 0) {
3899 /* no threads, so return null pointer and deallocate memory */
3900
3901 mach_port_array_free(thread_list, count);
3902
3903 *threads_out = NULL;
3904 *countp = 0;
3905 } else {
3906 /* if we allocated too much, must copy */
3907 if (count_needed < count) {
3908 mach_port_array_t newaddr;
3909
3910 newaddr = mach_port_array_alloc(count_needed, Z_WAITOK);
3911 if (newaddr == NULL) {
3912 for (i = 0; i < actual; ++i) {
3913 thread_deallocate(((thread_t *)thread_list)[i]);
3914 }
3915 mach_port_array_free(thread_list, count);
3916 return KERN_RESOURCE_SHORTAGE;
3917 }
3918
3919 bcopy(thread_list, newaddr, count_needed * sizeof(thread_t));
3920 mach_port_array_free(thread_list, count);
3921 thread_list = newaddr;
3922 }
3923
3924 /* do the conversion that Mig should handle */
3925 convert_thread_array_to_ports(thread_list, actual, flavor);
3926
3927 *threads_out = thread_list;
3928 *countp = actual;
3929 }
3930
3931 return KERN_SUCCESS;
3932 }
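/*
 * User-space usage sketch (illustrative; not part of this file): the MIG
 * routine backed by task_threads_internal() returns out-of-line port rights
 * that the caller is expected to release:
 *
 *	thread_act_array_t threads;
 *	mach_msg_type_number_t count;
 *	if (task_threads(task_port, &threads, &count) == KERN_SUCCESS) {
 *		for (mach_msg_type_number_t i = 0; i < count; i++) {
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		}
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *		    count * sizeof(threads[0]));
 *	}
 */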
3933
3934
3935 kern_return_t
3936 task_threads_from_user(
3937 mach_port_t port,
3938 thread_act_array_t *threads_out,
3939 mach_msg_type_number_t *count)
3940 {
3941 ipc_kobject_type_t kotype;
3942 kern_return_t kr;
3943
3944 task_t task = convert_port_to_task_inspect_no_eval(port);
3945
3946 if (task == TASK_NULL) {
3947 return KERN_INVALID_ARGUMENT;
3948 }
3949
3950 kotype = ip_kotype(port);
3951
3952 switch (kotype) {
3953 case IKOT_TASK_CONTROL:
3954 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
3955 break;
3956 case IKOT_TASK_READ:
3957 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
3958 break;
3959 case IKOT_TASK_INSPECT:
3960 kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
3961 break;
3962 default:
3963 panic("strange kobject type");
3964 break;
3965 }
3966
3967 task_deallocate(task);
3968 return kr;
3969 }
3970
3971 #define TASK_HOLD_NORMAL 0
3972 #define TASK_HOLD_PIDSUSPEND 1
3973 #define TASK_HOLD_LEGACY 2
3974 #define TASK_HOLD_LEGACY_ALL 3
3975
3976 static kern_return_t
3977 place_task_hold(
3978 task_t task,
3979 int mode)
3980 {
3981 if (!task->active && !task_is_a_corpse(task)) {
3982 return KERN_FAILURE;
3983 }
3984
3985 /* Return success for corpse task */
3986 if (task_is_a_corpse(task)) {
3987 return KERN_SUCCESS;
3988 }
3989
3990 #if MACH_ASSERT
3991 current_task()->suspends_outstanding++;
3992 #endif
3993
3994 if (mode == TASK_HOLD_LEGACY) {
3995 task->legacy_stop_count++;
3996 }
3997
3998 #ifdef CONFIG_TASK_SUSPEND_STATS
3999 _task_mark_suspend_source(task);
4000 #endif /* CONFIG_TASK_SUSPEND_STATS */
4001
4002 if (task->user_stop_count++ > 0) {
4003 /*
4004 * If the stop count was positive, the task is
4005 * already stopped and we can exit.
4006 */
4007 return KERN_SUCCESS;
4008 }
4009
4010 /*
4011 * Put a kernel-level hold on the threads in the task (all
4012 * user-level task suspensions added together represent a
4013 * single kernel-level hold). We then wait for the threads
4014 * to stop executing user code.
4015 */
4016 bool first_suspension __unused = task_hold_locked(task);
4017
4018 //rdar://139307390, do not suspend conclave on task suspend.
4019 #if 0
4020 #if CONFIG_EXCLAVES
4021 if (first_suspension) {
4022 task_unlock(task);
4023 task_suspend_conclave(task);
4024
4025 /*
4026 * If task terminated/resumed before we could wait on threads, then
4027 * it is a race we lost and we could treat that as termination/resume
4028 * happened after the wait and return SUCCESS.
4029 */
4030 task_lock(task);
4031 if (!task->active || task->suspend_count <= 0) {
4032 return KERN_SUCCESS;
4033 }
4034 }
4035 #endif /* CONFIG_EXCLAVES */
4036 #endif
4037
4038 task_wait_locked(task, FALSE);
4039
4040 return KERN_SUCCESS;
4041 }
4042
4043 static kern_return_t
4044 release_task_hold(
4045 task_t task,
4046 int mode)
4047 {
4048 boolean_t release = FALSE;
4049
4050 if (!task->active && !task_is_a_corpse(task)) {
4051 return KERN_FAILURE;
4052 }
4053
4054 /* Return success for corpse task */
4055 if (task_is_a_corpse(task)) {
4056 return KERN_SUCCESS;
4057 }
4058
4059 if (mode == TASK_HOLD_PIDSUSPEND) {
4060 if (task->pidsuspended == FALSE) {
4061 return KERN_FAILURE;
4062 }
4063 task->pidsuspended = FALSE;
4064 }
4065
4066 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
4067 #if MACH_ASSERT
4068 /*
4069 * This is obviously not robust; if we suspend one task and then resume a different one,
4070 * we'll fly under the radar. This is only meant to catch the common case of a crashed
4071 * or buggy suspender.
4072 */
4073 current_task()->suspends_outstanding--;
4074 #endif
4075
4076 if (mode == TASK_HOLD_LEGACY_ALL) {
4077 if (task->legacy_stop_count >= task->user_stop_count) {
4078 task->user_stop_count = 0;
4079 release = TRUE;
4080 } else {
4081 task->user_stop_count -= task->legacy_stop_count;
4082 }
4083 task->legacy_stop_count = 0;
4084 } else {
4085 if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
4086 task->legacy_stop_count--;
4087 }
4088 if (--task->user_stop_count == 0) {
4089 release = TRUE;
4090 }
4091 }
4092 } else {
4093 return KERN_FAILURE;
4094 }
4095
4096 /*
4097 * Release the task if necessary.
4098 */
4099 if (release) {
4100 task_release_locked(task);
4101 }
4102
4103 return KERN_SUCCESS;
4104 }
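/*
 * Sketch (not compiled): a hold placed with place_task_hold() is dropped
 * with release_task_hold() in the matching mode, under the task lock,
 * exactly as task_suspend_internal()/task_resume_internal() do below:
 *
 *	task_lock(task);
 *	kr = place_task_hold(task, TASK_HOLD_NORMAL);
 *	task_unlock(task);
 *	...
 *	task_lock(task);
 *	(void)release_task_hold(task, TASK_HOLD_NORMAL);
 *	task_unlock(task);
 */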
4105
4106 boolean_t
4107 get_task_suspended(task_t task)
4108 {
4109 return 0 != task->user_stop_count;
4110 }
4111
4112 /*
4113 * task_suspend:
4114 *
4115 * Implement an (old-fashioned) user-level suspension on a task.
4116 *
4117 * Because the user isn't expecting to have to manage a suspension
4118 * token, we'll track it on the caller's behalf in the kernel in the form of a naked
4119 * send right to the task's resume port. All such send rights
4120 * account for a single suspension against the task (unlike task_suspend2()
4121 * where each caller gets a unique suspension count represented by a
4122 * unique send-once right).
4123 *
4124 * Conditions:
4125 * The caller holds a reference to the task
4126 */
4127 kern_return_t
4128 task_suspend(
4129 task_t task)
4130 {
4131 kern_return_t kr;
4132 mach_port_t port;
4133 mach_port_name_t name;
4134
4135 if (task == TASK_NULL || task == kernel_task) {
4136 return KERN_INVALID_ARGUMENT;
4137 }
4138
4139 /*
4140 * place a legacy hold on the task.
4141 */
4142 task_lock(task);
4143 kr = place_task_hold(task, TASK_HOLD_LEGACY);
4144 task_unlock(task);
4145
4146 if (kr != KERN_SUCCESS) {
4147 return kr;
4148 }
4149
4150 /*
4151 * Claim a send right on the task resume port, and request a no-senders
4152 * notification on that port (if none outstanding).
4153 */
4154 itk_lock(task);
4155 port = task->itk_resume;
4156 if (port == IP_NULL) {
4157 port = ipc_kobject_alloc_port(task, IKOT_TASK_RESUME,
4158 IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
4159 task->itk_resume = port;
4160 } else {
4161 (void)ipc_kobject_make_send_nsrequest(port, task, IKOT_TASK_RESUME);
4162 }
4163 itk_unlock(task);
4164
4165 /*
4166 * Copyout the send right into the calling task's IPC space. It won't know it is there,
4167 * but we'll look it up when calling a traditional resume. Any IPC operations that
4168 * deallocate the send right will auto-release the suspension.
4169 */
4170 if (IP_VALID(port)) {
4171 kr = ipc_object_copyout(current_space(), ip_to_object(port),
4172 MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
4173 NULL, NULL, &name);
4174 } else {
4175 kr = KERN_SUCCESS;
4176 }
4177 if (kr != KERN_SUCCESS) {
4178 printf("warning: %s(%d) failed to copyout suspension "
4179 "token for pid %d with error: %d\n",
4180 proc_name_address(get_bsdtask_info(current_task())),
4181 proc_pid(get_bsdtask_info(current_task())),
4182 task_pid(task), kr);
4183 }
4184
4185 return kr;
4186 }
4187
4188 /*
4189 * task_resume:
4190 * Release a user hold on a task.
4191 *
4192 * Conditions:
4193 * The caller holds a reference to the task
4194 */
4195 kern_return_t
4196 task_resume(
4197 task_t task)
4198 {
4199 kern_return_t kr;
4200 mach_port_name_t resume_port_name;
4201 ipc_entry_t resume_port_entry;
4202 ipc_space_t space = current_task()->itk_space;
4203
4204 if (task == TASK_NULL || task == kernel_task) {
4205 return KERN_INVALID_ARGUMENT;
4206 }
4207
4208 /* release a legacy task hold */
4209 task_lock(task);
4210 kr = release_task_hold(task, TASK_HOLD_LEGACY);
4211 task_unlock(task);
4212
4213 itk_lock(task); /* for itk_resume */
4214 is_write_lock(space); /* spin lock */
4215 if (is_active(space) && IP_VALID(task->itk_resume) &&
4216 ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
4217 /*
4218 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
4219 * we are holding one less legacy hold on the task from this caller. If the release failed,
4220 * go ahead and drop all the rights, as someone either already released our holds or the task
4221 * is gone.
4222 */
4223 itk_unlock(task);
4224 if (kr == KERN_SUCCESS) {
4225 ipc_right_dealloc(space, resume_port_name, resume_port_entry);
4226 } else {
4227 ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
4228 }
4229 /* space unlocked */
4230 } else {
4231 itk_unlock(task);
4232 is_write_unlock(space);
4233 if (kr == KERN_SUCCESS) {
4234 printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
4235 proc_name_address(get_bsdtask_info(current_task())), proc_pid(get_bsdtask_info(current_task())),
4236 task_pid(task));
4237 }
4238 }
4239
4240 return kr;
4241 }
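/*
 * User-space sketch (illustrative): the legacy pair hides the suspension
 * token in the caller's IPC space, so usage stays symmetric:
 *
 *	if (task_suspend(task_port) == KERN_SUCCESS) {
 *		... inspect the stopped task ...
 *		(void)task_resume(task_port);
 *	}
 */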
4242
4243 /*
4244 * Suspend the target task.
4245 * Making/holding a token/reference/port is the caller's responsibility.
4246 */
4247 kern_return_t
4248 task_suspend_internal(task_t task)
4249 {
4250 kern_return_t kr;
4251
4252 if (task == TASK_NULL || task == kernel_task) {
4253 return KERN_INVALID_ARGUMENT;
4254 }
4255
4256 task_lock(task);
4257 kr = place_task_hold(task, TASK_HOLD_NORMAL);
4258 task_unlock(task);
4259 return kr;
4260 }
4261
4262 /*
4263 * Suspend the target task, and return a suspension token. The token
4264 * represents a reference on the suspended task.
4265 */
4266 static kern_return_t
4267 task_suspend2_grp(
4268 task_t task,
4269 task_suspension_token_t *suspend_token,
4270 task_grp_t grp)
4271 {
4272 kern_return_t kr;
4273
4274 kr = task_suspend_internal(task);
4275 if (kr != KERN_SUCCESS) {
4276 *suspend_token = TASK_NULL;
4277 return kr;
4278 }
4279
4280 /*
4281 * Take a reference on the target task and return that to the caller
4282 * as a "suspension token," which can be converted into an SO right to
4283 * the now-suspended task's resume port.
4284 */
4285 task_reference_grp(task, grp);
4286 *suspend_token = task;
4287
4288 return KERN_SUCCESS;
4289 }
4290
4291 kern_return_t
4292 task_suspend2_mig(
4293 task_t task,
4294 task_suspension_token_t *suspend_token)
4295 {
4296 return task_suspend2_grp(task, suspend_token, TASK_GRP_MIG);
4297 }
4298
4299 kern_return_t
4300 task_suspend2_external(
4301 task_t task,
4302 task_suspension_token_t *suspend_token)
4303 {
4304 return task_suspend2_grp(task, suspend_token, TASK_GRP_EXTERNAL);
4305 }
4306
4307 /*
4308 * Resume the task
4309 * (reference/token/port management is caller's responsibility).
4310 */
4311 kern_return_t
4312 task_resume_internal(
4313 task_suspension_token_t task)
4314 {
4315 kern_return_t kr;
4316
4317 if (task == TASK_NULL || task == kernel_task) {
4318 return KERN_INVALID_ARGUMENT;
4319 }
4320
4321 task_lock(task);
4322 kr = release_task_hold(task, TASK_HOLD_NORMAL);
4323 task_unlock(task);
4324 return kr;
4325 }
4326
4327 /*
4328 * Resume the task using a suspension token. Consumes the token's ref.
4329 */
4330 static kern_return_t
4331 task_resume2_grp(
4332 task_suspension_token_t task,
4333 task_grp_t grp)
4334 {
4335 kern_return_t kr;
4336
4337 kr = task_resume_internal(task);
4338 task_suspension_token_deallocate_grp(task, grp);
4339
4340 return kr;
4341 }
4342
4343 kern_return_t
4344 task_resume2_mig(
4345 task_suspension_token_t task)
4346 {
4347 return task_resume2_grp(task, TASK_GRP_MIG);
4348 }
4349
4350 kern_return_t
4351 task_resume2_external(
4352 task_suspension_token_t task)
4353 {
4354 return task_resume2_grp(task, TASK_GRP_EXTERNAL);
4355 }
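/*
 * Usage sketch (illustrative): the token flavor returns a suspension token
 * that both names and pins the suspended task; resuming through it also
 * consumes the token's reference:
 *
 *	task_suspension_token_t token;
 *	if (task_suspend2_external(task, &token) == KERN_SUCCESS) {
 *		...
 *		(void)task_resume2_external(token);	// drops the token ref
 *	}
 */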
4356
4357 static void
4358 task_suspension_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
4359 {
4360 task_t task = convert_port_to_task_suspension_token(port);
4361 kern_return_t kr;
4362
4363 if (task == TASK_NULL) {
4364 return;
4365 }
4366
4367 if (task == kernel_task) {
4368 task_suspension_token_deallocate(task);
4369 return;
4370 }
4371
4372 task_lock(task);
4373
4374 kr = ipc_kobject_nsrequest(port, mscount, NULL);
4375 if (kr == KERN_FAILURE) {
4376 /* release all the [remaining] outstanding legacy holds */
4377 release_task_hold(task, TASK_HOLD_LEGACY_ALL);
4378 }
4379
4380 task_unlock(task);
4381
4382 task_suspension_token_deallocate(task); /* drop token reference */
4383 }
4384
4385 /*
4386 * Fires when a send-once right made
4387 * by convert_task_suspension_token_to_port() dies.
4388 */
4389 void
4390 task_suspension_send_once(ipc_port_t port)
4391 {
4392 task_t task = convert_port_to_task_suspension_token(port);
4393
4394 if (task == TASK_NULL || task == kernel_task) {
4395 return; /* nothing to do */
4396 }
4397
4398 /* release the hold held by this specific send-once right */
4399 task_lock(task);
4400 release_task_hold(task, TASK_HOLD_NORMAL);
4401 task_unlock(task);
4402
4403 task_suspension_token_deallocate(task); /* drop token reference */
4404 }
4405
4406 static kern_return_t
4407 task_pidsuspend_locked(task_t task)
4408 {
4409 kern_return_t kr;
4410
4411 if (task->pidsuspended) {
4412 kr = KERN_FAILURE;
4413 goto out;
4414 }
4415
4416 task->pidsuspended = TRUE;
4417
4418 kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
4419 if (kr != KERN_SUCCESS) {
4420 task->pidsuspended = FALSE;
4421 }
4422 out:
4423 return kr;
4424 }
4425
4426
4427 /*
4428 * task_pidsuspend:
4429 *
4430 * Suspends a task by placing a hold on its threads.
4431 *
4432 * Conditions:
4433 * The caller holds a reference to the task
4434 */
4435 kern_return_t
4436 task_pidsuspend(
4437 task_t task)
4438 {
4439 kern_return_t kr;
4440
4441 if (task == TASK_NULL || task == kernel_task) {
4442 return KERN_INVALID_ARGUMENT;
4443 }
4444
4445 task_lock(task);
4446
4447 kr = task_pidsuspend_locked(task);
4448
4449 task_unlock(task);
4450
4451 if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4452 iokit_task_app_suspended_changed(task);
4453 }
4454
4455 return kr;
4456 }
4457
4458 /*
4459 * task_pidresume:
4460 * Resumes a previously suspended task.
4461 *
4462 * Conditions:
4463 * The caller holds a reference to the task
4464 */
4465 kern_return_t
4466 task_pidresume(
4467 task_t task)
4468 {
4469 kern_return_t kr;
4470
4471 if (task == TASK_NULL || task == kernel_task) {
4472 return KERN_INVALID_ARGUMENT;
4473 }
4474
4475 task_lock(task);
4476
4477 #if CONFIG_FREEZE
4478
4479 while (task->changing_freeze_state) {
4480 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
4481 task_unlock(task);
4482 thread_block(THREAD_CONTINUE_NULL);
4483
4484 task_lock(task);
4485 }
4486 task->changing_freeze_state = TRUE;
4487 #endif
4488
4489 kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
4490
4491 task_unlock(task);
4492
4493 if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
4494 iokit_task_app_suspended_changed(task);
4495 }
4496
4497 #if CONFIG_FREEZE
4498
4499 task_lock(task);
4500
4501 if (kr == KERN_SUCCESS) {
4502 task->frozen = FALSE;
4503 }
4504 task->changing_freeze_state = FALSE;
4505 thread_wakeup(&task->changing_freeze_state);
4506
4507 task_unlock(task);
4508 #endif
4509
4510 return kr;
4511 }
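/*
 * Sketch (illustrative, assuming a pid_suspend()-style caller outside this
 * file): the pidsuspend/pidresume pair carries a single app-level hold on
 * top of the normal counted holds, so it is used as a strict pair:
 *
 *	if (task_pidsuspend(task) == KERN_SUCCESS) {
 *		...
 *		(void)task_pidresume(task);
 *	}
 */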
4512
4513 os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);
4514
4515 /*
4516 * task_add_turnstile_watchports:
4517 * Setup watchports to boost the main thread of the task.
4518 *
4519 * Arguments:
4520 * task: task being spawned
4521 * thread: main thread of task
4522 * portwatch_ports: array of watchports
4523 * portwatch_count: number of watchports
4524 *
4525 * Conditions:
4526 * Nothing locked.
4527 */
4528 void
4529 task_add_turnstile_watchports(
4530 task_t task,
4531 thread_t thread,
4532 ipc_port_t *portwatch_ports,
4533 uint32_t portwatch_count)
4534 {
4535 struct task_watchports *watchports = NULL;
4536 struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
4537 os_ref_count_t refs;
4538
4539 /* Check if the task has terminated */
4540 if (!task->active) {
4541 return;
4542 }
4543
4544 assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);
4545
4546 watchports = task_watchports_alloc_init(task, thread, portwatch_count);
4547
4548 /* Lock the ipc space */
4549 is_write_lock(task->itk_space);
4550
4551 /* Setup watchports to boost the main thread */
4552 refs = task_add_turnstile_watchports_locked(task,
4553 watchports, previous_elem_array, portwatch_ports,
4554 portwatch_count);
4555
4556 /* Drop the space lock */
4557 is_write_unlock(task->itk_space);
4558
4559 if (refs == 0) {
4560 task_watchports_deallocate(watchports);
4561 }
4562
4563 /* Drop the ref on previous_elem_array */
4564 for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
4565 task_watchport_elem_deallocate(previous_elem_array[i]);
4566 }
4567 }
4568
4569 /*
4570 * task_remove_turnstile_watchports:
4571 * Clear all turnstile boost on the task from watchports.
4572 *
4573 * Arguments:
4574 * task: task being terminated
4575 *
4576 * Conditions:
4577 * Nothing locked.
4578 */
4579 void
4580 task_remove_turnstile_watchports(
4581 task_t task)
4582 {
4583 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4584 struct task_watchports *watchports = NULL;
4585 ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
4586 uint32_t portwatch_count;
4587
4588 /* Lock the ipc space */
4589 is_write_lock(task->itk_space);
4590
4591 /* Check if a watchport boost exists */
4592 if (task->watchports == NULL) {
4593 is_write_unlock(task->itk_space);
4594 return;
4595 }
4596 watchports = task->watchports;
4597 portwatch_count = watchports->tw_elem_array_count;
4598
4599 refs = task_remove_turnstile_watchports_locked(task, watchports,
4600 port_freelist);
4601
4602 is_write_unlock(task->itk_space);
4603
4604 /* Drop all the port references */
4605 for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
4606 ip_release(port_freelist[i]);
4607 }
4608
4609 /* Clear the task and thread references for task_watchport */
4610 if (refs == 0) {
4611 task_watchports_deallocate(watchports);
4612 }
4613 }
4614
4615 /*
4616 * task_transfer_turnstile_watchports:
4617 * Transfer all watchport turnstile boost from old task to new task.
4618 *
4619 * Arguments:
4620 * old_task: task calling exec
4621 * new_task: new exec'ed task
4622 * thread: main thread of new task
4623 *
4624 * Conditions:
4625 * Nothing locked.
4626 */
4627 void
4628 task_transfer_turnstile_watchports(
4629 task_t old_task,
4630 task_t new_task,
4631 thread_t new_thread)
4632 {
4633 struct task_watchports *old_watchports = NULL;
4634 struct task_watchports *new_watchports = NULL;
4635 os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
4636 os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
4637 uint32_t portwatch_count;
4638
4639 if (old_task->watchports == NULL || !new_task->active) {
4640 return;
4641 }
4642
4643 /* Get the watch port count from the old task */
4644 is_write_lock(old_task->itk_space);
4645 if (old_task->watchports == NULL) {
4646 is_write_unlock(old_task->itk_space);
4647 return;
4648 }
4649
4650 portwatch_count = old_task->watchports->tw_elem_array_count;
4651 is_write_unlock(old_task->itk_space);
4652
4653 new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);
4654
4655 /* Lock the ipc space for old task */
4656 is_write_lock(old_task->itk_space);
4657
4658 /* Lock the ipc space for new task */
4659 is_write_lock(new_task->itk_space);
4660
4661 /* Check if a watchport boost exists */
4662 if (old_task->watchports == NULL || !new_task->active) {
4663 is_write_unlock(new_task->itk_space);
4664 is_write_unlock(old_task->itk_space);
4665 (void)task_watchports_release(new_watchports);
4666 task_watchports_deallocate(new_watchports);
4667 return;
4668 }
4669
4670 old_watchports = old_task->watchports;
4671 assert(portwatch_count == old_task->watchports->tw_elem_array_count);
4672
4673 /* Setup new task watchports */
4674 new_task->watchports = new_watchports;
4675
4676 for (uint32_t i = 0; i < portwatch_count; i++) {
4677 ipc_port_t port = old_watchports->tw_elem[i].twe_port;
4678
4679 if (port == NULL) {
4680 task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4681 continue;
4682 }
4683
4684 /* Lock the port and check if it has the entry */
4685 ip_mq_lock(port);
4686
4687 task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);
4688
4689 if (ipc_port_replace_watchport_elem_conditional_locked(port,
4690 &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
4691 task_watchport_elem_clear(&old_watchports->tw_elem[i]);
4692
4693 task_watchports_retain(new_watchports);
4694 old_refs = task_watchports_release(old_watchports);
4695
4696 /* Check if all ports are cleaned */
4697 if (old_refs == 0) {
4698 old_task->watchports = NULL;
4699 }
4700 } else {
4701 task_watchport_elem_clear(&new_watchports->tw_elem[i]);
4702 }
4703 /* port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
4704 }
4705
4706 /* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
4707 new_refs = task_watchports_release(new_watchports);
4708 if (new_refs == 0) {
4709 new_task->watchports = NULL;
4710 }
4711
4712 is_write_unlock(new_task->itk_space);
4713 is_write_unlock(old_task->itk_space);
4714
4715 /* Clear the task and thread references for old_watchport */
4716 if (old_refs == 0) {
4717 task_watchports_deallocate(old_watchports);
4718 }
4719
4720 /* Clear the task and thread references for new_watchport */
4721 if (new_refs == 0) {
4722 task_watchports_deallocate(new_watchports);
4723 }
4724 }
4725
4726 /*
4727 * task_add_turnstile_watchports_locked:
4728 * Setup watchports to boost the main thread of the task.
4729 *
4730 * Arguments:
4731 * task: task to boost
4732 * watchports: watchport structure to be attached to the task
4733 * previous_elem_array: an array of old watchport_elem to be returned to caller
4734 * portwatch_ports: array of watchports
4735 * portwatch_count: number of watchports
4736 *
4737 * Conditions:
4738 * ipc space of the task locked.
4739 * returns array of old watchport_elem in previous_elem_array
4740 */
4741 static os_ref_count_t
4742 task_add_turnstile_watchports_locked(
4743 task_t task,
4744 struct task_watchports *watchports,
4745 struct task_watchport_elem **previous_elem_array,
4746 ipc_port_t *portwatch_ports,
4747 uint32_t portwatch_count)
4748 {
4749 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4750
4751 /* Check if the task is still active */
4752 if (!task->active) {
4753 refs = task_watchports_release(watchports);
4754 return refs;
4755 }
4756
4757 assert(task->watchports == NULL);
4758 task->watchports = watchports;
4759
4760 for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
4761 ipc_port_t port = portwatch_ports[i];
4762
4763 task_watchport_elem_init(&watchports->tw_elem[i], task, port);
4764 if (port == NULL) {
4765 task_watchport_elem_clear(&watchports->tw_elem[i]);
4766 continue;
4767 }
4768
4769 ip_mq_lock(port);
4770
4771 /* Check if port is in a valid state to be set up as a watchport */
4772 if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
4773 &previous_elem_array[j]) != KERN_SUCCESS) {
4774 task_watchport_elem_clear(&watchports->tw_elem[i]);
4775 continue;
4776 }
4777 /* port unlocked on return */
4778
4779 ip_reference(port);
4780 task_watchports_retain(watchports);
4781 if (previous_elem_array[j] != NULL) {
4782 j++;
4783 }
4784 }
4785
4786 /* Drop the reference on task_watchport struct returned by os_ref_init */
4787 refs = task_watchports_release(watchports);
4788 if (refs == 0) {
4789 task->watchports = NULL;
4790 }
4791
4792 return refs;
4793 }
4794
4795 /*
4796 * task_remove_turnstile_watchports_locked:
4797 * Clear all turnstile boost on the task from watchports.
4798 *
4799 * Arguments:
4800 * task: task to remove watchports from
4801 * watchports: watchports structure for the task
4802 * port_freelist: array of ports returned with ref to caller
4803 *
4804 *
4805 * Conditions:
4806 * ipc space of the task locked.
4807 * array of ports with refs are returned in port_freelist
4808 */
4809 static os_ref_count_t
4810 task_remove_turnstile_watchports_locked(
4811 task_t task,
4812 struct task_watchports *watchports,
4813 ipc_port_t *port_freelist)
4814 {
4815 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4816
4817 for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
4818 ipc_port_t port = watchports->tw_elem[i].twe_port;
4819 if (port == NULL) {
4820 continue;
4821 }
4822
4823 /* Lock the port and check if it has the entry */
4824 ip_mq_lock(port);
4825 if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
4826 &watchports->tw_elem[i]) == KERN_SUCCESS) {
4827 task_watchport_elem_clear(&watchports->tw_elem[i]);
4828 port_freelist[j++] = port;
4829 refs = task_watchports_release(watchports);
4830
4831 /* Check if all ports are cleaned */
4832 if (refs == 0) {
4833 task->watchports = NULL;
4834 break;
4835 }
4836 }
4837 /* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
4838 }
4839 return refs;
4840 }
4841
4842 /*
4843 * task_watchports_alloc_init:
4844 * Allocate and initialize task watchport struct.
4845 *
4846 * Conditions:
4847 * Nothing locked.
4848 */
4849 static struct task_watchports *
4850 task_watchports_alloc_init(
4851 task_t task,
4852 thread_t thread,
4853 uint32_t count)
4854 {
4855 struct task_watchports *watchports = kalloc_type(struct task_watchports,
4856 struct task_watchport_elem, count, Z_WAITOK | Z_ZERO | Z_NOFAIL);
4857
4858 task_reference(task);
4859 thread_reference(thread);
4860 watchports->tw_task = task;
4861 watchports->tw_thread = thread;
4862 watchports->tw_elem_array_count = count;
4863 os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);
4864
4865 return watchports;
4866 }
4867
4868 /*
4869 * task_watchports_deallocate:
4870 * Deallocate task watchport struct.
4871 *
4872 * Conditions:
4873 * Nothing locked.
4874 */
4875 static void
4876 task_watchports_deallocate(
4877 struct task_watchports *watchports)
4878 {
4879 uint32_t portwatch_count = watchports->tw_elem_array_count;
4880
4881 task_deallocate(watchports->tw_task);
4882 thread_deallocate(watchports->tw_thread);
4883 kfree_type(struct task_watchports, struct task_watchport_elem,
4884 portwatch_count, watchports);
4885 }
4886
4887 /*
4888 * task_watchport_elem_deallocate:
4889 * Deallocate task watchport element and release its ref on task_watchport.
4890 *
4891 * Conditions:
4892 * Nothing locked.
4893 */
4894 void
4895 task_watchport_elem_deallocate(
4896 struct task_watchport_elem *watchport_elem)
4897 {
4898 os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
4899 task_t task = watchport_elem->twe_task;
4900 struct task_watchports *watchports = NULL;
4901 ipc_port_t port = NULL;
4902
4903 assert(task != NULL);
4904
4905 /* Take the space lock to modify the element */
4906 is_write_lock(task->itk_space);
4907
4908 watchports = task->watchports;
4909 assert(watchports != NULL);
4910
4911 port = watchport_elem->twe_port;
4912 assert(port != NULL);
4913
4914 task_watchport_elem_clear(watchport_elem);
4915 refs = task_watchports_release(watchports);
4916
4917 if (refs == 0) {
4918 task->watchports = NULL;
4919 }
4920
4921 is_write_unlock(task->itk_space);
4922
4923 ip_release(port);
4924 if (refs == 0) {
4925 task_watchports_deallocate(watchports);
4926 }
4927 }
4928
4929 /*
4930 * task_has_watchports:
4931 * Return TRUE if task has watchport boosts.
4932 *
4933 * Conditions:
4934 * Nothing locked.
4935 */
4936 boolean_t
4937 task_has_watchports(task_t task)
4938 {
4939 return task->watchports != NULL;
4940 }
4941
4942 #if DEVELOPMENT || DEBUG
4943
4944 extern void IOSleep(int);
4945
4946 kern_return_t
4947 task_disconnect_page_mappings(task_t task)
4948 {
4949 int n;
4950
4951 if (task == TASK_NULL || task == kernel_task) {
4952 return KERN_INVALID_ARGUMENT;
4953 }
4954
4955 /*
4956 * This function is used to strip all of the mappings from
4957 * the pmap for the specified task, forcing the task to
4958 * re-fault all of the pages it is actively using. This
4959 * allows us to approximate the true working set of the
4960 * specified task. We only engage if at least 1 of the
4961 * threads in the task is runnable, but we want to continuously
4962 * sweep (at least for a while - I've arbitrarily set the limit at
4963 * 100 sweeps, to be revisited as we gain experience) to get a better
4964 * view into which areas within a page are being visited (as opposed to only
4965 * seeing the first fault of a page after the task becomes
4966 * runnable). In the future I may
4967 * try to block until awakened by a thread in this task
4968 * being made runnable, but for now we'll periodically poll from the
4969 * user-level debug tool driving the sysctl.
4970 */
4971 for (n = 0; n < 100; n++) {
4972 thread_t thread;
4973 boolean_t runnable;
4974 boolean_t do_unnest;
4975 int page_count;
4976
4977 runnable = FALSE;
4978 do_unnest = FALSE;
4979
4980 task_lock(task);
4981
4982 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4983 if (thread->state & TH_RUN) {
4984 runnable = TRUE;
4985 break;
4986 }
4987 }
4988 if (n == 0) {
4989 task->task_disconnected_count++;
4990 }
4991
4992 if (task->task_unnested == FALSE) {
4993 if (runnable == TRUE) {
4994 task->task_unnested = TRUE;
4995 do_unnest = TRUE;
4996 }
4997 }
4998 task_unlock(task);
4999
5000 if (runnable == FALSE) {
5001 break;
5002 }
5003
5004 KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
5005 task, do_unnest, task->task_disconnected_count);
5006
5007 page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
5008
5009 KDBG_RELEASE((MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
5010 task, page_count);
5011
5012 if ((n % 5) == 4) {
5013 IOSleep(1);
5014 }
5015 }
5016 return KERN_SUCCESS;
5017 }
5018
5019 #endif
5020
5021
5022 #if CONFIG_FREEZE
5023
5024 /*
5025 * task_freeze:
5026 *
5027 * Freeze a task.
5028 *
5029 * Conditions:
5030 * The caller holds a reference to the task
5031 */
5032 extern struct freezer_context freezer_context_global;
5033
5034 kern_return_t
5035 task_freeze(
5036 task_t task,
5037 uint32_t *purgeable_count,
5038 uint32_t *wired_count,
5039 uint32_t *clean_count,
5040 uint32_t *dirty_count,
5041 uint32_t dirty_budget,
5042 uint32_t *shared_count,
5043 int *freezer_error_code,
5044 boolean_t eval_only)
5045 {
5046 kern_return_t kr = KERN_SUCCESS;
5047
5048 if (task == TASK_NULL || task == kernel_task) {
5049 return KERN_INVALID_ARGUMENT;
5050 }
5051
5052 task_lock(task);
5053
5054 while (task->changing_freeze_state) {
5055 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5056 task_unlock(task);
5057 thread_block(THREAD_CONTINUE_NULL);
5058
5059 task_lock(task);
5060 }
5061 if (task->frozen) {
5062 task_unlock(task);
5063 return KERN_FAILURE;
5064 }
5065 task->changing_freeze_state = TRUE;
5066
5067 freezer_context_global.freezer_ctx_task = task;
5068
5069 task_unlock(task);
5070
5071 kr = vm_map_freeze(task,
5072 purgeable_count,
5073 wired_count,
5074 clean_count,
5075 dirty_count,
5076 dirty_budget,
5077 shared_count,
5078 freezer_error_code,
5079 eval_only);
5080
5081 task_lock(task);
5082
5083 if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
5084 task->frozen = TRUE;
5085
5086 freezer_context_global.freezer_ctx_task = NULL;
5087 freezer_context_global.freezer_ctx_uncompressed_pages = 0;
5088
5089 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
5090 /*
5091 * reset the counter tracking the # of swapped compressed pages
5092 * because we are now done with this freeze session and task.
5093 */
5094
5095 *dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64); /*used to track pageouts*/
5096 }
5097
5098 freezer_context_global.freezer_ctx_swapped_bytes = 0;
5099 }
5100
5101 task->changing_freeze_state = FALSE;
5102 thread_wakeup(&task->changing_freeze_state);
5103
5104 task_unlock(task);
5105
5106 if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
5107 (kr == KERN_SUCCESS) &&
5108 (eval_only == FALSE)) {
5109 vm_wake_compactor_swapper();
5110 /*
5111 * We do an explicit wakeup of the swapout thread here
5112 * because the compact_and_swap routines don't have
5113 * knowledge about these kinds of "per-task packed c_segs"
5114 * and so will not be evaluating whether we need to do
5115 * a wakeup there.
5116 */
5117 thread_wakeup((event_t)&vm_swapout_thread);
5118 }
5119
5120 return kr;
5121 }
5122
5123 /*
5124 * task_thaw:
5125 *
5126 * Thaw a currently frozen task.
5127 *
5128 * Conditions:
5129 * The caller holds a reference to the task
5130 */
5131 kern_return_t
5132 task_thaw(
5133 task_t task)
5134 {
5135 if (task == TASK_NULL || task == kernel_task) {
5136 return KERN_INVALID_ARGUMENT;
5137 }
5138
5139 task_lock(task);
5140
5141 while (task->changing_freeze_state) {
5142 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
5143 task_unlock(task);
5144 thread_block(THREAD_CONTINUE_NULL);
5145
5146 task_lock(task);
5147 }
5148 if (!task->frozen) {
5149 task_unlock(task);
5150 return KERN_FAILURE;
5151 }
5152 task->frozen = FALSE;
5153
5154 task_unlock(task);
5155
5156 return KERN_SUCCESS;
5157 }
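/*
 * A minimal sketch of the freeze/thaw calling sequence a freezer client
 * might use (the variable names below are illustrative and not taken from
 * the actual freezer code; the caller must already hold a task reference,
 * as noted in the conditions above):
 *
 *	uint32_t purgeable, wired, clean, dirty, shared;
 *	int freezer_error = 0;
 *	kern_return_t kr;
 *
 *	// optional dry run: evaluate the map without actually freezing
 *	kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *	    dirty_budget, &shared, &freezer_error, TRUE);
 *	if (kr == KERN_SUCCESS) {
 *		// real freeze: compress/swap pages and mark the task frozen
 *		kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *		    dirty_budget, &shared, &freezer_error, FALSE);
 *	}
 *	...
 *	// later, before letting the task run normally again
 *	(void) task_thaw(task);
 */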
5158
5159 void
5160 task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op)
5161 {
5162 /*
5163 * We don't assert that the task lock is held because we call this
5164 * routine from the decompression path and we won't be holding the
5165 * task lock. However, since we are in the context of the task we are
5166 * safe.
5167 * In the case of the task_freeze path, we call it from behind the task
5168 * lock but we don't need to because we have a reference on the proc
5169 * being frozen.
5170 */
5171
5172 assert(task);
5173 if (amount == 0) {
5174 return;
5175 }
5176
5177 if (op == CREDIT_TO_SWAP) {
5178 ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5179 } else if (op == DEBIT_FROM_SWAP) {
5180 ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount);
5181 } else {
5182 panic("task_update_frozen_to_swap_acct: Invalid ledger op");
5183 }
5184 }
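/*
 * Call pattern for the two directions described in the comment above
 * (a sketch; `swapped_bytes` is illustrative):
 *
 *	task_update_frozen_to_swap_acct(task, swapped_bytes, CREDIT_TO_SWAP);   // freeze / swap-out path
 *	task_update_frozen_to_swap_acct(task, swapped_bytes, DEBIT_FROM_SWAP);  // decompression / swap-in path
 */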
5185 #endif /* CONFIG_FREEZE */
5186
5187 kern_return_t
5188 task_set_security_tokens(
5189 task_t task,
5190 security_token_t sec_token,
5191 audit_token_t audit_token,
5192 host_priv_t host_priv)
5193 {
5194 ipc_port_t host_port = IP_NULL;
5195 kern_return_t kr;
5196
5197 if (task == TASK_NULL) {
5198 return KERN_INVALID_ARGUMENT;
5199 }
5200
5201 task_lock(task);
5202 task_set_tokens(task, &sec_token, &audit_token);
5203 task_unlock(task);
5204
5205 if (host_priv != HOST_PRIV_NULL) {
5206 kr = host_get_host_priv_port(host_priv, &host_port);
5207 } else {
5208 kr = host_get_host_port(host_priv_self(), &host_port);
5209 }
5210 assert(kr == KERN_SUCCESS);
5211
5212 kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
5213 return kr;
5214 }
5215
5216 kern_return_t
5217 task_send_trace_memory(
5218 __unused task_t target_task,
5219 __unused uint32_t pid,
5220 __unused uint64_t uniqueid)
5221 {
5222 return KERN_INVALID_ARGUMENT;
5223 }
5224
5225 /*
5226 * This routine was added, pretty much exclusively, for registering the
5227 * RPC glue vector for in-kernel short-circuited tasks. Rather than
5228 * removing it completely, I have only disabled that feature (which was
5229 * the only feature at the time). It just appears that we are going to
5230 * want to add some user data to tasks in the future (i.e. bsd info,
5231 * task names, etc...), so I left it in the formal task interface.
5232 */
5233 kern_return_t
5234 task_set_info(
5235 task_t task,
5236 task_flavor_t flavor,
5237 __unused task_info_t task_info_in, /* pointer to IN array */
5238 __unused mach_msg_type_number_t task_info_count)
5239 {
5240 if (task == TASK_NULL) {
5241 return KERN_INVALID_ARGUMENT;
5242 }
5243 switch (flavor) {
5244 #if CONFIG_ATM
5245 case TASK_TRACE_MEMORY_INFO:
5246 return KERN_NOT_SUPPORTED;
5247 #endif // CONFIG_ATM
5248 default:
5249 return KERN_INVALID_ARGUMENT;
5250 }
5251 }
5252
5253 static void
5254 _task_fill_times(task_t task, time_value_t *user_time, time_value_t *sys_time)
5255 {
5256 clock_sec_t sec;
5257 clock_usec_t usec;
5258
5259 struct recount_times_mach times = recount_task_terminated_times(task);
5260 absolutetime_to_microtime(times.rtm_user, &sec, &usec);
5261 user_time->seconds = (typeof(user_time->seconds))sec;
5262 user_time->microseconds = usec;
5263 absolutetime_to_microtime(times.rtm_system, &sec, &usec);
5264 sys_time->seconds = (typeof(sys_time->seconds))sec;
5265 sys_time->microseconds = usec;
5266 }
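/*
 * The helper above converts Mach absolute time into a time_value_t via
 * absolutetime_to_microtime().  For reference, the user-space analogue of
 * that conversion (a minimal sketch using the public timebase API) is:
 *
 *	#include <mach/mach_time.h>
 *
 *	static uint64_t
 *	abs_to_usecs(uint64_t abs)
 *	{
 *		mach_timebase_info_data_t tb;
 *		mach_timebase_info(&tb);			// abs * numer / denom yields nanoseconds
 *		return (abs * tb.numer / tb.denom) / 1000;	// nanoseconds -> microseconds
 *	}
 */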
5267
5268 int radar_20146450 = 1;
5269 kern_return_t
5270 task_info(
5271 task_t task,
5272 task_flavor_t flavor,
5273 task_info_t task_info_out,
5274 mach_msg_type_number_t *task_info_count)
5275 {
5276 kern_return_t error = KERN_SUCCESS;
5277 mach_msg_type_number_t original_task_info_count;
5278 bool is_kernel_task = (task == kernel_task);
5279
5280 if (task == TASK_NULL) {
5281 return KERN_INVALID_ARGUMENT;
5282 }
5283
5284 original_task_info_count = *task_info_count;
5285 task_lock(task);
5286
5287 if (task != current_task() && !task->active) {
5288 task_unlock(task);
5289 return KERN_INVALID_ARGUMENT;
5290 }
5291
5292
5293 switch (flavor) {
5294 case TASK_BASIC_INFO_32:
5295 case TASK_BASIC2_INFO_32:
5296 #if defined(__arm64__)
5297 case TASK_BASIC_INFO_64:
5298 #endif
5299 {
5300 task_basic_info_32_t basic_info;
5301 ledger_amount_t tmp;
5302
5303 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
5304 error = KERN_INVALID_ARGUMENT;
5305 break;
5306 }
5307
5308 basic_info = (task_basic_info_32_t)task_info_out;
5309
5310 basic_info->virtual_size = (typeof(basic_info->virtual_size))
5311 vm_map_adjusted_size(is_kernel_task ? kernel_map : task->map);
5312 if (flavor == TASK_BASIC2_INFO_32) {
5313 /*
5314 * The "BASIC2" flavor gets the maximum resident
5315 * size instead of the current resident size...
5316 */
5317 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, &tmp);
5318 } else {
5319 ledger_get_balance(task->ledger, task_ledgers.phys_mem, &tmp);
5320 }
5321 basic_info->resident_size = (natural_t) MIN((ledger_amount_t) UINT32_MAX, tmp);
5322
5323 _task_fill_times(task, &basic_info->user_time,
5324 &basic_info->system_time);
5325
5326 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5327 basic_info->suspend_count = task->user_stop_count;
5328
5329 *task_info_count = TASK_BASIC_INFO_32_COUNT;
5330 break;
5331 }
5332
5333 #if defined(__arm64__)
5334 case TASK_BASIC_INFO_64_2:
5335 {
5336 task_basic_info_64_2_t basic_info;
5337
5338 if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
5339 error = KERN_INVALID_ARGUMENT;
5340 break;
5341 }
5342
5343 basic_info = (task_basic_info_64_2_t)task_info_out;
5344
5345 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5346 kernel_map : task->map);
5347 ledger_get_balance(task->ledger, task_ledgers.phys_mem,
5348 (ledger_amount_t *)&basic_info->resident_size);
5349 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5350 basic_info->suspend_count = task->user_stop_count;
5351 _task_fill_times(task, &basic_info->user_time,
5352 &basic_info->system_time);
5353
5354 *task_info_count = TASK_BASIC_INFO_64_2_COUNT;
5355 break;
5356 }
5357
5358 #else /* defined(__arm64__) */
5359 case TASK_BASIC_INFO_64:
5360 {
5361 task_basic_info_64_t basic_info;
5362
5363 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
5364 error = KERN_INVALID_ARGUMENT;
5365 break;
5366 }
5367
5368 basic_info = (task_basic_info_64_t)task_info_out;
5369
5370 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5371 kernel_map : task->map);
5372 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&basic_info->resident_size);
5373 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5374 basic_info->suspend_count = task->user_stop_count;
5375 _task_fill_times(task, &basic_info->user_time,
5376 &basic_info->system_time);
5377
5378 *task_info_count = TASK_BASIC_INFO_64_COUNT;
5379 break;
5380 }
5381 #endif /* defined(__arm64__) */
5382
5383 case MACH_TASK_BASIC_INFO:
5384 {
5385 mach_task_basic_info_t basic_info;
5386
5387 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
5388 error = KERN_INVALID_ARGUMENT;
5389 break;
5390 }
5391
5392 basic_info = (mach_task_basic_info_t)task_info_out;
5393
5394 basic_info->virtual_size = vm_map_adjusted_size(is_kernel_task ?
5395 kernel_map : task->map);
5396 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size);
5397 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &basic_info->resident_size_max);
5398 basic_info->policy = is_kernel_task ? POLICY_RR : POLICY_TIMESHARE;
5399 basic_info->suspend_count = task->user_stop_count;
5400 _task_fill_times(task, &basic_info->user_time,
5401 &basic_info->system_time);
5402
5403 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
5404 break;
5405 }
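/*
 * User-space example for the MACH_TASK_BASIC_INFO flavor above (a minimal
 * sketch; error handling trimmed):
 *
 *	#include <mach/mach.h>
 *
 *	mach_task_basic_info_data_t info;
 *	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
 *
 *	if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
 *	    (task_info_t)&info, &count) == KERN_SUCCESS) {
 *		printf("resident %llu bytes, peak %llu bytes, suspend count %d\n",
 *		    (unsigned long long)info.resident_size,
 *		    (unsigned long long)info.resident_size_max,
 *		    info.suspend_count);
 *	}
 */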
5406
5407 case TASK_THREAD_TIMES_INFO:
5408 {
5409 task_thread_times_info_t times_info;
5410 thread_t thread;
5411
5412 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
5413 error = KERN_INVALID_ARGUMENT;
5414 break;
5415 }
5416
5417 times_info = (task_thread_times_info_t)task_info_out;
5418 times_info->user_time = (time_value_t){ 0 };
5419 times_info->system_time = (time_value_t){ 0 };
5420
5421 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5422 if ((thread->options & TH_OPT_IDLE_THREAD) == 0) {
5423 time_value_t user_time, system_time;
5424
5425 thread_read_times(thread, &user_time, &system_time, NULL);
5426 time_value_add(&times_info->user_time, &user_time);
5427 time_value_add(&times_info->system_time, &system_time);
5428 }
5429 }
5430
5431 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
5432 break;
5433 }
5434
5435 case TASK_ABSOLUTETIME_INFO:
5436 {
5437 task_absolutetime_info_t info;
5438
5439 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
5440 error = KERN_INVALID_ARGUMENT;
5441 break;
5442 }
5443
5444 info = (task_absolutetime_info_t)task_info_out;
5445
5446 struct recount_times_mach term_times =
5447 recount_task_terminated_times(task);
5448 struct recount_times_mach total_times = recount_task_times(task);
5449
5450 info->total_user = total_times.rtm_user;
5451 info->total_system = total_times.rtm_system;
5452 info->threads_user = total_times.rtm_user - term_times.rtm_user;
5453 info->threads_system = total_times.rtm_system - term_times.rtm_system;
5454
5455 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
5456 break;
5457 }
5458
5459 case TASK_DYLD_INFO:
5460 {
5461 task_dyld_info_t info;
5462
5463 /*
5464 * We added the format field to TASK_DYLD_INFO output. For
5465 * temporary backward compatibility, accept the fact that
5466 * clients may ask for the old version - distinguished by the
5467 * size of the expected result structure.
5468 */
5469 #define TASK_LEGACY_DYLD_INFO_COUNT \
5470 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
5471
5472 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
5473 error = KERN_INVALID_ARGUMENT;
5474 break;
5475 }
5476
5477 info = (task_dyld_info_t)task_info_out;
5478 info->all_image_info_addr = task->all_image_info_addr;
5479 info->all_image_info_size = task->all_image_info_size;
5480
5481 /* only set format on output for those expecting it */
5482 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
5483 info->all_image_info_format = task_has_64Bit_addr(task) ?
5484 TASK_DYLD_ALL_IMAGE_INFO_64 :
5485 TASK_DYLD_ALL_IMAGE_INFO_32;
5486 *task_info_count = TASK_DYLD_INFO_COUNT;
5487 } else {
5488 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
5489 }
5490 break;
5491 }
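/*
 * User-space example for the TASK_DYLD_INFO flavor above (a minimal sketch;
 * passing TASK_DYLD_INFO_COUNT ensures the format field described in the
 * comment is returned):
 *
 *	struct task_dyld_info dyld_info;
 *	mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
 *
 *	if (task_info(mach_task_self(), TASK_DYLD_INFO,
 *	    (task_info_t)&dyld_info, &count) == KERN_SUCCESS &&
 *	    count >= TASK_DYLD_INFO_COUNT) {
 *		// all_image_info_addr points at dyld's all_image_infos structure
 *		bool is64 = (dyld_info.all_image_info_format ==
 *		    TASK_DYLD_ALL_IMAGE_INFO_64);
 *		...
 *	}
 */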
5492
5493 case TASK_EXTMOD_INFO:
5494 {
5495 task_extmod_info_t info;
5496 void *p;
5497
5498 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
5499 error = KERN_INVALID_ARGUMENT;
5500 break;
5501 }
5502
5503 info = (task_extmod_info_t)task_info_out;
5504
5505 p = get_bsdtask_info(task);
5506 if (p) {
5507 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
5508 } else {
5509 bzero(info->task_uuid, sizeof(info->task_uuid));
5510 }
5511 info->extmod_statistics = task->extmod_statistics;
5512 *task_info_count = TASK_EXTMOD_INFO_COUNT;
5513
5514 break;
5515 }
5516
5517 case TASK_KERNELMEMORY_INFO:
5518 {
5519 task_kernelmemory_info_t tkm_info;
5520 ledger_amount_t credit, debit;
5521
5522 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
5523 error = KERN_INVALID_ARGUMENT;
5524 break;
5525 }
5526
5527 tkm_info = (task_kernelmemory_info_t) task_info_out;
5528 tkm_info->total_palloc = 0;
5529 tkm_info->total_pfree = 0;
5530 tkm_info->total_salloc = 0;
5531 tkm_info->total_sfree = 0;
5532
5533 if (task == kernel_task) {
5534 /*
5535 * All shared allocs/frees from other tasks count against
5536 * the kernel private memory usage. If we are looking up
5537 * info for the kernel task, gather from everywhere.
5538 */
5539 task_unlock(task);
5540
5541 /* start by accounting for all the terminated tasks against the kernel */
5542 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
5543 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
5544
5545 /* count all other task/thread shared alloc/free against the kernel */
5546 lck_mtx_lock(&tasks_threads_lock);
5547
5548 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
5549 queue_iterate(&tasks, task, task_t, tasks) {
5550 if (task == kernel_task) {
5551 if (ledger_get_entries(task->ledger,
5552 task_ledgers.tkm_private, &credit,
5553 &debit) == KERN_SUCCESS) {
5554 tkm_info->total_palloc += credit;
5555 tkm_info->total_pfree += debit;
5556 }
5557 }
5558 if (!ledger_get_entries(task->ledger,
5559 task_ledgers.tkm_shared, &credit, &debit)) {
5560 tkm_info->total_palloc += credit;
5561 tkm_info->total_pfree += debit;
5562 }
5563 }
5564 lck_mtx_unlock(&tasks_threads_lock);
5565 } else {
5566 if (!ledger_get_entries(task->ledger,
5567 task_ledgers.tkm_private, &credit, &debit)) {
5568 tkm_info->total_palloc = credit;
5569 tkm_info->total_pfree = debit;
5570 }
5571 if (!ledger_get_entries(task->ledger,
5572 task_ledgers.tkm_shared, &credit, &debit)) {
5573 tkm_info->total_salloc = credit;
5574 tkm_info->total_sfree = debit;
5575 }
5576 task_unlock(task);
5577 }
5578
5579 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
5580 return KERN_SUCCESS;
5581 }
5582
5583 /* OBSOLETE */
5584 case TASK_SCHED_FIFO_INFO:
5585 {
5586 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
5587 error = KERN_INVALID_ARGUMENT;
5588 break;
5589 }
5590
5591 error = KERN_INVALID_POLICY;
5592 break;
5593 }
5594
5595 /* OBSOLETE */
5596 case TASK_SCHED_RR_INFO:
5597 {
5598 policy_rr_base_t rr_base;
5599 uint32_t quantum_time;
5600 uint64_t quantum_ns;
5601
5602 if (*task_info_count < POLICY_RR_BASE_COUNT) {
5603 error = KERN_INVALID_ARGUMENT;
5604 break;
5605 }
5606
5607 rr_base = (policy_rr_base_t) task_info_out;
5608
5609 if (task != kernel_task) {
5610 error = KERN_INVALID_POLICY;
5611 break;
5612 }
5613
5614 rr_base->base_priority = task->priority;
5615
5616 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
5617 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
5618
5619 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
5620
5621 *task_info_count = POLICY_RR_BASE_COUNT;
5622 break;
5623 }
5624
5625 /* OBSOLETE */
5626 case TASK_SCHED_TIMESHARE_INFO:
5627 {
5628 policy_timeshare_base_t ts_base;
5629
5630 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
5631 error = KERN_INVALID_ARGUMENT;
5632 break;
5633 }
5634
5635 ts_base = (policy_timeshare_base_t) task_info_out;
5636
5637 if (task == kernel_task) {
5638 error = KERN_INVALID_POLICY;
5639 break;
5640 }
5641
5642 ts_base->base_priority = task->priority;
5643
5644 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
5645 break;
5646 }
5647
5648 case TASK_SECURITY_TOKEN:
5649 {
5650 security_token_t *sec_token_p;
5651
5652 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
5653 error = KERN_INVALID_ARGUMENT;
5654 break;
5655 }
5656
5657 sec_token_p = (security_token_t *) task_info_out;
5658
5659 *sec_token_p = *task_get_sec_token(task);
5660
5661 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
5662 break;
5663 }
5664
5665 case TASK_AUDIT_TOKEN:
5666 {
5667 audit_token_t *audit_token_p;
5668
5669 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
5670 error = KERN_INVALID_ARGUMENT;
5671 break;
5672 }
5673
5674 audit_token_p = (audit_token_t *) task_info_out;
5675
5676 *audit_token_p = *task_get_audit_token(task);
5677
5678 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
5679 break;
5680 }
5681
5682 case TASK_SCHED_INFO:
5683 error = KERN_INVALID_ARGUMENT;
5684 break;
5685
5686 case TASK_EVENTS_INFO:
5687 {
5688 task_events_info_t events_info;
5689 thread_t thread;
5690 uint64_t n_syscalls_mach, n_syscalls_unix, n_csw;
5691
5692 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
5693 error = KERN_INVALID_ARGUMENT;
5694 break;
5695 }
5696
5697 events_info = (task_events_info_t) task_info_out;
5698
5699
5700 events_info->faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
5701 events_info->pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
5702 events_info->cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
5703 events_info->messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
5704 events_info->messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
5705
5706 n_syscalls_mach = task->syscalls_mach;
5707 n_syscalls_unix = task->syscalls_unix;
5708 n_csw = task->c_switch;
5709
5710 queue_iterate(&task->threads, thread, thread_t, task_threads) {
5711 n_csw += thread->c_switch;
5712 n_syscalls_mach += thread->syscalls_mach;
5713 n_syscalls_unix += thread->syscalls_unix;
5714 }
5715
5716 events_info->syscalls_mach = (int32_t) MIN(n_syscalls_mach, INT32_MAX);
5717 events_info->syscalls_unix = (int32_t) MIN(n_syscalls_unix, INT32_MAX);
5718 events_info->csw = (int32_t) MIN(n_csw, INT32_MAX);
5719
5720 *task_info_count = TASK_EVENTS_INFO_COUNT;
5721 break;
5722 }
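/*
 * User-space example for the TASK_EVENTS_INFO flavor above (a minimal
 * sketch):
 *
 *	task_events_info_data_t events;
 *	mach_msg_type_number_t count = TASK_EVENTS_INFO_COUNT;
 *
 *	if (task_info(mach_task_self(), TASK_EVENTS_INFO,
 *	    (task_info_t)&events, &count) == KERN_SUCCESS) {
 *		printf("faults %d pageins %d csw %d\n",
 *		    events.faults, events.pageins, events.csw);
 *	}
 */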
5723 case TASK_AFFINITY_TAG_INFO:
5724 {
5725 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
5726 error = KERN_INVALID_ARGUMENT;
5727 break;
5728 }
5729
5730 error = task_affinity_info(task, task_info_out, task_info_count);
5731 break;
5732 }
5733 case TASK_POWER_INFO:
5734 {
5735 if (*task_info_count < TASK_POWER_INFO_COUNT) {
5736 error = KERN_INVALID_ARGUMENT;
5737 break;
5738 }
5739
5740 task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
5741 break;
5742 }
5743
5744 case TASK_POWER_INFO_V2:
5745 {
5746 if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
5747 error = KERN_INVALID_ARGUMENT;
5748 break;
5749 }
5750 task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
5751 task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
5752 break;
5753 }
5754
5755 case TASK_VM_INFO:
5756 case TASK_VM_INFO_PURGEABLE:
5757 {
5758 task_vm_info_t vm_info;
5759 vm_map_t map;
5760 ledger_amount_t tmp_amount;
5761
5762 struct proc *p;
5763 uint32_t platform, sdk;
5764 p = current_proc();
5765 platform = proc_platform(p);
5766 sdk = proc_sdk(p);
5767 if (original_task_info_count > TASK_VM_INFO_COUNT) {
5768 /*
5769 * Some iOS apps pass an incorrect value for
5770 * task_info_count, expressed in number of bytes
5771 * instead of number of "natural_t" elements, which
5772 * can lead to binary compatibility issues (including
5773 * stack corruption) when the data structure is
5774 * expanded in the future.
5775 * Let's make this potential issue visible by
5776 * logging about it...
5777 */
5778 if (!proc_is_simulated(p)) {
5779 os_log(OS_LOG_DEFAULT, "%s[%d] task_info: possibly invalid "
5780 "task_info_count %d > TASK_VM_INFO_COUNT=%d on platform %d sdk "
5781 "%d.%d.%d - please use TASK_VM_INFO_COUNT",
5782 proc_name_address(p), proc_pid(p),
5783 original_task_info_count, TASK_VM_INFO_COUNT,
5784 platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5785 }
5786 DTRACE_VM4(suspicious_task_vm_info_count,
5787 mach_msg_type_number_t, original_task_info_count,
5788 mach_msg_type_number_t, TASK_VM_INFO_COUNT,
5789 uint32_t, platform,
5790 uint32_t, sdk);
5791 }
5792 #if __arm64__
5793 if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
5794 platform == PLATFORM_IOS &&
5795 sdk != 0 &&
5796 (sdk >> 16) <= 12) {
5797 /*
5798 * Some iOS apps pass an incorrect value for
5799 * task_info_count, expressed in number of bytes
5800 * instead of number of "natural_t" elements.
5801 * For the sake of backwards binary compatibility
5802 * for apps built with an iOS12 or older SDK and using
5803 * the "rev2" data structure, let's fix task_info_count
5804 * for them, to avoid stomping past the actual end
5805 * of their buffer.
5806 */
5807 #if DEVELOPMENT || DEBUG
5808 printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d "
5809 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5810 proc_name_address(p), original_task_info_count,
5811 TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16),
5812 ((sdk >> 8) & 0xff), (sdk & 0xff));
5813 #endif /* DEVELOPMENT || DEBUG */
5814 DTRACE_VM4(workaround_task_vm_info_count,
5815 mach_msg_type_number_t, original_task_info_count,
5816 mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5817 uint32_t, platform,
5818 uint32_t, sdk);
5819 original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5820 *task_info_count = original_task_info_count;
5821 }
5822 if (original_task_info_count > TASK_VM_INFO_REV5_COUNT &&
5823 platform == PLATFORM_IOS &&
5824 sdk != 0 &&
5825 (sdk >> 16) <= 15) {
5826 /*
5827 * Some iOS apps pass an incorrect value for
5828 * task_info_count, expressed in number of bytes
5829 * instead of number of "natural_t" elements.
5830 */
5831 printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_COUNT=%d "
5832 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5833 proc_name_address(p), original_task_info_count,
5834 TASK_VM_INFO_REV5_COUNT, platform, (sdk >> 16),
5835 ((sdk >> 8) & 0xff), (sdk & 0xff));
5836 DTRACE_VM4(workaround_task_vm_info_count,
5837 mach_msg_type_number_t, original_task_info_count,
5838 mach_msg_type_number_t, TASK_VM_INFO_REV5_COUNT,
5839 uint32_t, platform,
5840 uint32_t, sdk);
5841 #if DEVELOPMENT || DEBUG
5842 /*
5843 * For the sake of internal builds livability,
5844 * work around this user-space bug by capping the
5845 * buffer's size to what it was with the iOS15 SDK.
5846 */
5847 original_task_info_count = TASK_VM_INFO_REV5_COUNT;
5848 *task_info_count = original_task_info_count;
5849 #endif /* DEVELOPMENT || DEBUG */
5850 }
5851
5852 if (original_task_info_count > TASK_VM_INFO_REV7_COUNT &&
5853 platform == PLATFORM_IOS &&
5854 sdk != 0 &&
5855 (sdk >> 16) == 17) {
5856 /*
5857 * Some iOS apps still pass an incorrect value for
5858 * task_info_count, expressed in number of bytes
5859 * instead of number of "natural_t" elements.
5860 */
5861 printf("%s:%d %d[%s] task_info_count=%d > TASK_VM_INFO_COUNT=%d "
5862 "platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p),
5863 proc_name_address(p), original_task_info_count,
5864 TASK_VM_INFO_REV7_COUNT, platform, (sdk >> 16),
5865 ((sdk >> 8) & 0xff), (sdk & 0xff));
5866 DTRACE_VM4(workaround_task_vm_info_count,
5867 mach_msg_type_number_t, original_task_info_count,
5868 mach_msg_type_number_t, TASK_VM_INFO_REV6_COUNT,
5869 uint32_t, platform,
5870 uint32_t, sdk);
5871 #if DEVELOPMENT || DEBUG
5872 /*
5873 * For the sake of internal builds livability,
5874 * work around this user-space bug by capping the
5875 * buffer's size to what it was with the iOS15 and iOS16 SDKs.
5876 */
5877 original_task_info_count = TASK_VM_INFO_REV6_COUNT;
5878 *task_info_count = original_task_info_count;
5879 #endif /* DEVELOPMENT || DEBUG */
5880 }
5881 #endif /* __arm64__ */
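/*
 * The workarounds above exist because some callers pass a byte count.  The
 * correct user-space invocation (a minimal sketch) always expresses the
 * count in natural_t units:
 *
 *	task_vm_info_data_t vmi;
 *	mach_msg_type_number_t count = TASK_VM_INFO_COUNT;	// correct: element count
 *	// WRONG: mach_msg_type_number_t count = sizeof(vmi);	// byte count, triggers the logging above
 *
 *	kern_return_t kr = task_info(mach_task_self(), TASK_VM_INFO,
 *	    (task_info_t)&vmi, &count);
 */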
5882
5883 if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
5884 error = KERN_INVALID_ARGUMENT;
5885 break;
5886 }
5887
5888 vm_info = (task_vm_info_t)task_info_out;
5889
5890 /*
5891 * Do not hold both the task and map locks,
5892 * so convert the task lock into a map reference,
5893 * drop the task lock, then lock the map.
5894 */
5895 if (is_kernel_task) {
5896 map = kernel_map;
5897 task_unlock(task);
5898 /* no lock, no reference */
5899 } else {
5900 map = task->map;
5901 vm_map_reference(map);
5902 task_unlock(task);
5903 vm_map_lock_read(map);
5904 }
5905
5906 vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map);
5907 vm_info->region_count = map->hdr.nentries;
5908 vm_info->page_size = vm_map_page_size(map);
5909
5910 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size);
5911 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &vm_info->resident_size_peak);
5912
5913 vm_info->device = 0;
5914 vm_info->device_peak = 0;
5915 ledger_get_balance(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external);
5916 ledger_get_lifetime_max(task->ledger, task_ledgers.external, (ledger_amount_t *) &vm_info->external_peak);
5917 ledger_get_balance(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal);
5918 ledger_get_lifetime_max(task->ledger, task_ledgers.internal, (ledger_amount_t *) &vm_info->internal_peak);
5919 ledger_get_balance(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable);
5920 ledger_get_lifetime_max(task->ledger, task_ledgers.reusable, (ledger_amount_t *) &vm_info->reusable_peak);
5921 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed);
5922 ledger_get_lifetime_max(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_peak);
5923 ledger_get_entries(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t*) &vm_info->compressed_lifetime, &tmp_amount);
5924 ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_total);
5925 ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, (ledger_amount_t *) &vm_info->ledger_tag_neural_nofootprint_peak);
5926
5927 vm_info->purgeable_volatile_pmap = 0;
5928 vm_info->purgeable_volatile_resident = 0;
5929 vm_info->purgeable_volatile_virtual = 0;
5930 if (is_kernel_task) {
5931 /*
5932 * We do not maintain the detailed stats for the
5933 * kernel_pmap, so just count everything as
5934 * "internal"...
5935 */
5936 vm_info->internal = vm_info->resident_size;
5937 /*
5938 * ... but since the memory held by the VM compressor
5939 * in the kernel address space ought to be attributed
5940 * to user-space tasks, we subtract it from "internal"
5941 * to give memory reporting tools a more accurate idea
5942 * of what the kernel itself is actually using, instead
5943 * of making it look like the kernel is leaking memory
5944 * when the system is under memory pressure.
5945 */
5946 vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
5947 PAGE_SIZE);
5948 } else {
5949 mach_vm_size_t volatile_virtual_size;
5950 mach_vm_size_t volatile_resident_size;
5951 mach_vm_size_t volatile_compressed_size;
5952 mach_vm_size_t volatile_pmap_size;
5953 mach_vm_size_t volatile_compressed_pmap_size;
5954 kern_return_t kr;
5955
5956 if (flavor == TASK_VM_INFO_PURGEABLE) {
5957 kr = vm_map_query_volatile(
5958 map,
5959 &volatile_virtual_size,
5960 &volatile_resident_size,
5961 &volatile_compressed_size,
5962 &volatile_pmap_size,
5963 &volatile_compressed_pmap_size);
5964 if (kr == KERN_SUCCESS) {
5965 vm_info->purgeable_volatile_pmap =
5966 volatile_pmap_size;
5967 if (radar_20146450) {
5968 vm_info->compressed -=
5969 volatile_compressed_pmap_size;
5970 }
5971 vm_info->purgeable_volatile_resident =
5972 volatile_resident_size;
5973 vm_info->purgeable_volatile_virtual =
5974 volatile_virtual_size;
5975 }
5976 }
5977 }
5978 *task_info_count = TASK_VM_INFO_REV0_COUNT;
5979
5980 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
5981 /* must be captured while we still have the map lock */
5982 vm_info->min_address = map->min_offset;
5983 vm_info->max_address = map->max_offset;
5984 }
5985
5986 /*
5987 * Done with vm map things, can drop the map lock and reference,
5988 * and take the task lock back.
5989 *
5990 * Re-validate that the task didn't die on us.
5991 */
5992 if (!is_kernel_task) {
5993 vm_map_unlock_read(map);
5994 vm_map_deallocate(map);
5995 }
5996 map = VM_MAP_NULL;
5997
5998 task_lock(task);
5999
6000 if ((task != current_task()) && (!task->active)) {
6001 error = KERN_INVALID_ARGUMENT;
6002 break;
6003 }
6004
6005 if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
6006 vm_info->phys_footprint =
6007 (mach_vm_size_t) get_task_phys_footprint(task);
6008 *task_info_count = TASK_VM_INFO_REV1_COUNT;
6009 }
6010 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
6011 /* data was captured above */
6012 *task_info_count = TASK_VM_INFO_REV2_COUNT;
6013 }
6014
6015 if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
6016 ledger_get_lifetime_max(task->ledger,
6017 task_ledgers.phys_footprint,
6018 &vm_info->ledger_phys_footprint_peak);
6019 ledger_get_balance(task->ledger,
6020 task_ledgers.purgeable_nonvolatile,
6021 &vm_info->ledger_purgeable_nonvolatile);
6022 ledger_get_balance(task->ledger,
6023 task_ledgers.purgeable_nonvolatile_compressed,
6024 &vm_info->ledger_purgeable_novolatile_compressed);
6025 ledger_get_balance(task->ledger,
6026 task_ledgers.purgeable_volatile,
6027 &vm_info->ledger_purgeable_volatile);
6028 ledger_get_balance(task->ledger,
6029 task_ledgers.purgeable_volatile_compressed,
6030 &vm_info->ledger_purgeable_volatile_compressed);
6031 ledger_get_balance(task->ledger,
6032 task_ledgers.network_nonvolatile,
6033 &vm_info->ledger_tag_network_nonvolatile);
6034 ledger_get_balance(task->ledger,
6035 task_ledgers.network_nonvolatile_compressed,
6036 &vm_info->ledger_tag_network_nonvolatile_compressed);
6037 ledger_get_balance(task->ledger,
6038 task_ledgers.network_volatile,
6039 &vm_info->ledger_tag_network_volatile);
6040 ledger_get_balance(task->ledger,
6041 task_ledgers.network_volatile_compressed,
6042 &vm_info->ledger_tag_network_volatile_compressed);
6043 ledger_get_balance(task->ledger,
6044 task_ledgers.media_footprint,
6045 &vm_info->ledger_tag_media_footprint);
6046 ledger_get_balance(task->ledger,
6047 task_ledgers.media_footprint_compressed,
6048 &vm_info->ledger_tag_media_footprint_compressed);
6049 ledger_get_balance(task->ledger,
6050 task_ledgers.media_nofootprint,
6051 &vm_info->ledger_tag_media_nofootprint);
6052 ledger_get_balance(task->ledger,
6053 task_ledgers.media_nofootprint_compressed,
6054 &vm_info->ledger_tag_media_nofootprint_compressed);
6055 ledger_get_balance(task->ledger,
6056 task_ledgers.graphics_footprint,
6057 &vm_info->ledger_tag_graphics_footprint);
6058 ledger_get_balance(task->ledger,
6059 task_ledgers.graphics_footprint_compressed,
6060 &vm_info->ledger_tag_graphics_footprint_compressed);
6061 ledger_get_balance(task->ledger,
6062 task_ledgers.graphics_nofootprint,
6063 &vm_info->ledger_tag_graphics_nofootprint);
6064 ledger_get_balance(task->ledger,
6065 task_ledgers.graphics_nofootprint_compressed,
6066 &vm_info->ledger_tag_graphics_nofootprint_compressed);
6067 ledger_get_balance(task->ledger,
6068 task_ledgers.neural_footprint,
6069 &vm_info->ledger_tag_neural_footprint);
6070 ledger_get_balance(task->ledger,
6071 task_ledgers.neural_footprint_compressed,
6072 &vm_info->ledger_tag_neural_footprint_compressed);
6073 ledger_get_balance(task->ledger,
6074 task_ledgers.neural_nofootprint,
6075 &vm_info->ledger_tag_neural_nofootprint);
6076 ledger_get_balance(task->ledger,
6077 task_ledgers.neural_nofootprint_compressed,
6078 &vm_info->ledger_tag_neural_nofootprint_compressed);
6079 *task_info_count = TASK_VM_INFO_REV3_COUNT;
6080 }
6081 if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
6082 if (get_bsdtask_info(task)) {
6083 vm_info->limit_bytes_remaining =
6084 memorystatus_available_memory_internal(get_bsdtask_info(task));
6085 } else {
6086 vm_info->limit_bytes_remaining = 0;
6087 }
6088 *task_info_count = TASK_VM_INFO_REV4_COUNT;
6089 }
6090 if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
6091 thread_t thread;
6092 uint64_t total = task->decompressions;
6093 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6094 total += thread->decompressions;
6095 }
6096 vm_info->decompressions = (int32_t) MIN(total, INT32_MAX);
6097 *task_info_count = TASK_VM_INFO_REV5_COUNT;
6098 }
6099 if (original_task_info_count >= TASK_VM_INFO_REV6_COUNT) {
6100 ledger_get_balance(task->ledger, task_ledgers.swapins,
6101 &vm_info->ledger_swapins);
6102 *task_info_count = TASK_VM_INFO_REV6_COUNT;
6103 }
6104 if (original_task_info_count >= TASK_VM_INFO_REV7_COUNT) {
6105 ledger_get_balance(task->ledger,
6106 task_ledgers.neural_nofootprint_total,
6107 &vm_info->ledger_tag_neural_nofootprint_total);
6108 ledger_get_lifetime_max(task->ledger,
6109 task_ledgers.neural_nofootprint_total,
6110 &vm_info->ledger_tag_neural_nofootprint_peak);
6111 *task_info_count = TASK_VM_INFO_REV7_COUNT;
6112 }
6113
6114 break;
6115 }
6116
6117 case TASK_WAIT_STATE_INFO:
6118 {
6119 /*
6120 * Deprecated flavor. Currently allowing some results until all users
6121 * stop calling it. The results may not be accurate.
6122 */
6123 task_wait_state_info_t wait_state_info;
6124 uint64_t total_sfi_ledger_val = 0;
6125
6126 if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
6127 error = KERN_INVALID_ARGUMENT;
6128 break;
6129 }
6130
6131 wait_state_info = (task_wait_state_info_t) task_info_out;
6132
6133 wait_state_info->total_wait_state_time = 0;
6134 bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
6135
6136 #if CONFIG_SCHED_SFI
6137 int i, prev_lentry = -1;
6138 int64_t val_credit, val_debit;
6139
6140 for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
6141 val_credit = 0;
6142 /*
6143 * checking with prev_lentry != entry ensures adjacent classes
6144 * which share the same ledger do not add wait times twice.
6145 * Note: Use ledger() call to get data for each individual sfi class.
6146 */
6147 if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
6148 KERN_SUCCESS == ledger_get_entries(task->ledger,
6149 task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
6150 total_sfi_ledger_val += val_credit;
6151 }
6152 prev_lentry = task_ledgers.sfi_wait_times[i];
6153 }
6154
6155 #endif /* CONFIG_SCHED_SFI */
6156 wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
6157 *task_info_count = TASK_WAIT_STATE_INFO_COUNT;
6158
6159 break;
6160 }
6161 case TASK_VM_INFO_PURGEABLE_ACCOUNT:
6162 {
6163 #if DEVELOPMENT || DEBUG
6164 pvm_account_info_t acnt_info;
6165
6166 if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
6167 error = KERN_INVALID_ARGUMENT;
6168 break;
6169 }
6170
6171 if (task_info_out == NULL) {
6172 error = KERN_INVALID_ARGUMENT;
6173 break;
6174 }
6175
6176 acnt_info = (pvm_account_info_t) task_info_out;
6177
6178 error = vm_purgeable_account(task, acnt_info);
6179
6180 *task_info_count = PVM_ACCOUNT_INFO_COUNT;
6181
6182 break;
6183 #else /* DEVELOPMENT || DEBUG */
6184 error = KERN_NOT_SUPPORTED;
6185 break;
6186 #endif /* DEVELOPMENT || DEBUG */
6187 }
6188 case TASK_FLAGS_INFO:
6189 {
6190 task_flags_info_t flags_info;
6191
6192 if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
6193 error = KERN_INVALID_ARGUMENT;
6194 break;
6195 }
6196
6197 flags_info = (task_flags_info_t)task_info_out;
6198
6199 /* only publish the 64-bit flag of the task */
6200 flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);
6201
6202 *task_info_count = TASK_FLAGS_INFO_COUNT;
6203 break;
6204 }
6205
6206 case TASK_DEBUG_INFO_INTERNAL:
6207 {
6208 #if DEVELOPMENT || DEBUG
6209 task_debug_info_internal_t dbg_info;
6210 ipc_space_t space = task->itk_space;
6211 if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
6212 error = KERN_NOT_SUPPORTED;
6213 break;
6214 }
6215
6216 if (task_info_out == NULL) {
6217 error = KERN_INVALID_ARGUMENT;
6218 break;
6219 }
6220 dbg_info = (task_debug_info_internal_t) task_info_out;
6221 dbg_info->ipc_space_size = 0;
6222
6223 if (space) {
6224 smr_ipc_enter();
6225 ipc_entry_table_t table = smr_entered_load(&space->is_table);
6226 if (table) {
6227 dbg_info->ipc_space_size =
6228 ipc_entry_table_count(table);
6229 }
6230 smr_ipc_leave();
6231 }
6232
6233 dbg_info->suspend_count = task->suspend_count;
6234
6235 error = KERN_SUCCESS;
6236 *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
6237 break;
6238 #else /* DEVELOPMENT || DEBUG */
6239 error = KERN_NOT_SUPPORTED;
6240 break;
6241 #endif /* DEVELOPMENT || DEBUG */
6242 }
6243 case TASK_SUSPEND_STATS_INFO:
6244 {
6245 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6246 if (*task_info_count < TASK_SUSPEND_STATS_INFO_COUNT || task_info_out == NULL) {
6247 error = KERN_INVALID_ARGUMENT;
6248 break;
6249 }
6250 error = _task_get_suspend_stats_locked(task, (task_suspend_stats_t)task_info_out);
6251 *task_info_count = TASK_SUSPEND_STATS_INFO_COUNT;
6252 break;
6253 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6254 error = KERN_NOT_SUPPORTED;
6255 break;
6256 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6257 }
6258 case TASK_SUSPEND_SOURCES_INFO:
6259 {
6260 #if CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG)
6261 if (*task_info_count < TASK_SUSPEND_SOURCES_INFO_COUNT || task_info_out == NULL) {
6262 error = KERN_INVALID_ARGUMENT;
6263 break;
6264 }
6265 error = _task_get_suspend_sources_locked(task, (task_suspend_source_t)task_info_out);
6266 *task_info_count = TASK_SUSPEND_SOURCES_INFO_COUNT;
6267 break;
6268 #else /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6269 error = KERN_NOT_SUPPORTED;
6270 break;
6271 #endif /* CONFIG_TASK_SUSPEND_STATS && (DEVELOPMENT || DEBUG) */
6272 }
6273 default:
6274 error = KERN_INVALID_ARGUMENT;
6275 }
6276
6277 task_unlock(task);
6278 return error;
6279 }
6280
6281 /*
6282 * task_info_from_user
6283 *
6284 * When calling task_info from user space,
6285 * this function is executed as the MIG server side
6286 * instead of calling directly into task_info.
6287 * This makes it possible to perform additional security
6288 * checks on task_port.
6289 *
6290 * In the case of TASK_DYLD_INFO, we require the more
6291 * privileged task_read_port, not the less-privileged task_name_port.
6292 *
6293 */
6294 kern_return_t
6295 task_info_from_user(
6296 mach_port_t task_port,
6297 task_flavor_t flavor,
6298 task_info_t task_info_out,
6299 mach_msg_type_number_t *task_info_count)
6300 {
6301 task_t task;
6302 kern_return_t ret;
6303
6304 if (flavor == TASK_DYLD_INFO) {
6305 task = convert_port_to_task_read(task_port);
6306 } else {
6307 task = convert_port_to_task_name(task_port);
6308 }
6309
6310 ret = task_info(task, flavor, task_info_out, task_info_count);
6311
6312 task_deallocate(task);
6313
6314 return ret;
6315 }
6316
6317 /*
6318 * Routine: task_dyld_process_info_update_helper
6319 *
6320 * Release send rights in release_ports.
6321 *
6322 * If no active ports are found in the task's dyld notifier array, unset the
6323 * magic value in user space to indicate so.
6324 *
6325 * Condition:
6326 * task's itk_lock is locked, and is unlocked upon return.
6327 * Global g_dyldinfo_mtx is locked, and is unlocked upon return.
6328 */
6329 void
6330 task_dyld_process_info_update_helper(
6331 task_t task,
6332 size_t active_count,
6333 vm_map_address_t magic_addr, /* a userspace address */
6334 ipc_port_t *release_ports,
6335 size_t release_count)
6336 {
6337 void *notifiers_ptr = NULL;
6338
6339 assert(release_count <= DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT);
6340
6341 if (active_count == 0) {
6342 assert(task->itk_dyld_notify != NULL);
6343 notifiers_ptr = task->itk_dyld_notify;
6344 task->itk_dyld_notify = NULL;
6345 itk_unlock(task);
6346
6347 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6348 (void)copyoutmap_atomic32(task->map, MACH_PORT_NULL, magic_addr); /* unset magic */
6349 } else {
6350 itk_unlock(task);
6351 (void)copyoutmap_atomic32(task->map, (mach_port_name_t)DYLD_PROCESS_INFO_NOTIFY_MAGIC,
6352 magic_addr); /* reset magic */
6353 }
6354
6355 lck_mtx_unlock(&g_dyldinfo_mtx);
6356
6357 for (size_t i = 0; i < release_count; i++) {
6358 ipc_port_release_send(release_ports[i]);
6359 }
6360 }
6361
6362 /*
6363 * Routine: task_dyld_process_info_notify_register
6364 *
6365 * Insert a send right to target task's itk_dyld_notify array. Allocate kernel
6366 * memory for the array if it is the first port to be registered. Also clean up
6367 * any dead rights found in the array.
6368 *
6369 * Consumes sright if it returns KERN_SUCCESS; otherwise MIG will destroy it.
6370 *
6371 * Args:
6372 * task: Target task for the registration.
6373 * sright: A send right.
6374 *
6375 * Returns:
6376 * KERN_SUCCESS: Registration succeeded.
6377 * KERN_INVALID_TASK: task is invalid.
6378 * KERN_INVALID_RIGHT: sright is invalid.
6379 * KERN_DENIED: Security policy denied this call.
6380 * KERN_RESOURCE_SHORTAGE: Kernel memory allocation failed.
6381 * KERN_NO_SPACE: No available notifier port slot left for this task.
6382 * KERN_RIGHT_EXISTS: The notifier port is already registered and active.
6383 *
6384 * Other error code see task_info().
6385 *
6386 * See Also:
6387 * task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6388 */
6389 kern_return_t
6390 task_dyld_process_info_notify_register(
6391 task_t task,
6392 ipc_port_t sright)
6393 {
6394 struct task_dyld_info dyld_info;
6395 mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6396 ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6397 uint32_t release_count = 0, active_count = 0;
6398 mach_vm_address_t ports_addr; /* a user space address */
6399 kern_return_t kr;
6400 boolean_t right_exists = false;
6401 ipc_port_t *notifiers_ptr = NULL;
6402 ipc_port_t *portp;
6403
6404 if (task == TASK_NULL || task == kernel_task) {
6405 return KERN_INVALID_TASK;
6406 }
6407
6408 if (!IP_VALID(sright)) {
6409 return KERN_INVALID_RIGHT;
6410 }
6411
6412 #if CONFIG_MACF
6413 if (mac_task_check_dyld_process_info_notify_register()) {
6414 return KERN_DENIED;
6415 }
6416 #endif
6417
6418 kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6419 if (kr) {
6420 return kr;
6421 }
6422
6423 if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6424 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6425 offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6426 } else {
6427 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6428 offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6429 }
6430
6431 retry:
6432 if (task->itk_dyld_notify == NULL) {
6433 notifiers_ptr = kalloc_type(ipc_port_t,
6434 DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT,
6435 Z_WAITOK | Z_ZERO | Z_NOFAIL);
6436 }
6437
6438 lck_mtx_lock(&g_dyldinfo_mtx);
6439 itk_lock(task);
6440
6441 if (task->itk_dyld_notify == NULL) {
6442 if (notifiers_ptr == NULL) {
6443 itk_unlock(task);
6444 lck_mtx_unlock(&g_dyldinfo_mtx);
6445 goto retry;
6446 }
6447 task->itk_dyld_notify = notifiers_ptr;
6448 notifiers_ptr = NULL;
6449 }
6450
6451 assert(task->itk_dyld_notify != NULL);
6452 /* First pass: clear dead names and check for duplicate registration */
6453 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6454 portp = &task->itk_dyld_notify[slot];
6455 if (*portp != IPC_PORT_NULL && !ip_active(*portp)) {
6456 release_ports[release_count++] = *portp;
6457 *portp = IPC_PORT_NULL;
6458 } else if (*portp == sright) {
6459 /* the port is already registered and is active */
6460 right_exists = true;
6461 }
6462
6463 if (*portp != IPC_PORT_NULL) {
6464 active_count++;
6465 }
6466 }
6467
6468 if (right_exists) {
6469 /* skip second pass */
6470 kr = KERN_RIGHT_EXISTS;
6471 goto out;
6472 }
6473
6474 /* Second pass: register the port */
6475 kr = KERN_NO_SPACE;
6476 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6477 portp = &task->itk_dyld_notify[slot];
6478 if (*portp == IPC_PORT_NULL) {
6479 *portp = sright;
6480 active_count++;
6481 kr = KERN_SUCCESS;
6482 break;
6483 }
6484 }
6485
6486 out:
6487 assert(active_count > 0);
6488
6489 task_dyld_process_info_update_helper(task, active_count,
6490 (vm_map_address_t)ports_addr, release_ports, release_count);
6491 /* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6492
6493 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
6494
6495 return kr;
6496 }
6497
6498 /*
6499 * Routine: task_dyld_process_info_notify_deregister
6500 *
6501 * Remove the send right in the target task's itk_dyld_notify array that matches the
6502 * receive right name passed in. Deallocate the kernel memory for the array if it is the
6503 * last port to be deregistered, or if all ports have died. Also clean up any dead rights found in the array.
6504 *
6505 * Does not consume any reference.
6506 *
6507 * Args:
6508 * task: Target task for the deregistration.
6509 * rcv_name: The name denoting the receive right in caller's space.
6510 *
6511 * Returns:
6512 * KERN_SUCCESS: A matching entry was found and deregistration succeeded.
6513 * KERN_INVALID_TASK: task is invalid.
6514 * KERN_INVALID_NAME: name is invalid.
6515 * KERN_DENIED: Security policy denied this call.
6516 * KERN_FAILURE: A matching entry is not found.
6517 * KERN_INVALID_RIGHT: The name passed in does not represent a valid rcv right.
6518 *
6519 * Other error code see task_info().
6520 *
6521 * See Also:
6522 * task_dyld_process_info_notify_get_trap() in mach_kernelrpc.c
6523 */
6524 kern_return_t
6525 task_dyld_process_info_notify_deregister(
6526 task_t task,
6527 mach_port_name_t rcv_name)
6528 {
6529 struct task_dyld_info dyld_info;
6530 mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
6531 ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
6532 uint32_t release_count = 0, active_count = 0;
6533 boolean_t port_found = false;
6534 mach_vm_address_t ports_addr; /* a user space address */
6535 ipc_port_t sright;
6536 kern_return_t kr;
6537 ipc_port_t *portp;
6538
6539 if (task == TASK_NULL || task == kernel_task) {
6540 return KERN_INVALID_TASK;
6541 }
6542
6543 if (!MACH_PORT_VALID(rcv_name)) {
6544 return KERN_INVALID_NAME;
6545 }
6546
6547 #if CONFIG_MACF
6548 if (mac_task_check_dyld_process_info_notify_register()) {
6549 return KERN_DENIED;
6550 }
6551 #endif
6552
6553 kr = task_info(task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
6554 if (kr) {
6555 return kr;
6556 }
6557
6558 if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
6559 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6560 offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
6561 } else {
6562 ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
6563 offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
6564 }
6565
6566 kr = ipc_port_translate_receive(current_space(), rcv_name, &sright); /* does not produce port ref */
6567 if (kr) {
6568 return KERN_INVALID_RIGHT;
6569 }
6570
6571 ip_reference(sright);
6572 ip_mq_unlock(sright);
6573
6574 assert(sright != IPC_PORT_NULL);
6575
6576 lck_mtx_lock(&g_dyldinfo_mtx);
6577 itk_lock(task);
6578
6579 if (task->itk_dyld_notify == NULL) {
6580 itk_unlock(task);
6581 lck_mtx_unlock(&g_dyldinfo_mtx);
6582 ip_release(sright);
6583 return KERN_FAILURE;
6584 }
6585
6586 for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
6587 portp = &task->itk_dyld_notify[slot];
6588 if (*portp == sright) {
6589 release_ports[release_count++] = *portp;
6590 *portp = IPC_PORT_NULL;
6591 port_found = true;
6592 } else if ((*portp != IPC_PORT_NULL && !ip_active(*portp))) {
6593 release_ports[release_count++] = *portp;
6594 *portp = IPC_PORT_NULL;
6595 }
6596
6597 if (*portp != IPC_PORT_NULL) {
6598 active_count++;
6599 }
6600 }
6601
6602 task_dyld_process_info_update_helper(task, active_count,
6603 (vm_map_address_t)ports_addr, release_ports, release_count);
6604 /* itk_lock, g_dyldinfo_mtx are unlocked upon return */
6605
6606 ip_release(sright);
6607
6608 return port_found ? KERN_SUCCESS : KERN_FAILURE;
6609 }
6610
6611 /*
6612 * task_power_info
6613 *
6614 * Returns power stats for the task.
6615 * Note: Called with task locked.
6616 */
6617 void
6618 task_power_info_locked(
6619 task_t task,
6620 task_power_info_t info,
6621 gpu_energy_data_t ginfo,
6622 task_power_info_v2_t infov2,
6623 struct task_power_info_extra *extra_info)
6624 {
6625 thread_t thread;
6626 ledger_amount_t tmp;
6627
6628 uint64_t runnable_time_sum = 0;
6629
6630 task_lock_assert_owned(task);
6631
6632 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
6633 (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
6634 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
6635 (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
6636
6637 info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
6638 info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
6639
6640 struct recount_usage usage = { 0 };
6641 struct recount_usage usage_perf = { 0 };
6642 recount_task_usage_perf_only(task, &usage, &usage_perf);
6643
6644 info->total_user = usage.ru_metrics[RCT_LVL_USER].rm_time_mach;
6645 info->total_system = recount_usage_system_time_mach(&usage);
6646 runnable_time_sum = task->total_runnable_time;
6647
6648 if (ginfo) {
6649 ginfo->task_gpu_utilisation = task->task_gpu_ns;
6650 }
6651
6652 if (infov2) {
6653 infov2->task_ptime = recount_usage_time_mach(&usage_perf);
6654 infov2->task_pset_switches = task->ps_switch;
6655 #if CONFIG_PERVASIVE_ENERGY
6656 infov2->task_energy = usage.ru_energy_nj;
6657 #endif /* CONFIG_PERVASIVE_ENERGY */
6658 }
6659
6660 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6661 spl_t x;
6662
6663 if (thread->options & TH_OPT_IDLE_THREAD) {
6664 continue;
6665 }
6666
6667 x = splsched();
6668 thread_lock(thread);
6669
6670 info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
6671 info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
6672
6673 if (infov2) {
6674 infov2->task_pset_switches += thread->ps_switch;
6675 }
6676
6677 runnable_time_sum += timer_grab(&thread->runnable_timer);
6678
6679 if (ginfo) {
6680 ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
6681 }
6682 thread_unlock(thread);
6683 splx(x);
6684 }
6685
6686 if (extra_info) {
6687 extra_info->runnable_time = runnable_time_sum;
6688 #if CONFIG_PERVASIVE_CPI
6689 extra_info->cycles = recount_usage_cycles(&usage);
6690 extra_info->instructions = recount_usage_instructions(&usage);
6691 extra_info->pcycles = recount_usage_cycles(&usage_perf);
6692 extra_info->pinstructions = recount_usage_instructions(&usage_perf);
6693 extra_info->user_ptime = usage_perf.ru_metrics[RCT_LVL_USER].rm_time_mach;
6694 extra_info->system_ptime = recount_usage_system_time_mach(&usage_perf);
6695 #endif // CONFIG_PERVASIVE_CPI
6696 #if CONFIG_PERVASIVE_ENERGY
6697 extra_info->energy = usage.ru_energy_nj;
6698 extra_info->penergy = usage_perf.ru_energy_nj;
6699 #endif // CONFIG_PERVASIVE_ENERGY
6700 #if RECOUNT_SECURE_METRICS
6701 if (PE_i_can_has_debugger(NULL)) {
6702 extra_info->secure_time = usage.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6703 extra_info->secure_ptime = usage_perf.ru_metrics[RCT_LVL_SECURE].rm_time_mach;
6704 }
6705 #endif // RECOUNT_SECURE_METRICS
6706 }
6707 }
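/*
 * The locked routine above backs the TASK_POWER_INFO and TASK_POWER_INFO_V2
 * flavors of task_info().  A minimal user-space sketch:
 *
 *	task_power_info_v2_data_t power;
 *	mach_msg_type_number_t count = TASK_POWER_INFO_V2_COUNT;
 *
 *	if (task_info(mach_task_self(), TASK_POWER_INFO_V2,
 *	    (task_info_t)&power, &count) == KERN_SUCCESS) {
 *		// cpu_energy carries the wakeup and CPU-time fields filled in above,
 *		// gpu_energy the GPU utilisation
 *		uint64_t wakeups = power.cpu_energy.task_interrupt_wakeups;
 *		...
 *	}
 */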
6708
6709 /*
6710 * task_gpu_utilisation
6711 *
6712 * Returns the total gpu time used by all the threads of the task
6713 * (both dead and alive)
6714 */
6715 uint64_t
6716 task_gpu_utilisation(
6717 task_t task)
6718 {
6719 uint64_t gpu_time = 0;
6720 #if defined(__x86_64__)
6721 thread_t thread;
6722
6723 task_lock(task);
6724 gpu_time += task->task_gpu_ns;
6725
6726 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6727 spl_t x;
6728 x = splsched();
6729 thread_lock(thread);
6730 gpu_time += ml_gpu_stat(thread);
6731 thread_unlock(thread);
6732 splx(x);
6733 }
6734
6735 task_unlock(task);
6736 #else /* defined(__x86_64__) */
6737 /* silence compiler warning */
6738 (void)task;
6739 #endif /* defined(__x86_64__) */
6740 return gpu_time;
6741 }
6742
6743 /* This function updates the cpu time in the arrays for each
6744 * effective and requested QoS class
6745 */
6746 void
6747 task_update_cpu_time_qos_stats(
6748 task_t task,
6749 uint64_t *eqos_stats,
6750 uint64_t *rqos_stats)
6751 {
6752 if (!eqos_stats && !rqos_stats) {
6753 return;
6754 }
6755
6756 task_lock(task);
6757 thread_t thread;
6758 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6759 if (thread->options & TH_OPT_IDLE_THREAD) {
6760 continue;
6761 }
6762
6763 thread_update_qos_cpu_time(thread);
6764 }
6765
6766 if (eqos_stats) {
6767 eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
6768 eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
6769 eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
6770 eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
6771 eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
6772 eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
6773 eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
6774 }
6775
6776 if (rqos_stats) {
6777 rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
6778 rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
6779 rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
6780 rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
6781 rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
6782 rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
6783 rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
6784 }
6785
6786 task_unlock(task);
6787 }
6788
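/*
 * task_purgable_info
 *
 * Return the task's purgeable memory statistics. A temporary task
 * reference is held across the call into the VM purgeable accounting.
 */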
6789 kern_return_t
6790 task_purgable_info(
6791 task_t task,
6792 task_purgable_info_t *stats)
6793 {
6794 if (task == TASK_NULL || stats == NULL) {
6795 return KERN_INVALID_ARGUMENT;
6796 }
6797 /* Take task reference */
6798 task_reference(task);
6799 vm_purgeable_stats((vm_purgeable_info_t)stats, task);
6800 /* Drop task reference */
6801 task_deallocate(task);
6802 return KERN_SUCCESS;
6803 }
6804
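/*
 * task_vtimer_set
 *
 * Arm a virtual timer (user, profiling or rlimit flavor) on the task and
 * snapshot each thread's current time as the baseline for later deltas.
 */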
6805 void
6806 task_vtimer_set(
6807 task_t task,
6808 integer_t which)
6809 {
6810 thread_t thread;
6811 spl_t x;
6812
6813 task_lock(task);
6814
6815 task->vtimers |= which;
6816
6817 switch (which) {
6818 case TASK_VTIMER_USER:
6819 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6820 x = splsched();
6821 thread_lock(thread);
6822 struct recount_times_mach times = recount_thread_times(thread);
6823 thread->vtimer_user_save = times.rtm_user;
6824 thread_unlock(thread);
6825 splx(x);
6826 }
6827 break;
6828
6829 case TASK_VTIMER_PROF:
6830 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6831 x = splsched();
6832 thread_lock(thread);
6833 thread->vtimer_prof_save = recount_thread_time_mach(thread);
6834 thread_unlock(thread);
6835 splx(x);
6836 }
6837 break;
6838
6839 case TASK_VTIMER_RLIM:
6840 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6841 x = splsched();
6842 thread_lock(thread);
6843 thread->vtimer_rlim_save = recount_thread_time_mach(thread);
6844 thread_unlock(thread);
6845 splx(x);
6846 }
6847 break;
6848 }
6849
6850 task_unlock(task);
6851 }
6852
6853 void
6854 task_vtimer_clear(
6855 task_t task,
6856 integer_t which)
6857 {
6858 task_lock(task);
6859
6860 task->vtimers &= ~which;
6861
6862 task_unlock(task);
6863 }
6864
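/*
 * task_vtimer_update
 *
 * For the current thread, compute the time (in microseconds) elapsed since
 * the last vtimer snapshot of the given flavor and advance that snapshot.
 */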
6865 void
6866 task_vtimer_update(
6867 __unused
6868 task_t task,
6869 integer_t which,
6870 uint32_t *microsecs)
6871 {
6872 thread_t thread = current_thread();
6873 uint32_t tdelt = 0;
6874 clock_sec_t secs = 0;
6875 uint64_t tsum;
6876
6877 assert(task == current_task());
6878
6879 spl_t s = splsched();
6880 thread_lock(thread);
6881
6882 if ((task->vtimers & which) != (uint32_t)which) {
6883 thread_unlock(thread);
6884 splx(s);
6885 return;
6886 }
6887
6888 switch (which) {
6889 case TASK_VTIMER_USER:;
6890 struct recount_times_mach times = recount_thread_times(thread);
6891 tsum = times.rtm_user;
6892 tdelt = (uint32_t)(tsum - thread->vtimer_user_save);
6893 thread->vtimer_user_save = tsum;
6894 absolutetime_to_microtime(tdelt, &secs, microsecs);
6895 break;
6896
6897 case TASK_VTIMER_PROF:
6898 tsum = recount_current_thread_time_mach();
6899 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
6900 absolutetime_to_microtime(tdelt, &secs, microsecs);
6901 /* if the time delta is smaller than a usec, ignore */
6902 if (*microsecs != 0) {
6903 thread->vtimer_prof_save = tsum;
6904 }
6905 break;
6906
6907 case TASK_VTIMER_RLIM:
6908 tsum = recount_current_thread_time_mach();
6909 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
6910 thread->vtimer_rlim_save = tsum;
6911 absolutetime_to_microtime(tdelt, &secs, microsecs);
6912 break;
6913 }
6914
6915 thread_unlock(thread);
6916 splx(s);
6917 }
6918
6919 uint64_t
6920 get_task_dispatchqueue_offset(
6921 task_t task)
6922 {
6923 return task->dispatchqueue_offset;
6924 }
6925
6926 void
6927 task_synchronizer_destroy_all(task_t task)
6928 {
6929 /*
6930 * Destroy owned semaphores
6931 */
6932 semaphore_destroy_all(task);
6933 }
6934
6935 /*
6936 * Install default (machine-dependent) initial thread state
6937 * on the task. Subsequent thread creation will have this initial
6938 * state set on the thread by machine_thread_inherit_taskwide().
6939 * Flavors and structures are exactly the same as those to thread_set_state()
6940 */
6941 kern_return_t
6942 task_set_state(
6943 task_t task,
6944 int flavor,
6945 thread_state_t state,
6946 mach_msg_type_number_t state_count)
6947 {
6948 kern_return_t ret;
6949
6950 if (task == TASK_NULL) {
6951 return KERN_INVALID_ARGUMENT;
6952 }
6953
6954 task_lock(task);
6955
6956 if (!task->active) {
6957 task_unlock(task);
6958 return KERN_FAILURE;
6959 }
6960
6961 ret = machine_task_set_state(task, flavor, state, state_count);
6962
6963 task_unlock(task);
6964 return ret;
6965 }
6966
6967 /*
6968 * Examine the default (machine-dependent) initial thread state
6969 * on the task, as set by task_set_state(). Flavors and structures
6970 * are exactly the same as those passed to thread_get_state().
6971 */
6972 kern_return_t
6973 task_get_state(
6974 task_t task,
6975 int flavor,
6976 thread_state_t state,
6977 mach_msg_type_number_t *state_count)
6978 {
6979 kern_return_t ret;
6980
6981 if (task == TASK_NULL) {
6982 return KERN_INVALID_ARGUMENT;
6983 }
6984
6985 task_lock(task);
6986
6987 if (!task->active) {
6988 task_unlock(task);
6989 return KERN_FAILURE;
6990 }
6991
6992 ret = machine_task_get_state(task, flavor, state, state_count);
6993
6994 task_unlock(task);
6995 return ret;
6996 }
6997
6998
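/*
 * Deliver an EXC_GUARD exception (with a corpse) for the current task,
 * temporarily exempting it from jetsam so the corpse can be collected.
 * initproc is immune.
 */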
6999 static kern_return_t __attribute__((noinline, not_tail_called))
7000 PROC_VIOLATED_GUARD__SEND_EXC_GUARD(
7001 mach_exception_code_t code,
7002 mach_exception_subcode_t subcode,
7003 void *reason,
7004 boolean_t backtrace_only)
7005 {
7006 #ifdef MACH_BSD
7007 if (1 == proc_selfpid()) {
7008 return KERN_NOT_SUPPORTED; // initproc is immune
7009 }
7010 #endif
7011 mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
7012 [0] = code,
7013 [1] = subcode,
7014 };
7015 task_t task = current_task();
7016 kern_return_t kr;
7017 void *bsd_info = get_bsdtask_info(task);
7018
7019 /* (See jetsam-related comments below) */
7020
7021 proc_memstat_skip(bsd_info, TRUE);
7022 kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason, backtrace_only);
7023 proc_memstat_skip(bsd_info, FALSE);
7024 return kr;
7025 }
7026
7027 kern_return_t
7028 task_violated_guard(
7029 mach_exception_code_t code,
7030 mach_exception_subcode_t subcode,
7031 void *reason,
7032 bool backtrace_only)
7033 {
7034 return PROC_VIOLATED_GUARD__SEND_EXC_GUARD(code, subcode, reason, backtrace_only);
7035 }
7036
7037
7038 #if CONFIG_MEMORYSTATUS
7039
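/*
 * Accessors for the task's memlimit attributes: whether the configured
 * limit is the active or the inactive one, and whether exceeding it is
 * fatal to the process.
 */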
7040 boolean_t
7041 task_get_memlimit_is_active(task_t task)
7042 {
7043 assert(task != NULL);
7044
7045 if (task->memlimit_is_active == 1) {
7046 return TRUE;
7047 } else {
7048 return FALSE;
7049 }
7050 }
7051
7052 void
7053 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
7054 {
7055 assert(task != NULL);
7056
7057 if (memlimit_is_active) {
7058 task->memlimit_is_active = 1;
7059 } else {
7060 task->memlimit_is_active = 0;
7061 }
7062 }
7063
7064 boolean_t
7065 task_get_memlimit_is_fatal(task_t task)
7066 {
7067 assert(task != NULL);
7068
7069 if (task->memlimit_is_fatal == 1) {
7070 return TRUE;
7071 } else {
7072 return FALSE;
7073 }
7074 }
7075
7076 void
7077 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
7078 {
7079 assert(task != NULL);
7080
7081 if (memlimit_is_fatal) {
7082 task->memlimit_is_fatal = 1;
7083 } else {
7084 task->memlimit_is_fatal = 0;
7085 }
7086 }
7087
7088 uint64_t
7089 task_get_dirty_start(task_t task)
7090 {
7091 return task->memstat_dirty_start;
7092 }
7093
7094 void
7095 task_set_dirty_start(task_t task, uint64_t start)
7096 {
7097 task_lock(task);
7098 task->memstat_dirty_start = start;
7099 task_unlock(task);
7100 }
7101
7102 boolean_t
7103 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7104 {
7105 boolean_t triggered = FALSE;
7106
7107 assert(task == current_task());
7108
7109 /*
7110 * Returns TRUE if the task has already triggered an exc_resource exception.
7111 */
7112
7113 if (memlimit_is_active) {
7114 triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
7115 } else {
7116 triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
7117 }
7118
7119 return triggered;
7120 }
7121
7122 void
7123 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
7124 {
7125 assert(task == current_task());
7126
7127 /*
7128 * We allow one exc_resource per process per active/inactive limit.
7129 * The limit's fatal attribute does not come into play.
7130 */
7131
7132 if (memlimit_is_active) {
7133 task->memlimit_active_exc_resource = 1;
7134 } else {
7135 task->memlimit_inactive_exc_resource = 1;
7136 }
7137 }
7138
7139 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
7140
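/*
 * Called when the current task crosses its memory high watermark (or a
 * diagnostics limit): optionally take a user core dump, then deliver an
 * EXC_RESOURCE exception either synchronously or via a corpse fork, while
 * shielding the task from jetsam for the duration.
 */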
7141 void __attribute__((noinline))
7142 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, send_exec_resource_options_t exception_options)
7143 {
7144 task_t task = current_task();
7145 int pid = 0;
7146 const char *procname = "unknown";
7147 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
7148 boolean_t send_sync_exc_resource = FALSE;
7149 void *cur_bsd_info = get_bsdtask_info(current_task());
7150
7151 #ifdef MACH_BSD
7152 pid = proc_selfpid();
7153
7154 if (pid == 1) {
7155 /*
7156 * Cannot have ReportCrash analyzing
7157 * a suspended initproc.
7158 */
7159 return;
7160 }
7161
7162 if (cur_bsd_info != NULL) {
7163 procname = proc_name_address(cur_bsd_info);
7164 send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(cur_bsd_info);
7165 }
7166 #endif
7167 #if CONFIG_COREDUMP
7168 if (hwm_user_cores) {
7169 int error;
7170 uint64_t starttime, end;
7171 clock_sec_t secs = 0;
7172 uint32_t microsecs = 0;
7173
7174 starttime = mach_absolute_time();
7175 /*
7176 * Trigger a coredump of this process. Don't proceed unless we know we won't
7177 * be filling up the disk; and ignore the core size resource limit for this
7178 * core file.
7179 */
7180 if ((error = coredump(cur_bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
7181 printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
7182 }
7183 /*
7184 * coredump() leaves the task suspended.
7185 */
7186 task_resume_internal(current_task());
7187
7188 end = mach_absolute_time();
7189 absolutetime_to_microtime(end - starttime, &secs, &microsecs);
7190 printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
7191 proc_name_address(cur_bsd_info), pid, (int)secs, microsecs);
7192 }
7193 #endif /* CONFIG_COREDUMP */
7194
7195 if (disable_exc_resource) {
7196 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7197 "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
7198 return;
7199 }
7200 printf("process %s [%d] crossed memory %s (%d MB); EXC_RESOURCE "
7201 "\n", procname, pid, (!(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? "high watermark" : "diagnostics limit"), max_footprint_mb);
7202
7203 /*
7204 * A task that has triggered an EXC_RESOURCE, should not be
7205 * jetsammed when the device is under memory pressure. Here
7206 * we set the P_MEMSTAT_SKIP flag so that the process
7207 * will be skipped if the memorystatus_thread wakes up.
7208 *
7209 * This is a debugging aid to ensure we can get a corpse before
7210 * the jetsam thread kills the process.
7211 * Note that proc_memstat_skip is a no-op on release kernels.
7212 */
7213 proc_memstat_skip(cur_bsd_info, TRUE);
7214
7215 code[0] = code[1] = 0;
7216 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
7217 /*
7218 * Regardless of whether there was a diag memlimit violation, fatal exceptions shall always be
7219 * notified as high-level watermarks. In other words, if there was both a diag limit and a
7220 * watermark, and the violation is for the limit watermark, a watermark shall be reported.
7221 */
7222 if (!(exception_options & EXEC_RESOURCE_FATAL)) {
7223 EXC_RESOURCE_ENCODE_FLAVOR(code[0], !(exception_options & EXEC_RESOURCE_DIAGNOSTIC) ? FLAVOR_HIGH_WATERMARK : FLAVOR_DIAG_MEMLIMIT);
7224 } else {
7225 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK );
7226 }
7227 EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
7228 /*
7229 * Do not generate a corpse fork if the violation is a fatal one
7230 * or the process wants synchronous EXC_RESOURCE exceptions.
7231 */
7232 if ((exception_options & EXEC_RESOURCE_FATAL) || send_sync_exc_resource || !exc_via_corpse_forking) {
7233 if (exception_options & EXEC_RESOURCE_FATAL) {
7234 vm_map_set_corpse_source(task->map);
7235 }
7236
7237 /* Do not send a EXC_RESOURCE if corpse_for_fatal_memkill is set */
7238 if (send_sync_exc_resource || !corpse_for_fatal_memkill) {
7239 /*
7240 * Use the _internal_ variant so that no user-space
7241 * process can resume our task from under us.
7242 */
7243 task_suspend_internal(task);
7244 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
7245 task_resume_internal(task);
7246 }
7247 } else {
7248 if (disable_exc_resource_during_audio && audio_active) {
7249 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
7250 "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
7251 } else {
7252 task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
7253 code, EXCEPTION_CODE_MAX, NULL, FALSE);
7254 }
7255 }
7256
7257 /*
7258 * After the EXC_RESOURCE has been handled, we must clear the
7259 * P_MEMSTAT_SKIP flag so that the process can again be
7260 * considered for jetsam if the memorystatus_thread wakes up.
7261 */
7262 proc_memstat_skip(cur_bsd_info, FALSE); /* clear the flag */
7263 }
7264 /*
7265 * Callback invoked when a task exceeds its physical footprint limit.
7266 */
7267 void
7268 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
7269 {
7270 ledger_amount_t max_footprint = 0;
7271 ledger_amount_t max_footprint_mb = 0;
7272 #if DEBUG || DEVELOPMENT
7273 ledger_amount_t diag_threshold_limit_mb = 0;
7274 ledger_amount_t diag_threshold_limit = 0;
7275 #endif
7276 #if CONFIG_DEFERRED_RECLAIM
7277 ledger_amount_t current_footprint;
7278 #endif /* CONFIG_DEFERRED_RECLAIM */
7279 task_t task;
7280 send_exec_resource_is_warning is_warning = IS_NOT_WARNING;
7281 boolean_t memlimit_is_active;
7282 send_exec_resource_is_fatal memlimit_is_fatal;
7283 send_exec_resource_is_diagnostics is_diag_mem_threshold = IS_NOT_DIAGNOSTICS;
7284 if (warning == LEDGER_WARNING_DIAG_MEM_THRESHOLD) {
7285 is_diag_mem_threshold = IS_DIAGNOSTICS;
7286 is_warning = IS_WARNING;
7287 } else if (warning == LEDGER_WARNING_DIPPED_BELOW) {
7288 /*
7289 * Task memory limits only provide a warning on the way up.
7290 */
7291 return;
7292 } else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
7293 /*
7294 * This task is in danger of violating a memory limit,
7295 * It has exceeded a percentage level of the limit.
7296 */
7297 is_warning = IS_WARNING;
7298 } else {
7299 /*
7300 * The task has exceeded the physical footprint limit.
7301 * This is not a warning but a true limit violation.
7302 */
7303 is_warning = IS_NOT_WARNING;
7304 }
7305
7306 task = current_task();
7307
7308 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
7309 #if DEBUG || DEVELOPMENT
7310 ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &diag_threshold_limit);
7311 #endif
7312 #if CONFIG_DEFERRED_RECLAIM
7313 if (task->deferred_reclamation_metadata != NULL) {
7314 /*
7315 * Task is enrolled in deferred reclamation.
7316 * Do a reclaim to ensure it's really over its limit.
7317 */
7318 vm_deferred_reclamation_reclaim_from_task_sync(task, UINT64_MAX);
7319 ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &current_footprint);
7320 if (current_footprint < max_footprint) {
7321 return;
7322 }
7323 }
7324 #endif /* CONFIG_DEFERRED_RECLAIM */
7325 max_footprint_mb = max_footprint >> 20;
7326 #if DEBUG || DEVELOPMENT
7327 diag_threshold_limit_mb = diag_threshold_limit >> 20;
7328 #endif
7329 memlimit_is_active = task_get_memlimit_is_active(task);
7330 memlimit_is_fatal = task_get_memlimit_is_fatal(task) == FALSE ? IS_NOT_FATAL : IS_FATAL;
7331 #if DEBUG || DEVELOPMENT
7332 if (is_diag_mem_threshold == IS_NOT_DIAGNOSTICS) {
7333 task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7334 } else {
7335 task_process_crossed_limit_diag(diag_threshold_limit_mb);
7336 }
7337 #else
7338 task_process_crossed_limit_no_diag(task, max_footprint_mb, memlimit_is_fatal, memlimit_is_active, is_warning);
7339 #endif
7340 }
7341
7342 /*
7343 * Actions to perform when a process has crossed a watermark or has a fatal memory consumption */
7344 static inline void
7345 task_process_crossed_limit_no_diag(task_t task, ledger_amount_t ledger_limit_size, bool memlimit_is_fatal, bool memlimit_is_active, send_exec_resource_is_warning is_warning)
7346 {
7347 send_exec_resource_options_t exception_options = 0;
7348 if (memlimit_is_fatal) {
7349 exception_options |= EXEC_RESOURCE_FATAL;
7350 }
7351 /*
7352 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7353 * We only generate the exception once per process per memlimit (active/inactive limit).
7354 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
7355 * and we disable it by marking that memlimit as exception triggered.
7356 */
7357 if (is_warning == IS_NOT_WARNING && !task_has_triggered_exc_resource(task, memlimit_is_active)) {
7358 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7359 // If it was not a diag threshold (i.e. it was a memory limit), then we do not want more signalling;
7360 // however, if it was a diag limit, the user may reload a different limit and signal the violation again
7361 memorystatus_log_exception((int)ledger_limit_size, memlimit_is_active, memlimit_is_fatal);
7362 task_mark_has_triggered_exc_resource(task, memlimit_is_active);
7363 }
7364 memorystatus_on_ledger_footprint_exceeded(is_warning == IS_NOT_WARNING ? FALSE : TRUE, memlimit_is_active, memlimit_is_fatal);
7365 }
7366
7367 #if DEBUG || DEVELOPMENT
7368 /**
7369 * Actions to take when a process has crossed the diagnostics limit
7370 */
7371 static inline void
7372 task_process_crossed_limit_diag(ledger_amount_t ledger_limit_size)
7373 {
7374 /*
7375 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
7376 * In the case of the diagnostics thresholds, the exception will be signaled only once, but the
7377 * inhibit / rearm mechanism is performed at the ledger level.
7378 */
7379 send_exec_resource_options_t exception_options = EXEC_RESOURCE_DIAGNOSTIC;
7380 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)ledger_limit_size, exception_options);
7381 memorystatus_log_diag_threshold_exception((int)ledger_limit_size);
7382 }
7383 #endif
7384
7385 extern int proc_check_footprint_priv(void);
7386
7387 kern_return_t
7388 task_set_phys_footprint_limit(
7389 task_t task,
7390 int new_limit_mb,
7391 int *old_limit_mb)
7392 {
7393 kern_return_t error;
7394
7395 boolean_t memlimit_is_active;
7396 boolean_t memlimit_is_fatal;
7397
7398 if ((error = proc_check_footprint_priv())) {
7399 return KERN_NO_ACCESS;
7400 }
7401
7402 /*
7403 * This call should probably be obsoleted.
7404 * But for now, we default to current state.
7405 */
7406 memlimit_is_active = task_get_memlimit_is_active(task);
7407 memlimit_is_fatal = task_get_memlimit_is_fatal(task);
7408
7409 return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
7410 }
7411
7412 /*
7413 * Set the limit of diagnostics memory consumption for a concrete task
7414 */
7415 #if CONFIG_MEMORYSTATUS
7416 #if DEVELOPMENT || DEBUG
7417 kern_return_t
7418 task_set_diag_footprint_limit(
7419 task_t task,
7420 uint64_t new_limit_mb,
7421 uint64_t *old_limit_mb)
7422 {
7423 kern_return_t error;
7424
7425 if ((error = proc_check_footprint_priv())) {
7426 return KERN_NO_ACCESS;
7427 }
7428
7429 return task_set_diag_footprint_limit_internal(task, new_limit_mb, old_limit_mb);
7430 }
7431
7432 #endif // DEVELOPMENT || DEBUG
7433 #endif // CONFIG_MEMORYSTATUS
7434
7435 kern_return_t
7436 task_convert_phys_footprint_limit(
7437 int limit_mb,
7438 int *converted_limit_mb)
7439 {
7440 if (limit_mb == -1) {
7441 /*
7442 * No limit
7443 */
7444 if (max_task_footprint != 0) {
7445 *converted_limit_mb = (int)(max_task_footprint / 1024 / 1024); /* bytes to MB */
7446 } else {
7447 *converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
7448 }
7449 } else {
7450 /* nothing to convert */
7451 *converted_limit_mb = limit_mb;
7452 }
7453 return KERN_SUCCESS;
7454 }
7455
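/*
 * Set (or, when new_limit_mb is -1, remove) the task's physical footprint
 * ledger limit, update the memlimit active/fatal attributes and, on
 * DEVELOPMENT/DEBUG kernels, keep the diagnostics threshold consistent
 * with the new limit.
 */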
7456 kern_return_t
7457 task_set_phys_footprint_limit_internal(
7458 task_t task,
7459 int new_limit_mb,
7460 int *old_limit_mb,
7461 boolean_t memlimit_is_active,
7462 boolean_t memlimit_is_fatal)
7463 {
7464 ledger_amount_t old;
7465 kern_return_t ret;
7466 #if DEVELOPMENT || DEBUG
7467 diagthreshold_check_return diag_threshold_validity;
7468 #endif
7469 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
7470
7471 if (ret != KERN_SUCCESS) {
7472 return ret;
7473 }
7474 /**
7475 * We may need to re-enable the diag threshold, so get its value
7476 * and current status.
7477 */
7478 #if DEVELOPMENT || DEBUG
7479 diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_mb, false);
7480 /**
7481 * If the footprint limit and the diagnostics threshold are going to be the same, disable the threshold
7482 */
7483 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7484 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7485 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7486 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7487 }
7488 #endif
7489
7490 /*
7491 * Check that limit >> 20 will not give an "unexpected" 32-bit
7492 * result. There are, however, implicit assumptions that -1 mb limit
7493 * equates to LEDGER_LIMIT_INFINITY.
7494 */
7495 assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
7496
7497 if (old_limit_mb) {
7498 *old_limit_mb = (int)(old >> 20);
7499 }
7500
7501 if (new_limit_mb == -1) {
7502 /*
7503 * Caller wishes to remove the limit.
7504 */
7505 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7506 max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
7507 max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0);
7508
7509 task_lock(task);
7510 task_set_memlimit_is_active(task, memlimit_is_active);
7511 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7512 task_unlock(task);
7513 /**
7514 * If the diagnostics were disabled, and now we have a new limit, we have to re-enable it.
7515 */
7516 #if DEVELOPMENT || DEBUG
7517 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7518 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7519 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7520 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7521 }
7522 #endif
7523 return KERN_SUCCESS;
7524 }
7525
7526 #ifdef CONFIG_NOMONITORS
7527 return KERN_SUCCESS;
7528 #endif /* CONFIG_NOMONITORS */
7529
7530 task_lock(task);
7531
7532 if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
7533 (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
7534 (((ledger_amount_t)new_limit_mb << 20) == old)) {
7535 /*
7536 * memlimit state is not changing
7537 */
7538 task_unlock(task);
7539 return KERN_SUCCESS;
7540 }
7541
7542 task_set_memlimit_is_active(task, memlimit_is_active);
7543 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
7544
7545 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
7546 (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
7547
7548 if (task == current_task()) {
7549 ledger_check_new_balance(current_thread(), task->ledger,
7550 task_ledgers.phys_footprint);
7551 }
7552
7553 task_unlock(task);
7554 #if DEVELOPMENT || DEBUG
7555 if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7556 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7557 }
7558 #endif
7559
7560 return KERN_SUCCESS;
7561 }
7562
7563 #if RESETTABLE_DIAG_FOOTPRINT_LIMITS
7564 kern_return_t
7565 task_set_diag_footprint_limit_internal(
7566 task_t task,
7567 uint64_t new_limit_bytes,
7568 uint64_t *old_limit_bytes)
7569 {
7570 ledger_amount_t old = 0;
7571 kern_return_t ret = KERN_SUCCESS;
7572 diagthreshold_check_return diag_threshold_validity;
7573 ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &old);
7574
7575 if (ret != KERN_SUCCESS) {
7576 return ret;
7577 }
7578 /**
7579 * We may need to re-enable the diag threshold, so get its value
7580 * and current status.
7581 */
7582 diag_threshold_validity = task_check_memorythreshold_is_valid( task, new_limit_bytes >> 20, true);
7583 /**
7584 * If the footprint limit and the diagnostics threshold are going to be the same, disable the threshold
7585 */
7586 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7587 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7588 }
7589
7590 /*
7591 * Check that limit >> 20 will not give an "unexpected" 32-bit
7592 * result. There are, however, implicit assumptions that -1 mb limit
7593 * equates to LEDGER_LIMIT_INFINITY.
7594 */
7595 if (old_limit_bytes) {
7596 *old_limit_bytes = old;
7597 }
7598
7599 if (new_limit_bytes == -1) {
7600 /*
7601 * Caller wishes to remove the limit.
7602 */
7603 ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7604 LEDGER_LIMIT_INFINITY);
7605 /*
7606 * If the memory diagnostics flag was disabled, enable it again
7607 */
7608 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7609 return KERN_SUCCESS;
7610 }
7611
7612 #ifdef CONFIG_NOMONITORS
7613 return KERN_SUCCESS;
7614 #else
7615
7616 task_lock(task);
7617 ledger_set_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint,
7618 (ledger_amount_t)new_limit_bytes );
7619 if (task == current_task()) {
7620 ledger_check_new_balance(current_thread(), task->ledger,
7621 task_ledgers.phys_footprint);
7622 }
7623
7624 task_unlock(task);
7625 if (diag_threshold_validity == THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED) {
7626 ledger_set_diag_mem_threshold_disabled(task->ledger, task_ledgers.phys_footprint);
7627 } else if (diag_threshold_validity == THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED) {
7628 ledger_set_diag_mem_threshold_enabled(task->ledger, task_ledgers.phys_footprint);
7629 }
7630
7631 return KERN_SUCCESS;
7632 #endif /* CONFIG_NOMONITORS */
7633 }
7634
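/*
 * Return the task's current diagnostics memory threshold in bytes
 * (-1 if no limit is set) along with the enablement state of the threshold.
 */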
7635 kern_return_t
7636 task_get_diag_footprint_limit_internal(
7637 task_t task,
7638 uint64_t *new_limit_bytes,
7639 bool *threshold_disabled)
7640 {
7641 ledger_amount_t ledger_limit;
7642 kern_return_t ret = KERN_SUCCESS;
7643 if (new_limit_bytes == NULL || threshold_disabled == NULL) {
7644 return KERN_INVALID_ARGUMENT;
7645 }
7646 ret = ledger_get_diag_mem_threshold(task->ledger, task_ledgers.phys_footprint, &ledger_limit);
7647 if (ledger_limit == LEDGER_LIMIT_INFINITY) {
7648 ledger_limit = -1;
7649 }
7650 if (ret == KERN_SUCCESS) {
7651 *new_limit_bytes = ledger_limit;
7652 ret = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, threshold_disabled);
7653 }
7654 return ret;
7655 }
7656 #endif /* RESETTABLE_DIAG_FOOTPRINT_LIMITS */
7657
7658
7659 kern_return_t
7660 task_get_phys_footprint_limit(
7661 task_t task,
7662 int *limit_mb)
7663 {
7664 ledger_amount_t limit;
7665 kern_return_t ret;
7666
7667 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
7668 if (ret != KERN_SUCCESS) {
7669 return ret;
7670 }
7671
7672 /*
7673 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
7674 * result. There are, however, implicit assumptions that -1 mb limit
7675 * equates to LEDGER_LIMIT_INFINITY.
7676 */
7677 assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
7678 *limit_mb = (int)(limit >> 20);
7679
7680 return KERN_SUCCESS;
7681 }
7682 #else /* CONFIG_MEMORYSTATUS */
7683 kern_return_t
7684 task_set_phys_footprint_limit(
7685 __unused task_t task,
7686 __unused int new_limit_mb,
7687 __unused int *old_limit_mb)
7688 {
7689 return KERN_FAILURE;
7690 }
7691
7692 kern_return_t
7693 task_get_phys_footprint_limit(
7694 __unused task_t task,
7695 __unused int *limit_mb)
7696 {
7697 return KERN_FAILURE;
7698 }
7699 #endif /* CONFIG_MEMORYSTATUS */
7700
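/*
 * Accessors for the security and audit tokens, which live in the task's
 * read-only data and must be updated through the zalloc_ro interface.
 */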
7701 security_token_t *
7702 task_get_sec_token(task_t task)
7703 {
7704 return &task_get_ro(task)->task_tokens.sec_token;
7705 }
7706
7707 void
7708 task_set_sec_token(task_t task, security_token_t *token)
7709 {
7710 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7711 task_tokens.sec_token, token);
7712 }
7713
7714 audit_token_t *
7715 task_get_audit_token(task_t task)
7716 {
7717 return &task_get_ro(task)->task_tokens.audit_token;
7718 }
7719
7720 void
7721 task_set_audit_token(task_t task, audit_token_t *token)
7722 {
7723 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7724 task_tokens.audit_token, token);
7725 }
7726
7727 void
7728 task_set_tokens(task_t task, security_token_t *sec_token, audit_token_t *audit_token)
7729 {
7730 struct task_token_ro_data tokens;
7731
7732 tokens = task_get_ro(task)->task_tokens;
7733 tokens.sec_token = *sec_token;
7734 tokens.audit_token = *audit_token;
7735
7736 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task), task_tokens,
7737 &tokens);
7738 }
7739
7740 boolean_t
7741 task_is_privileged(task_t task)
7742 {
7743 return task_get_sec_token(task)->val[0] == 0;
7744 }
7745
7746 #ifdef CONFIG_MACF
7747 uint8_t *
7748 task_get_mach_trap_filter_mask(task_t task)
7749 {
7750 return task_get_ro(task)->task_filters.mach_trap_filter_mask;
7751 }
7752
7753 void
7754 task_set_mach_trap_filter_mask(task_t task, uint8_t *mask)
7755 {
7756 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7757 task_filters.mach_trap_filter_mask, &mask);
7758 }
7759
7760 uint8_t *
7761 task_get_mach_kobj_filter_mask(task_t task)
7762 {
7763 return task_get_ro(task)->task_filters.mach_kobj_filter_mask;
7764 }
7765
7766 mach_vm_address_t
7767 task_get_all_image_info_addr(task_t task)
7768 {
7769 return task->all_image_info_addr;
7770 }
7771
7772 void
7773 task_set_mach_kobj_filter_mask(task_t task, uint8_t *mask)
7774 {
7775 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
7776 task_filters.mach_kobj_filter_mask, &mask);
7777 }
7778
7779 #endif /* CONFIG_MACF */
7780
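/*
 * Cap the number of threads the task may create; values above
 * TASK_MAX_THREAD_LIMIT are ignored.
 */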
7781 void
7782 task_set_thread_limit(task_t task, uint16_t thread_limit)
7783 {
7784 assert(task != kernel_task);
7785 if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
7786 task_lock(task);
7787 task->task_thread_limit = thread_limit;
7788 task_unlock(task);
7789 }
7790 }
7791
7792 #if CONFIG_PROC_RESOURCE_LIMITS
7793 kern_return_t
7794 task_set_port_space_limits(task_t task, uint32_t soft_limit, uint32_t hard_limit)
7795 {
7796 return ipc_space_set_table_size_limits(task->itk_space, soft_limit, hard_limit);
7797 }
7798 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
7799
7800 #if XNU_TARGET_OS_OSX
7801 boolean_t
7802 task_has_system_version_compat_enabled(task_t task)
7803 {
7804 boolean_t enabled = FALSE;
7805
7806 task_lock(task);
7807 enabled = (task->t_flags & TF_SYS_VERSION_COMPAT);
7808 task_unlock(task);
7809
7810 return enabled;
7811 }
7812
7813 void
7814 task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat)
7815 {
7816 assert(task == current_task());
7817 assert(task != kernel_task);
7818
7819 task_lock(task);
7820 if (enable_system_version_compat) {
7821 task->t_flags |= TF_SYS_VERSION_COMPAT;
7822 } else {
7823 task->t_flags &= ~TF_SYS_VERSION_COMPAT;
7824 }
7825 task_unlock(task);
7826 }
7827 #endif /* XNU_TARGET_OS_OSX */
7828
7829 /*
7830 * We need to export some functions to other components that
7831 * are currently implemented in macros within the osfmk
7832 * component. Just export them as functions of the same name.
7833 */
7834 boolean_t
7835 is_kerneltask(task_t t)
7836 {
7837 if (t == kernel_task) {
7838 return TRUE;
7839 }
7840
7841 return FALSE;
7842 }
7843
7844 boolean_t
7845 is_corpsefork(task_t t)
7846 {
7847 return task_is_a_corpse_fork(t);
7848 }
7849
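/*
 * current_task_early
 *
 * Like current_task(), but returns TASK_NULL during early boot if the
 * current thread's read-only data has not been set up yet.
 */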
7850 task_t
7851 current_task_early(void)
7852 {
7853 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
7854 if (current_thread()->t_tro == NULL) {
7855 return TASK_NULL;
7856 }
7857 }
7858 return get_threadtask(current_thread());
7859 }
7860
7861 task_t
7862 current_task(void)
7863 {
7864 return get_threadtask(current_thread());
7865 }
7866
7867 /* defined in bsd/kern/kern_prot.c */
7868 extern int get_audit_token_pid(audit_token_t *audit_token);
7869
7870 int
7871 task_pid(task_t task)
7872 {
7873 if (task) {
7874 return get_audit_token_pid(task_get_audit_token(task));
7875 }
7876 return -1;
7877 }
7878
7879 #if __has_feature(ptrauth_calls)
7880 /*
7881 * Get the shared region id and jop signing key for the task.
7882 * The function will allocate a kalloc buffer and return
7883 * it to the caller; the caller needs to free it. This is used
7884 * for getting the information via task port.
7885 */
7886 char *
7887 task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid)
7888 {
7889 size_t len;
7890 char *shared_region_id = NULL;
7891
7892 task_lock(task);
7893 if (task->shared_region_id == NULL) {
7894 task_unlock(task);
7895 return NULL;
7896 }
7897 len = strlen(task->shared_region_id) + 1;
7898
7899 /* don't hold task lock while allocating */
7900 task_unlock(task);
7901 shared_region_id = kalloc_data(len, Z_WAITOK);
7902 task_lock(task);
7903
7904 if (task->shared_region_id == NULL) {
7905 task_unlock(task);
7906 kfree_data(shared_region_id, len);
7907 return NULL;
7908 }
7909 assert(len == strlen(task->shared_region_id) + 1); /* should never change */
7910 strlcpy(shared_region_id, task->shared_region_id, len);
7911 task_unlock(task);
7912
7913 /* find key from its auth pager */
7914 if (jop_pid != NULL) {
7915 *jop_pid = shared_region_find_key(shared_region_id);
7916 }
7917
7918 return shared_region_id;
7919 }
7920
7921 /*
7922 * set the shared region id for a task
7923 */
7924 void
7925 task_set_shared_region_id(task_t task, char *id)
7926 {
7927 char *old_id;
7928
7929 task_lock(task);
7930 old_id = task->shared_region_id;
7931 task->shared_region_id = id;
7932 task->shared_region_auth_remapped = FALSE;
7933 task_unlock(task);
7934
7935 /* free any pre-existing shared region id */
7936 if (old_id != NULL) {
7937 shared_region_key_dealloc(old_id);
7938 kfree_data(old_id, strlen(old_id) + 1);
7939 }
7940 }
7941 #endif /* __has_feature(ptrauth_calls) */
7942
7943 /*
7944 * This routine finds a thread in a task by its unique id
7945 * Returns a referenced thread or THREAD_NULL if the thread was not found
7946 *
7947 * TODO: This is super inefficient - it's an O(threads in task) list walk!
7948 * We should make a tid hash, or transition all tid clients to thread ports
7949 *
7950 * Precondition: No locks held (will take task lock)
7951 */
7952 thread_t
7953 task_findtid(task_t task, uint64_t tid)
7954 {
7955 thread_t self = current_thread();
7956 thread_t found_thread = THREAD_NULL;
7957 thread_t iter_thread = THREAD_NULL;
7958
7959 /* Short-circuit the lookup if we're looking up ourselves */
7960 if (tid == self->thread_id || tid == TID_NULL) {
7961 assert(get_threadtask(self) == task);
7962
7963 thread_reference(self);
7964
7965 return self;
7966 }
7967
7968 task_lock(task);
7969
7970 queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
7971 if (iter_thread->thread_id == tid) {
7972 found_thread = iter_thread;
7973 thread_reference(found_thread);
7974 break;
7975 }
7976 }
7977
7978 task_unlock(task);
7979
7980 return found_thread;
7981 }
7982
7983 int
7984 pid_from_task(task_t task)
7985 {
7986 int pid = -1;
7987 void *bsd_info = get_bsdtask_info(task);
7988
7989 if (bsd_info) {
7990 pid = proc_pid(bsd_info);
7991 } else {
7992 pid = task_pid(task);
7993 }
7994
7995 return pid;
7996 }
7997
7998 /*
7999 * Control the CPU usage monitor for a task.
8000 */
8001 kern_return_t
8002 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
8003 {
8004 int error = KERN_SUCCESS;
8005
8006 if (*flags & CPUMON_MAKE_FATAL) {
8007 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
8008 } else {
8009 error = KERN_INVALID_ARGUMENT;
8010 }
8011
8012 return error;
8013 }
8014
8015 /*
8016 * Control the wakeups monitor for a task.
8017 */
8018 kern_return_t
8019 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
8020 {
8021 ledger_t ledger = task->ledger;
8022
8023 task_lock(task);
8024 if (*flags & WAKEMON_GET_PARAMS) {
8025 ledger_amount_t limit;
8026 uint64_t period;
8027
8028 ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
8029 ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
8030
8031 if (limit != LEDGER_LIMIT_INFINITY) {
8032 /*
8033 * An active limit means the wakeups monitor is enabled.
8034 */
8035 *rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
8036 *flags = WAKEMON_ENABLE;
8037 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
8038 *flags |= WAKEMON_MAKE_FATAL;
8039 }
8040 } else {
8041 *flags = WAKEMON_DISABLE;
8042 *rate_hz = -1;
8043 }
8044
8045 /*
8046 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
8047 */
8048 task_unlock(task);
8049 return KERN_SUCCESS;
8050 }
8051
8052 if (*flags & WAKEMON_ENABLE) {
8053 if (*flags & WAKEMON_SET_DEFAULTS) {
8054 *rate_hz = task_wakeups_monitor_rate;
8055 }
8056
8057 #ifndef CONFIG_NOMONITORS
8058 if (*flags & WAKEMON_MAKE_FATAL) {
8059 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8060 }
8061 #endif /* CONFIG_NOMONITORS */
8062
8063 if (*rate_hz <= 0) {
8064 task_unlock(task);
8065 return KERN_INVALID_ARGUMENT;
8066 }
8067
8068 #ifndef CONFIG_NOMONITORS
8069 ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
8070 (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct);
8071 ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
8072 ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
8073 #endif /* CONFIG_NOMONITORS */
8074 } else if (*flags & WAKEMON_DISABLE) {
8075 /*
8076 * Caller wishes to disable wakeups monitor on the task.
8077 *
8078 * Remove the limit & callback on the wakeups ledger entry.
8079 */
8080 ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
8081 ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
8082 }
8083
8084 task_unlock(task);
8085 return KERN_SUCCESS;
8086 }
8087
8088 void
8089 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
8090 {
8091 if (warning == 0) {
8092 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
8093 }
8094 }
8095
8096 TUNABLE(bool, enable_wakeup_reports, "enable_wakeup_reports", false); /* Enable wakeup reports. */
8097
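/*
 * Fired when the current task exceeds its interrupt-wakeups ledger limit:
 * log the violation, optionally send a resource-violation notification and
 * an EXC_RESOURCE exception, and terminate the task if the monitor is fatal.
 */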
8098 void __attribute__((noinline))
8099 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
8100 {
8101 task_t task = current_task();
8102 int pid = 0;
8103 const char *procname = "unknown";
8104 boolean_t fatal;
8105 kern_return_t kr;
8106 #ifdef EXC_RESOURCE_MONITORS
8107 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8108 #endif /* EXC_RESOURCE_MONITORS */
8109 struct ledger_entry_info lei;
8110
8111 #ifdef MACH_BSD
8112 pid = proc_selfpid();
8113 if (get_bsdtask_info(task) != NULL) {
8114 procname = proc_name_address(get_bsdtask_info(current_task()));
8115 }
8116 #endif
8117
8118 ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
8119
8120 /*
8121 * Disable the exception notification so we don't overwhelm
8122 * the listener with an endless stream of redundant exceptions.
8123 * TODO: detect whether another thread is already reporting the violation.
8124 */
8125 uint32_t flags = WAKEMON_DISABLE;
8126 task_wakeups_monitor_ctl(task, &flags, NULL);
8127
8128 fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
8129 trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
8130 os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
8131 "over ~%llu seconds, averaging %llu wakes / second and "
8132 "violating a %slimit of %llu wakes over %llu seconds.\n",
8133 procname, pid,
8134 lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
8135 lei.lei_last_refill == 0 ? 0 :
8136 (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
8137 fatal ? "FATAL " : "",
8138 lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
8139
8140 if (enable_wakeup_reports) {
8141 kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
8142 fatal ? kRNFatalLimitFlag : 0);
8143 if (kr) {
8144 printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
8145 }
8146 }
8147
8148 #ifdef EXC_RESOURCE_MONITORS
8149 if (disable_exc_resource) {
8150 printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8151 "suppressed by a boot-arg\n", procname, pid);
8152 return;
8153 }
8154 if (disable_exc_resource_during_audio && audio_active) {
8155 os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8156 "suppressed due to audio playback\n", procname, pid);
8157 return;
8158 }
8159 if (lei.lei_last_refill == 0) {
8160 os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
8161 "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
8162 }
8163
8164 code[0] = code[1] = 0;
8165 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
8166 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
8167 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
8168 NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
8169 EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
8170 lei.lei_last_refill);
8171 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
8172 NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
8173 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8174 #endif /* EXC_RESOURCE_MONITORS */
8175
8176 if (fatal) {
8177 task_terminate_internal(task);
8178 }
8179 }
8180
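/*
 * Atomically fold an I/O delta into the global logical-writes counter and
 * report whether the telemetry threshold was crossed (the counter is reset
 * when it is).
 */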
8181 static boolean_t
8182 global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
8183 {
8184 int64_t old_count, new_count;
8185 boolean_t needs_telemetry;
8186
8187 do {
8188 new_count = old_count = *global_write_count;
8189 new_count += io_delta;
8190 if (new_count >= io_telemetry_limit) {
8191 new_count = 0;
8192 needs_telemetry = TRUE;
8193 } else {
8194 needs_telemetry = FALSE;
8195 }
8196 } while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
8197 return needs_telemetry;
8198 }
8199
8200 void
8201 task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags)
8202 {
8203 #if CONFIG_PHYS_WRITE_ACCT
8204 if (!io_size) {
8205 return;
8206 }
8207
8208 /*
8209 * task == NULL means that we have to update kernel_task ledgers
8210 */
8211 if (!task) {
8212 task = kernel_task;
8213 }
8214
8215 KDBG((VMDBG_CODE(DBG_VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE,
8216 task_pid(task), flavor, io_size, flags);
8217 DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags);
8218
8219 if (flags & TASK_BALANCE_CREDIT) {
8220 if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8221 OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8222 ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8223 }
8224 } else if (flags & TASK_BALANCE_DEBIT) {
8225 if (flavor == TASK_PHYSICAL_WRITE_METADATA) {
8226 OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes));
8227 ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size);
8228 }
8229 }
8230 #endif /* CONFIG_PHYS_WRITE_ACCT */
8231 }
8232
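/*
 * Account a logical write against the task: select the internal or external
 * ledger and counters based on the backing vnode, credit or debit them by
 * write flavor, and arm an I/O telemetry AST when the global threshold is hit.
 */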
8233 void
8234 task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
8235 {
8236 int64_t io_delta = 0;
8237 int64_t * global_counter_to_update;
8238 boolean_t needs_telemetry = FALSE;
8239 boolean_t is_external_device = FALSE;
8240 int ledger_to_update = 0;
8241 struct task_writes_counters * writes_counters_to_update;
8242
8243 if ((!task) || (!io_size) || (!vp)) {
8244 return;
8245 }
8246
8247 KDBG((VMDBG_CODE(DBG_VM_DATA_WRITE)) | DBG_FUNC_NONE,
8248 task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp));
8249 DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
8250
8251 // Is the drive backing this vnode internal or external to the system?
8252 if (vnode_isonexternalstorage(vp) == false) {
8253 global_counter_to_update = &global_logical_writes_count;
8254 ledger_to_update = task_ledgers.logical_writes;
8255 writes_counters_to_update = &task->task_writes_counters_internal;
8256 is_external_device = FALSE;
8257 } else {
8258 global_counter_to_update = &global_logical_writes_to_external_count;
8259 ledger_to_update = task_ledgers.logical_writes_to_external;
8260 writes_counters_to_update = &task->task_writes_counters_external;
8261 is_external_device = TRUE;
8262 }
8263
8264 switch (flags) {
8265 case TASK_WRITE_IMMEDIATE:
8266 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
8267 ledger_credit(task->ledger, ledger_to_update, io_size);
8268 if (!is_external_device) {
8269 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8270 }
8271 break;
8272 case TASK_WRITE_DEFERRED:
8273 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
8274 ledger_credit(task->ledger, ledger_to_update, io_size);
8275 if (!is_external_device) {
8276 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8277 }
8278 break;
8279 case TASK_WRITE_INVALIDATED:
8280 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
8281 ledger_debit(task->ledger, ledger_to_update, io_size);
8282 if (!is_external_device) {
8283 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
8284 }
8285 break;
8286 case TASK_WRITE_METADATA:
8287 OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
8288 ledger_credit(task->ledger, ledger_to_update, io_size);
8289 if (!is_external_device) {
8290 coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
8291 }
8292 break;
8293 }
8294
8295 io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
8296 if (io_telemetry_limit != 0) {
8297 /* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
8298 needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
8299 if (needs_telemetry && !is_external_device) {
8300 act_set_io_telemetry_ast(current_thread());
8301 }
8302 }
8303 }
8304
8305 /*
8306 * Control the I/O monitor for a task.
8307 */
8308 kern_return_t
8309 task_io_monitor_ctl(task_t task, uint32_t *flags)
8310 {
8311 ledger_t ledger = task->ledger;
8312
8313 task_lock(task);
8314 if (*flags & IOMON_ENABLE) {
8315 /* Configure the physical I/O ledger */
8316 ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
8317 ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
8318 } else if (*flags & IOMON_DISABLE) {
8319 /*
8320 * Caller wishes to disable I/O monitor on the task.
8321 */
8322 ledger_disable_refill(ledger, task_ledgers.physical_writes);
8323 ledger_disable_callback(ledger, task_ledgers.physical_writes);
8324 }
8325
8326 task_unlock(task);
8327 return KERN_SUCCESS;
8328 }
8329
8330 void
8331 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
8332 {
8333 if (warning == 0) {
8334 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
8335 }
8336 }
8337
8338 void __attribute__((noinline))
8339 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
8340 {
8341 int pid = 0;
8342 task_t task = current_task();
8343 #ifdef EXC_RESOURCE_MONITORS
8344 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8345 #endif /* EXC_RESOURCE_MONITORS */
8346 struct ledger_entry_info lei = {};
8347 kern_return_t kr;
8348
8349 #ifdef MACH_BSD
8350 pid = proc_selfpid();
8351 #endif
8352 /*
8353 * Get the ledger entry info. We need to do this before disabling the exception
8354 * to get correct values for all fields.
8355 */
8356 switch (flavor) {
8357 case FLAVOR_IO_PHYSICAL_WRITES:
8358 ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
8359 break;
8360 }
8361
8362
8363 /*
8364 * Disable the exception notification so we don't overwhelm
8365 * the listener with an endless stream of redundant exceptions.
8366 * TODO: detect whether another thread is already reporting the violation.
8367 */
8368 uint32_t flags = IOMON_DISABLE;
8369 task_io_monitor_ctl(task, &flags);
8370
8371 if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
8372 trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
8373 }
8374 os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
8375 pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
8376
8377 kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
8378 if (kr) {
8379 printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
8380 }
8381
8382 #ifdef EXC_RESOURCE_MONITORS
8383 code[0] = code[1] = 0;
8384 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
8385 EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
8386 EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
8387 EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
8388 EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
8389 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
8390 #endif /* EXC_RESOURCE_MONITORS */
8391 }
8392
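/*
 * AST handler run when the current task's IPC port table grows past a
 * configured limit; sends the "too many Mach ports" notification if needed.
 */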
8393 void
8394 task_port_space_ast(__unused task_t task)
8395 {
8396 uint32_t current_size, soft_limit, hard_limit;
8397 assert(task == current_task());
8398 bool should_notify = ipc_space_check_table_size_limit(task->itk_space,
8399 &current_size, &soft_limit, &hard_limit);
8400 if (should_notify) {
8401 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task, current_size, soft_limit, hard_limit);
8402 }
8403 }
8404
8405 #if CONFIG_PROC_RESOURCE_LIMITS
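/*
 * Allocate a send right whose no-senders notification will kill the task;
 * it is handed to userspace along with resource-limit violation
 * notifications as a "fatal port".
 */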
8406 static mach_port_t
8407 task_allocate_fatal_port(void)
8408 {
8409 mach_port_t task_fatal_port = MACH_PORT_NULL;
8410 task_id_token_t token;
8411
8412 kern_return_t kr = task_create_identity_token(current_task(), &token); /* Takes a reference on the token */
8413 if (kr) {
8414 return MACH_PORT_NULL;
8415 }
8416 task_fatal_port = ipc_kobject_alloc_port((ipc_kobject_t)token, IKOT_TASK_FATAL,
8417 IPC_KOBJECT_ALLOC_NSREQUEST | IPC_KOBJECT_ALLOC_MAKE_SEND);
8418
8419 task_id_token_set_port(token, task_fatal_port);
8420
8421 return task_fatal_port;
8422 }
8423
8424 static void
8425 task_fatal_port_no_senders(ipc_port_t port, __unused mach_port_mscount_t mscount)
8426 {
8427 task_t task = TASK_NULL;
8428 kern_return_t kr;
8429
8430 task_id_token_t token = ipc_kobject_get_stable(port, IKOT_TASK_FATAL);
8431
8432 assert(token != NULL);
8433 if (token) {
8434 kr = task_identity_token_get_task_grp(token, &task, TASK_GRP_KERNEL); /* takes a reference on task */
8435 if (task) {
8436 task_bsdtask_kill(task);
8437 task_deallocate(task);
8438 }
8439 task_id_token_release(token); /* consumes ref given by notification */
8440 }
8441 }
8442 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8443
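/*
 * Notify (and possibly kill) a process that has allocated too many Mach
 * ports: abort it with an EXC_RESOURCE exit when the system-wide table
 * limit is hit, or send a resource-violation notification carrying a fatal
 * port when a per-process hard limit is exceeded.
 */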
8444 void __attribute__((noinline))
8445 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_MACH_PORTS(task_t task, uint32_t current_size, uint32_t soft_limit, uint32_t hard_limit)
8446 {
8447 int pid = 0;
8448 char *procname = (char *) "unknown";
8449 __unused kern_return_t kr;
8450 __unused resource_notify_flags_t flags = kRNFlagsNone;
8451 __unused uint32_t limit;
8452 __unused mach_port_t task_fatal_port = MACH_PORT_NULL;
8453 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
8454
8455 pid = proc_selfpid();
8456 if (get_bsdtask_info(task) != NULL) {
8457 procname = proc_name_address(get_bsdtask_info(task));
8458 }
8459
8460 /*
8461 * Only kernel_task and launchd may be allowed to
8462 * have really large ipc space.
8463 */
8464 if (pid == 0 || pid == 1) {
8465 return;
8466 }
8467
8468 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many mach ports. \
8469 Num of ports allocated %u; \n", procname, pid, current_size);
8470
8471 /* Abort the process if it has hit the system-wide limit for ipc port table size */
8472 if (!hard_limit && !soft_limit) {
8473 code[0] = code[1] = 0;
8474 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_PORTS);
8475 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_PORT_SPACE_FULL);
8476 EXC_RESOURCE_PORTS_ENCODE_PORTS(code[0], current_size);
8477
8478 exception_info_t info = {
8479 .os_reason = OS_REASON_PORT_SPACE,
8480 .exception_type = EXC_RESOURCE,
8481 .mx_code = code[0],
8482 .mx_subcode = code[1]
8483 };
8484
8485 exit_with_mach_exception(current_proc(), info, PX_DEBUG_NO_HONOR);
8486 return;
8487 }
8488
8489 #if CONFIG_PROC_RESOURCE_LIMITS
8490 if (hard_limit > 0) {
8491 flags |= kRNHardLimitFlag;
8492 limit = hard_limit;
8493 task_fatal_port = task_allocate_fatal_port();
8494 if (!task_fatal_port) {
8495 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8496 task_bsdtask_kill(task);
8497 }
8498 } else {
8499 flags |= kRNSoftLimitFlag;
8500 limit = soft_limit;
8501 }
8502
8503 kr = send_resource_violation_with_fatal_port(send_port_space_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8504 if (kr) {
8505 os_log(OS_LOG_DEFAULT, "send_resource_violation(ports, ...): error %#x\n", kr);
8506 }
8507 if (task_fatal_port) {
8508 ipc_port_release_send(task_fatal_port);
8509 }
8510 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8511 }
8512
8513 #if CONFIG_PROC_RESOURCE_LIMITS
8514 void
8515 task_kqworkloop_ast(task_t task, int current_size, int soft_limit, int hard_limit)
8516 {
8517 assert(task == current_task());
8518 return SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task, current_size, soft_limit, hard_limit);
8519 }
8520
8521 void __attribute__((noinline))
8522 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_KQWORKLOOPS(task_t task, int current_size, int soft_limit, int hard_limit)
8523 {
8524 int pid = 0;
8525 char *procname = (char *) "unknown";
8526 #ifdef MACH_BSD
8527 pid = proc_selfpid();
8528 if (get_bsdtask_info(task) != NULL) {
8529 procname = proc_name_address(get_bsdtask_info(task));
8530 }
8531 #endif
8532 if (pid == 0 || pid == 1) {
8533 return;
8534 }
8535
8536 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many kqworkloops. \
8537 Num of kqworkloops allocated %u; \n", procname, pid, current_size);
8538
8539 int limit = 0;
8540 resource_notify_flags_t flags = kRNFlagsNone;
8541 mach_port_t task_fatal_port = MACH_PORT_NULL;
8542 if (hard_limit) {
8543 flags |= kRNHardLimitFlag;
8544 limit = hard_limit;
8545
8546 task_fatal_port = task_allocate_fatal_port();
8547 if (task_fatal_port == MACH_PORT_NULL) {
8548 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8549 task_bsdtask_kill(task);
8550 }
8551 } else {
8552 flags |= kRNSoftLimitFlag;
8553 limit = soft_limit;
8554 }
8555
8556 kern_return_t kr;
8557 kr = send_resource_violation_with_fatal_port(send_kqworkloops_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8558 if (kr) {
8559 os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(kqworkloops, ...): error %#x\n", kr);
8560 }
8561 if (task_fatal_port) {
8562 ipc_port_release_send(task_fatal_port);
8563 }
8564 }
8565
8566
8567 void
8568 task_filedesc_ast(__unused task_t task, __unused int current_size, __unused int soft_limit, __unused int hard_limit)
8569 {
8570 assert(task == current_task());
8571 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task, current_size, soft_limit, hard_limit);
8572 }
8573
8574 void __attribute__((noinline))
8575 SENDING_NOTIFICATION__THIS_PROCESS_HAS_TOO_MANY_FILE_DESCRIPTORS(task_t task, int current_size, int soft_limit, int hard_limit)
8576 {
8577 int pid = 0;
8578 char *procname = (char *) "unknown";
8579 kern_return_t kr;
8580 resource_notify_flags_t flags = kRNFlagsNone;
8581 int limit;
8582 mach_port_t task_fatal_port = MACH_PORT_NULL;
8583
8584 #ifdef MACH_BSD
8585 pid = proc_selfpid();
8586 if (get_bsdtask_info(task) != NULL) {
8587 procname = proc_name_address(get_bsdtask_info(task));
8588 }
8589 #endif
8590 /*
8591 * Only kernel_task and launchd may be allowed to
8592 * have a really large number of file descriptors.
8593 */
8594 if (pid == 0 || pid == 1) {
8595 return;
8596 }
8597
8598 os_log(OS_LOG_DEFAULT, "process %s[%d] caught allocating too many file descriptors. \
8599 Num of fds allocated %u; \n", procname, pid, current_size);
8600
8601 if (hard_limit > 0) {
8602 flags |= kRNHardLimitFlag;
8603 limit = hard_limit;
8604 task_fatal_port = task_allocate_fatal_port();
8605 if (!task_fatal_port) {
8606 os_log(OS_LOG_DEFAULT, "process %s[%d] Unable to create task token ident object", procname, pid);
8607 task_bsdtask_kill(task);
8608 }
8609 } else {
8610 flags |= kRNSoftLimitFlag;
8611 limit = soft_limit;
8612 }
8613
8614 kr = send_resource_violation_with_fatal_port(send_file_descriptors_violation, task, (int64_t)current_size, (int64_t)limit, task_fatal_port, flags);
8615 if (kr) {
8616 os_log(OS_LOG_DEFAULT, "send_resource_violation_with_fatal_port(filedesc, ...): error %#x\n", kr);
8617 }
8618 if (task_fatal_port) {
8619 ipc_port_release_send(task_fatal_port);
8620 }
8621 }
8622 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
8623
8624 /* Placeholders for the task set/get voucher interfaces */
8625 kern_return_t
8626 task_get_mach_voucher(
8627 task_t task,
8628 mach_voucher_selector_t __unused which,
8629 ipc_voucher_t *voucher)
8630 {
8631 if (TASK_NULL == task) {
8632 return KERN_INVALID_TASK;
8633 }
8634
8635 *voucher = NULL;
8636 return KERN_SUCCESS;
8637 }
8638
8639 kern_return_t
8640 task_set_mach_voucher(
8641 task_t task,
8642 ipc_voucher_t __unused voucher)
8643 {
8644 if (TASK_NULL == task) {
8645 return KERN_INVALID_TASK;
8646 }
8647
8648 return KERN_SUCCESS;
8649 }
8650
8651 kern_return_t
8652 task_swap_mach_voucher(
8653 __unused task_t task,
8654 __unused ipc_voucher_t new_voucher,
8655 ipc_voucher_t *in_out_old_voucher)
8656 {
8657 /*
8658 * Currently this function is only called from a MIG generated
8659 * routine which doesn't release the reference on the voucher
8660 * addressed by in_out_old_voucher. To avoid leaking this reference,
8661 * a call to release it has been added here.
8662 */
8663 ipc_voucher_release(*in_out_old_voucher);
8664 OS_ANALYZER_SUPPRESS("81787115") return KERN_NOT_SUPPORTED;
8665 }
8666
8667 void
8668 task_set_gpu_denied(task_t task, boolean_t denied)
8669 {
8670 task_lock(task);
8671
8672 if (denied) {
8673 task->t_flags |= TF_GPU_DENIED;
8674 } else {
8675 task->t_flags &= ~TF_GPU_DENIED;
8676 }
8677
8678 task_unlock(task);
8679 }
8680
8681 boolean_t
8682 task_is_gpu_denied(task_t task)
8683 {
8684 /* We don't need the lock to read this flag */
8685 return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
8686 }
8687
8688 /*
8689 * Task policy termination uses this path to clear the bit the final time
8690 * during the termination flow, and the TASK_POLICY_TERMINATED bit guarantees
8691 * that it won't be changed again on a terminated task.
8692 */
8693 bool
8694 task_set_game_mode_locked(task_t task, bool enabled)
8695 {
8696 task_lock_assert_owned(task);
8697
8698 if (enabled) {
8699 assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8700 }
8701
8702 bool previously_enabled = task_get_game_mode(task);
8703 bool needs_update = false;
8704 uint32_t new_count = 0;
8705
8706 if (enabled) {
8707 task->t_flags |= TF_GAME_MODE;
8708 } else {
8709 task->t_flags &= ~TF_GAME_MODE;
8710 }
8711
8712 if (enabled && !previously_enabled) {
8713 if (task_coalition_adjust_game_mode_count(task, 1, &new_count) && (new_count == 1)) {
8714 needs_update = true;
8715 }
8716 } else if (!enabled && previously_enabled) {
8717 if (task_coalition_adjust_game_mode_count(task, -1, &new_count) && (new_count == 0)) {
8718 needs_update = true;
8719 }
8720 }
8721
8722 return needs_update;
8723 }
8724
8725 void
8726 task_set_game_mode(task_t task, bool enabled)
8727 {
8728 bool needs_update = false;
8729
8730 task_lock(task);
8731
8732 /* After termination, further updates are no longer effective */
8733 if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8734 needs_update = task_set_game_mode_locked(task, enabled);
8735 }
8736
8737 task_unlock(task);
8738
8739 #if CONFIG_THREAD_GROUPS
8740 if (needs_update) {
8741 task_coalition_thread_group_game_mode_update(task);
8742 }
8743 #endif /* CONFIG_THREAD_GROUPS */
8744 }
8745
8746 bool
8747 task_get_game_mode(task_t task)
8748 {
8749 /* We don't need the lock to read this flag */
8750 return task->t_flags & TF_GAME_MODE;
8751 }
8752
8753 bool
8754 task_set_carplay_mode_locked(task_t task, bool enabled)
8755 {
8756 task_lock_assert_owned(task);
8757
8758 if (enabled) {
8759 assert(proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0);
8760 }
8761
8762 bool previously_enabled = task_get_carplay_mode(task);
8763 bool needs_update = false;
8764 uint32_t new_count = 0;
8765
8766 if (enabled) {
8767 task->t_flags |= TF_CARPLAY_MODE;
8768 } else {
8769 task->t_flags &= ~TF_CARPLAY_MODE;
8770 }
8771
8772 if (enabled && !previously_enabled) {
8773 if (task_coalition_adjust_carplay_mode_count(task, 1, &new_count) && (new_count == 1)) {
8774 needs_update = true;
8775 }
8776 } else if (!enabled && previously_enabled) {
8777 if (task_coalition_adjust_carplay_mode_count(task, -1, &new_count) && (new_count == 0)) {
8778 needs_update = true;
8779 }
8780 }
8781 return needs_update;
8782 }
8783
8784 void
8785 task_set_carplay_mode(task_t task, bool enabled)
8786 {
8787 bool needs_update = false;
8788
8789 task_lock(task);
8790
8791 /* After termination, further updates are no longer effective */
8792 if (proc_get_effective_task_policy(task, TASK_POLICY_TERMINATED) == 0) {
8793 needs_update = task_set_carplay_mode_locked(task, enabled);
8794 }
8795
8796 task_unlock(task);
8797
8798 #if CONFIG_THREAD_GROUPS
8799 if (needs_update) {
8800 task_coalition_thread_group_carplay_mode_update(task);
8801 }
8802 #endif /* CONFIG_THREAD_GROUPS */
8803 }
8804
8805 bool
8806 task_get_carplay_mode(task_t task)
8807 {
8808 /* We don't need the lock to read this flag */
8809 return task->t_flags & TF_CARPLAY_MODE;
8810 }
8811
8812 uint64_t
8813 get_task_memory_region_count(task_t task)
8814 {
8815 vm_map_t map;
8816 map = (task == kernel_task) ? kernel_map: task->map;
8817 return (uint64_t)get_map_nentries(map);
8818 }
8819
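/*
 * Emit the kdebug events describing a single dyld image: its UUID, load
 * address, and the fsid/fsobjid of the backing file. The payload is split
 * across two events on LP64 kernels and three events on 32-bit kernels.
 */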
8820 static void
8821 kdebug_trace_dyld_internal(uint32_t base_code,
8822 struct dyld_kernel_image_info *info)
8823 {
8824 static_assert(sizeof(info->uuid) >= 16);
8825
8826 #if defined(__LP64__)
8827 uint64_t *uuid = (uint64_t *)&(info->uuid);
8828
8829 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8830 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
8831 uuid[1], info->load_addr,
8832 (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
8833 0);
8834 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8835 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
8836 (uint64_t)info->fsobjid.fid_objno |
8837 ((uint64_t)info->fsobjid.fid_generation << 32),
8838 0, 0, 0, 0);
8839 #else /* defined(__LP64__) */
8840 uint32_t *uuid = (uint32_t *)&(info->uuid);
8841
8842 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8843 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
8844 uuid[1], uuid[2], uuid[3], 0);
8845 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8846 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
8847 (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
8848 info->fsobjid.fid_objno, 0);
8849 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8850 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
8851 info->fsobjid.fid_generation, 0, 0, 0, 0);
8852 #endif /* !defined(__LP64__) */
8853 }
8854
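/*
 * Copy the caller-provided array of dyld image infos out of the vm_map
 * copy, emit a kdebug event sequence for each entry, and deallocate the
 * kernel mapping. The copy is discarded up front when kdebug tracing of
 * DBG_DYLD_UUID events is not enabled.
 */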
8855 static kern_return_t
8856 kdebug_trace_dyld(task_t task, uint32_t base_code,
8857 vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
8858 {
8859 kern_return_t kr;
8860 dyld_kernel_image_info_array_t infos;
8861 vm_map_offset_t map_data;
8862 vm_offset_t data;
8863
8864 if (!infos_copy) {
8865 return KERN_INVALID_ADDRESS;
8866 }
8867
8868 if (!kdebug_enable ||
8869 !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
8870 vm_map_copy_discard(infos_copy);
8871 return KERN_SUCCESS;
8872 }
8873
8874 if (task == NULL || task != current_task()) {
8875 return KERN_INVALID_TASK;
8876 }
8877
8878 kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
8879 if (kr != KERN_SUCCESS) {
8880 return kr;
8881 }
8882
8883 infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
8884
8885 for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
8886 kdebug_trace_dyld_internal(base_code, &(infos[i]));
8887 }
8888
8889 data = CAST_DOWN(vm_offset_t, map_data);
8890 mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
8891 return KERN_SUCCESS;
8892 }
8893
8894 kern_return_t
8895 task_register_dyld_image_infos(task_t task,
8896 dyld_kernel_image_info_array_t infos_copy,
8897 mach_msg_type_number_t infos_len)
8898 {
8899 return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
8900 (vm_map_copy_t)infos_copy, infos_len);
8901 }
8902
8903 kern_return_t
8904 task_unregister_dyld_image_infos(task_t task,
8905 dyld_kernel_image_info_array_t infos_copy,
8906 mach_msg_type_number_t infos_len)
8907 {
8908 return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
8909 (vm_map_copy_t)infos_copy, infos_len);
8910 }
8911
8912 kern_return_t
8913 task_get_dyld_image_infos(__unused task_t task,
8914 __unused dyld_kernel_image_info_array_t * dyld_images,
8915 __unused mach_msg_type_number_t * dyld_imagesCnt)
8916 {
8917 return KERN_NOT_SUPPORTED;
8918 }
8919
8920 kern_return_t
8921 task_register_dyld_shared_cache_image_info(task_t task,
8922 dyld_kernel_image_info_t cache_img,
8923 __unused boolean_t no_cache,
8924 __unused boolean_t private_cache)
8925 {
8926 if (task == NULL || task != current_task()) {
8927 return KERN_INVALID_TASK;
8928 }
8929
8930 kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
8931 return KERN_SUCCESS;
8932 }
8933
8934 kern_return_t
8935 task_register_dyld_set_dyld_state(__unused task_t task,
8936 __unused uint8_t dyld_state)
8937 {
8938 return KERN_NOT_SUPPORTED;
8939 }
8940
8941 kern_return_t
8942 task_register_dyld_get_process_state(__unused task_t task,
8943 __unused dyld_kernel_process_info_t * dyld_process_state)
8944 {
8945 return KERN_NOT_SUPPORTED;
8946 }
8947
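/*
 * Return inspection data for a task. The only flavor currently handled is
 * TASK_INSPECT_BASIC_COUNTS, which reports the lifetime instruction and
 * cycle counts accumulated by recount; it is only available on
 * CONFIG_PERVASIVE_CPI kernels.
 */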
8948 kern_return_t
8949 task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
8950 task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
8951 {
8952 #if CONFIG_PERVASIVE_CPI
8953 task_t task = (task_t)task_insp;
8954 kern_return_t kr = KERN_SUCCESS;
8955 mach_msg_type_number_t size;
8956
8957 if (task == TASK_NULL) {
8958 return KERN_INVALID_ARGUMENT;
8959 }
8960
8961 size = *size_in_out;
8962
8963 switch (flavor) {
8964 case TASK_INSPECT_BASIC_COUNTS: {
8965 struct task_inspect_basic_counts *bc =
8966 (struct task_inspect_basic_counts *)info_out;
8967 struct recount_usage stats = { 0 };
8968 if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
8969 kr = KERN_INVALID_ARGUMENT;
8970 break;
8971 }
8972
8973 recount_sum(&recount_task_plan, task->tk_recount.rtk_lifetime, &stats);
8974 bc->instructions = recount_usage_instructions(&stats);
8975 bc->cycles = recount_usage_cycles(&stats);
8976 size = TASK_INSPECT_BASIC_COUNTS_COUNT;
8977 break;
8978 }
8979 default:
8980 kr = KERN_INVALID_ARGUMENT;
8981 break;
8982 }
8983
8984 if (kr == KERN_SUCCESS) {
8985 *size_in_out = size;
8986 }
8987 return kr;
8988 #else /* CONFIG_PERVASIVE_CPI */
8989 #pragma unused(task_insp, flavor, info_out, size_in_out)
8990 return KERN_NOT_SUPPORTED;
8991 #endif /* !CONFIG_PERVASIVE_CPI */
8992 }
8993
8994 #if CONFIG_SECLUDED_MEMORY
8995 int num_tasks_can_use_secluded_mem = 0;
8996
8997 void
8998 task_set_can_use_secluded_mem(
8999 task_t task,
9000 boolean_t can_use_secluded_mem)
9001 {
9002 if (!task->task_could_use_secluded_mem) {
9003 return;
9004 }
9005 task_lock(task);
9006 task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
9007 task_unlock(task);
9008 }
9009
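/*
 * Locked variant: flip the task's task_can_use_secluded_mem flag and keep
 * the global num_tasks_can_use_secluded_mem count in sync. Enabling the
 * flag is additionally gated by the secluded_for_apps boot-arg.
 */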
9010 void
9011 task_set_can_use_secluded_mem_locked(
9012 task_t task,
9013 boolean_t can_use_secluded_mem)
9014 {
9015 assert(task->task_could_use_secluded_mem);
9016 if (can_use_secluded_mem &&
9017 secluded_for_apps && /* global boot-arg */
9018 !task->task_can_use_secluded_mem) {
9019 assert(num_tasks_can_use_secluded_mem >= 0);
9020 OSAddAtomic(+1,
9021 (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
9022 task->task_can_use_secluded_mem = TRUE;
9023 } else if (!can_use_secluded_mem &&
9024 task->task_can_use_secluded_mem) {
9025 assert(num_tasks_can_use_secluded_mem > 0);
9026 OSAddAtomic(-1,
9027 (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
9028 task->task_can_use_secluded_mem = FALSE;
9029 }
9030 }
9031
9032 void
9033 task_set_could_use_secluded_mem(
9034 task_t task,
9035 boolean_t could_use_secluded_mem)
9036 {
9037 task->task_could_use_secluded_mem = !!could_use_secluded_mem;
9038 }
9039
9040 void
9041 task_set_could_also_use_secluded_mem(
9042 task_t task,
9043 boolean_t could_also_use_secluded_mem)
9044 {
9045 task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem;
9046 }
9047
9048 boolean_t
9049 task_can_use_secluded_mem(
9050 task_t task,
9051 boolean_t is_alloc)
9052 {
9053 if (task->task_can_use_secluded_mem) {
9054 assert(task->task_could_use_secluded_mem);
9055 assert(num_tasks_can_use_secluded_mem > 0);
9056 return TRUE;
9057 }
9058 if (task->task_could_also_use_secluded_mem &&
9059 num_tasks_can_use_secluded_mem > 0) {
9060 assert(num_tasks_can_use_secluded_mem > 0);
9061 return TRUE;
9062 }
9063
9064 /*
9065 * If a single task is using more than some large amount of
9066 * memory (i.e. secluded_shutoff_trigger) and is approaching
9067 * its task limit, allow it to dip into secluded and begin
9068 * suppression of rebuilding secluded memory until that task exits.
9069 */
9070 if (is_alloc && secluded_shutoff_trigger != 0) {
9071 uint64_t phys_used = get_task_phys_footprint(task);
9072 uint64_t limit = get_task_phys_footprint_limit(task);
9073 if (phys_used > secluded_shutoff_trigger &&
9074 limit > secluded_shutoff_trigger &&
9075 phys_used > limit - secluded_shutoff_headroom) {
9076 start_secluded_suppression(task);
9077 return TRUE;
9078 }
9079 }
9080
9081 return FALSE;
9082 }
9083
9084 boolean_t
9085 task_could_use_secluded_mem(
9086 task_t task)
9087 {
9088 return task->task_could_use_secluded_mem;
9089 }
9090
9091 boolean_t
9092 task_could_also_use_secluded_mem(
9093 task_t task)
9094 {
9095 return task->task_could_also_use_secluded_mem;
9096 }
9097 #endif /* CONFIG_SECLUDED_MEMORY */
9098
9099 queue_head_t *
9100 task_io_user_clients(task_t task)
9101 {
9102 return &task->io_user_clients;
9103 }
9104
9105 void
9106 task_set_message_app_suspended(task_t task, boolean_t enable)
9107 {
9108 task->message_app_suspended = enable;
9109 }
9110
9111 void
9112 task_copy_fields_for_exec(task_t dst_task, task_t src_task)
9113 {
9114 dst_task->vtimers = src_task->vtimers;
9115 }
9116
9117 #if DEVELOPMENT || DEBUG
9118 int vm_region_footprint = 0;
9119 #endif /* DEVELOPMENT || DEBUG */
9120
9121 boolean_t
9122 task_self_region_footprint(void)
9123 {
9124 #if DEVELOPMENT || DEBUG
9125 if (vm_region_footprint) {
9126 /* system-wide override */
9127 return TRUE;
9128 }
9129 #endif /* DEVELOPMENT || DEBUG */
9130 return current_task()->task_region_footprint;
9131 }
9132
9133 void
9134 task_self_region_footprint_set(
9135 boolean_t newval)
9136 {
9137 task_t curtask;
9138
9139 curtask = current_task();
9140 task_lock(curtask);
9141 if (newval) {
9142 curtask->task_region_footprint = TRUE;
9143 } else {
9144 curtask->task_region_footprint = FALSE;
9145 }
9146 task_unlock(curtask);
9147 }
9148
9149 int
9150 task_self_region_info_flags(void)
9151 {
9152 return current_task()->task_region_info_flags;
9153 }
9154
9155 kern_return_t
9156 task_self_region_info_flags_set(
9157 int newval)
9158 {
9159 task_t curtask;
9160 kern_return_t err = KERN_SUCCESS;
9161
9162 curtask = current_task();
9163 task_lock(curtask);
9164 curtask->task_region_info_flags = newval;
9165 /* check for overflow (flag added without increasing bitfield size?) */
9166 if (curtask->task_region_info_flags != newval) {
9167 err = KERN_INVALID_ARGUMENT;
9168 }
9169 task_unlock(curtask);
9170
9171 return err;
9172 }
9173
9174 void
9175 task_set_darkwake_mode(task_t task, boolean_t set_mode)
9176 {
9177 assert(task);
9178
9179 task_lock(task);
9180
9181 if (set_mode) {
9182 task->t_flags |= TF_DARKWAKE_MODE;
9183 } else {
9184 task->t_flags &= ~(TF_DARKWAKE_MODE);
9185 }
9186
9187 task_unlock(task);
9188 }
9189
9190 boolean_t
9191 task_get_darkwake_mode(task_t task)
9192 {
9193 assert(task);
9194 return (task->t_flags & TF_DARKWAKE_MODE) != 0;
9195 }
9196
9197 /*
9198 * Set default behavior for task's control port and EXC_GUARD variants that have
9199 * settable behavior.
9200 *
9201 * Platform binaries typically have one behavior, third parties another -
9202 * but there are special exceptions we may need to account for.
9203 */
9204 void
9205 task_set_exc_guard_ctrl_port_default(
9206 task_t task,
9207 thread_t main_thread,
9208 const char *name,
9209 unsigned int namelen,
9210 boolean_t is_simulated,
9211 uint32_t platform,
9212 uint32_t sdk)
9213 {
9214 task_control_port_options_t opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9215
9216 if (task_is_hardened_binary(task)) {
9217 /* set exc guard default behavior for hardened binaries */
9218 task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
9219
9220 if (1 == task_pid(task)) {
9221 /* special flags for inittask - deliver every instance as a corpse */
9222 task->task_exc_guard = _TASK_EXC_GUARD_ALL_CORPSE;
9223 } else if (task_exc_guard_default & TASK_EXC_GUARD_HONOR_NAMED_DEFAULTS) {
9224 /* honor by-name default setting overrides */
9225
9226 int count = sizeof(task_exc_guard_named_defaults) / sizeof(struct task_exc_guard_named_default);
9227
9228 for (int i = 0; i < count; i++) {
9229 const struct task_exc_guard_named_default *named_default =
9230 &task_exc_guard_named_defaults[i];
9231 if (strncmp(named_default->name, name, namelen) == 0 &&
9232 strlen(named_default->name) == namelen) {
9233 task->task_exc_guard = named_default->behavior;
9234 break;
9235 }
9236 }
9237 }
9238
9239 /* set control port options for 1p code, inherited from parent task by default */
9240 opts = ipc_control_port_options & ICP_OPTIONS_1P_MASK;
9241 } else {
9242 /* set exc guard default behavior for third-party code */
9243 task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
9244 /* set control port options for 3p code, inherited from parent task by default */
9245 opts = (ipc_control_port_options & ICP_OPTIONS_3P_MASK) >> ICP_OPTIONS_3P_SHIFT;
9246 }
9247
9248 if (is_simulated) {
9249 /* If simulated and built against pre-iOS 15 SDK, disable all EXC_GUARD */
9250 if ((platform == PLATFORM_IOSSIMULATOR && sdk < 0xf0000) ||
9251 (platform == PLATFORM_TVOSSIMULATOR && sdk < 0xf0000) ||
9252 (platform == PLATFORM_WATCHOSSIMULATOR && sdk < 0x80000)) {
9253 task->task_exc_guard = TASK_EXC_GUARD_NONE;
9254 }
9255 /* Disable protection for control ports for simulated binaries */
9256 opts = TASK_CONTROL_PORT_OPTIONS_NONE;
9257 }
9258
9259
9260 task_set_control_port_options(task, opts);
9261
9262 task_set_immovable_pinned(task);
9263 main_thread_set_immovable_pinned(main_thread);
9264 }
9265
9266 kern_return_t
9267 task_get_exc_guard_behavior(
9268 task_t task,
9269 task_exc_guard_behavior_t *behaviorp)
9270 {
9271 if (task == TASK_NULL) {
9272 return KERN_INVALID_TASK;
9273 }
9274 *behaviorp = task->task_exc_guard;
9275 return KERN_SUCCESS;
9276 }
9277
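/*
 * Update a task's EXC_GUARD behavior. The request is clipped to the
 * behaviors allowed by this configuration, and on release kernels the
 * no-set/no-unset masks only permit making the behavior stricter, never
 * relaxing it.
 */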
9278 kern_return_t
9279 task_set_exc_guard_behavior(
9280 task_t task,
9281 task_exc_guard_behavior_t new_behavior)
9282 {
9283 if (task == TASK_NULL) {
9284 return KERN_INVALID_TASK;
9285 }
9286 if (new_behavior & ~TASK_EXC_GUARD_ALL) {
9287 return KERN_INVALID_VALUE;
9288 }
9289
9290 /* limit setting to that allowed for this config */
9291 new_behavior = new_behavior & task_exc_guard_config_mask;
9292
9293 #if !defined (DEBUG) && !defined (DEVELOPMENT)
9294 /* On release kernels, only allow _upgrading_ exc guard behavior */
9295 task_exc_guard_behavior_t cur_behavior;
9296
9297 os_atomic_rmw_loop(&task->task_exc_guard, cur_behavior, new_behavior, relaxed, {
9298 if ((cur_behavior & task_exc_guard_no_unset_mask) & ~(new_behavior & task_exc_guard_no_unset_mask)) {
9299 os_atomic_rmw_loop_give_up(return KERN_DENIED);
9300 }
9301
9302 if ((new_behavior & task_exc_guard_no_set_mask) & ~(cur_behavior & task_exc_guard_no_set_mask)) {
9303 os_atomic_rmw_loop_give_up(return KERN_DENIED);
9304 }
9305
9306 /* no restrictions on CORPSE bit */
9307 });
9308 #else
9309 task->task_exc_guard = new_behavior;
9310 #endif
9311 return KERN_SUCCESS;
9312 }
9313
9314 kern_return_t
9315 task_set_corpse_forking_behavior(task_t task, task_corpse_forking_behavior_t behavior)
9316 {
9317 #if DEVELOPMENT || DEBUG
9318 if (task == TASK_NULL) {
9319 return KERN_INVALID_TASK;
9320 }
9321
9322 task_lock(task);
9323 if (behavior & TASK_CORPSE_FORKING_DISABLED_MEM_DIAG) {
9324 task->t_flags |= TF_NO_CORPSE_FORKING;
9325 } else {
9326 task->t_flags &= ~TF_NO_CORPSE_FORKING;
9327 }
9328 task_unlock(task);
9329
9330 return KERN_SUCCESS;
9331 #else
9332 (void)task;
9333 (void)behavior;
9334 return KERN_NOT_SUPPORTED;
9335 #endif
9336 }
9337
9338 boolean_t
9339 task_corpse_forking_disabled(task_t task)
9340 {
9341 boolean_t disabled = FALSE;
9342
9343 task_lock(task);
9344 disabled = (task->t_flags & TF_NO_CORPSE_FORKING);
9345 task_unlock(task);
9346
9347 return disabled;
9348 }
9349
9350 #if __arm64__
9351 extern int legacy_footprint_entitlement_mode;
9352 extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t);
9353 extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *);
9354
9355
9356 void
9357 task_set_legacy_footprint(
9358 task_t task)
9359 {
9360 task_lock(task);
9361 task->task_legacy_footprint = TRUE;
9362 task_unlock(task);
9363 }
9364
9365 void
9366 task_set_extra_footprint_limit(
9367 task_t task)
9368 {
9369 if (task->task_extra_footprint_limit) {
9370 return;
9371 }
9372 task_lock(task);
9373 if (task->task_extra_footprint_limit) {
9374 task_unlock(task);
9375 return;
9376 }
9377 task->task_extra_footprint_limit = TRUE;
9378 task_unlock(task);
9379 memorystatus_act_on_legacy_footprint_entitlement(get_bsdtask_info(task), TRUE);
9380 }
9381
9382 void
9383 task_set_ios13extended_footprint_limit(
9384 task_t task)
9385 {
9386 if (task->task_ios13extended_footprint_limit) {
9387 return;
9388 }
9389 task_lock(task);
9390 if (task->task_ios13extended_footprint_limit) {
9391 task_unlock(task);
9392 return;
9393 }
9394 task->task_ios13extended_footprint_limit = TRUE;
9395 task_unlock(task);
9396 memorystatus_act_on_ios13extended_footprint_entitlement(get_bsdtask_info(task));
9397 }
9398 #endif /* __arm64__ */
9399
9400 static inline ledger_amount_t
9401 task_ledger_get_balance(
9402 ledger_t ledger,
9403 int ledger_idx)
9404 {
9405 ledger_amount_t amount;
9406 amount = 0;
9407 ledger_get_balance(ledger, ledger_idx, &amount);
9408 return amount;
9409 }
9410
9411 /*
9412 * Gather the amount of memory counted in a task's footprint due to
9413 * being in a specific set of ledgers.
9414 */
9415 void
9416 task_ledgers_footprint(
9417 ledger_t ledger,
9418 ledger_amount_t *ledger_resident,
9419 ledger_amount_t *ledger_compressed)
9420 {
9421 *ledger_resident = 0;
9422 *ledger_compressed = 0;
9423
9424 /* purgeable non-volatile memory */
9425 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
9426 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);
9427
9428 /* "default" tagged memory */
9429 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
9430 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);
9431
9432 /* "network" currently never counts in the footprint... */
9433
9434 /* "media" tagged memory */
9435 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
9436 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);
9437
9438 /* "graphics" tagged memory */
9439 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
9440 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);
9441
9442 /* "neural" tagged memory */
9443 *ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
9444 *ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
9445 }
9446
9447 #if CONFIG_MEMORYSTATUS
9448 /*
9449 * Credit any outstanding task dirty time to the ledger.
9450 * memstat_dirty_start is pushed forward to prevent any possibility of double
9451 * counting, making it safe to call this as often as necessary to ensure that
9452 * anyone reading the ledger gets up-to-date information.
9453 */
9454 void
9455 task_ledger_settle_dirty_time(task_t t)
9456 {
9457 task_lock(t);
9458
9459 uint64_t start = t->memstat_dirty_start;
9460 if (start) {
9461 uint64_t now = mach_absolute_time();
9462
9463 uint64_t duration;
9464 absolutetime_to_nanoseconds(now - start, &duration);
9465
9466 ledger_t ledger = get_task_ledger(t);
9467 ledger_credit(ledger, task_ledgers.memorystatus_dirty_time, duration);
9468
9469 t->memstat_dirty_start = now;
9470 }
9471
9472 task_unlock(t);
9473 }
9474 #endif /* CONFIG_MEMORYSTATUS */
9475
9476 void
9477 task_set_memory_ownership_transfer(
9478 task_t task,
9479 boolean_t value)
9480 {
9481 task_lock(task);
9482 task->task_can_transfer_memory_ownership = !!value;
9483 task_unlock(task);
9484 }
9485
9486 #if DEVELOPMENT || DEBUG
9487
9488 void
9489 task_set_no_footprint_for_debug(task_t task, boolean_t value)
9490 {
9491 task_lock(task);
9492 task->task_no_footprint_for_debug = !!value;
9493 task_unlock(task);
9494 }
9495
9496 int
9497 task_get_no_footprint_for_debug(task_t task)
9498 {
9499 return task->task_no_footprint_for_debug;
9500 }
9501
9502 #endif /* DEVELOPMENT || DEBUG */
9503
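/*
 * Walk the VM objects owned by a task. With a non-NULL query buffer, fill
 * one vm_object_query_data_t per object (sizes, ledger tag, purgeable
 * state, compressed size) without overrunning 'len' bytes; with a NULL
 * buffer, just report the number of owned objects through 'num'.
 */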
9504 void
9505 task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num)
9506 {
9507 vm_object_t find_vmo;
9508 size_t size = 0;
9509
9510 /*
9511 * Allocate a save area for FP state before taking task_objq lock,
9512 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
9513 * an FP state allocation while holding VM locks.
9514 */
9515 ml_fp_save_area_prealloc();
9516
9517 task_objq_lock(task);
9518 if (query != NULL) {
9519 queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
9520 {
9521 vm_object_query_t p = &query[size++];
9522
9523 /* make sure to not overrun */
9524 if (size * sizeof(vm_object_query_data_t) > len) {
9525 --size;
9526 break;
9527 }
9528
9529 bzero(p, sizeof(*p));
9530 p->object_id = (vm_object_id_t) VM_KERNEL_ADDRHASH(find_vmo);
9531 p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
9532 p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
9533 p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
9534 p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
9535 p->vo_no_footprint = find_vmo->vo_no_footprint;
9536 p->vo_ledger_tag = find_vmo->vo_ledger_tag;
9537 p->purgable = find_vmo->purgable;
9538
9539 if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
9540 p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
9541 } else {
9542 p->compressed_size = 0;
9543 }
9544 }
9545 } else {
9546 size = (size_t)task->task_owned_objects;
9547 }
9548 task_objq_unlock(task);
9549
9550 *num = size;
9551 }
9552
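/*
 * Copy a task's owned-VM-object list into a caller-supplied buffer.
 * Callers typically make two passes: a first call with buffer_size == 0
 * to learn the required size, then a second call with an allocated
 * buffer, as task_store_owned_vmobject_info() does below.
 */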
9553 void
9554 task_get_owned_vmobjects(task_t task, size_t buffer_size, vmobject_list_output_t buffer, size_t* output_size, size_t* entries)
9555 {
9556 assert(output_size);
9557 assert(entries);
9558
9559 /* copy the vmobjects and vmobject data out of the task */
9560 if (buffer_size == 0) {
9561 task_copy_vmobjects(task, NULL, 0, entries);
9562 *output_size = (*entries > 0) ? *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0;
9563 } else {
9564 assert(buffer);
9565 task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), entries);
9566 buffer->entries = (uint64_t)*entries;
9567 *output_size = *entries * sizeof(vm_object_query_data_t) + sizeof(*buffer);
9568 }
9569 }
9570
9571 void
9572 task_store_owned_vmobject_info(task_t to_task, task_t from_task)
9573 {
9574 size_t buffer_size;
9575 vmobject_list_output_t buffer;
9576 size_t output_size;
9577 size_t entries;
9578
9579 assert(to_task != from_task);
9580
9581 /* get the size, allocate a buffer, and populate it */
9582 entries = 0;
9583 output_size = 0;
9584 task_get_owned_vmobjects(from_task, 0, NULL, &output_size, &entries);
9585
9586 if (output_size) {
9587 buffer_size = output_size;
9588 buffer = kalloc_data(buffer_size, Z_WAITOK);
9589
9590 if (buffer) {
9591 entries = 0;
9592 output_size = 0;
9593
9594 task_get_owned_vmobjects(from_task, buffer_size, buffer, &output_size, &entries);
9595
9596 if (entries) {
9597 to_task->corpse_vmobject_list = buffer;
9598 to_task->corpse_vmobject_list_size = buffer_size;
9599 }
9600 }
9601 }
9602 }
9603
9604 void
9605 task_set_filter_msg_flag(
9606 task_t task,
9607 boolean_t flag)
9608 {
9609 assert(task != TASK_NULL);
9610
9611 if (flag) {
9612 task_ro_flags_set(task, TFRO_FILTER_MSG);
9613 } else {
9614 task_ro_flags_clear(task, TFRO_FILTER_MSG);
9615 }
9616 }
9617
9618 boolean_t
9619 task_get_filter_msg_flag(
9620 task_t task)
9621 {
9622 if (!task) {
9623 return false;
9624 }
9625
9626 return (task_ro_flags_get(task) & TFRO_FILTER_MSG) ? TRUE : FALSE;
9627 }
9628 bool
9629 task_is_exotic(
9630 task_t task)
9631 {
9632 if (task == TASK_NULL) {
9633 return false;
9634 }
9635 return vm_map_is_exotic(get_task_map(task));
9636 }
9637
9638 bool
9639 task_is_alien(
9640 task_t task)
9641 {
9642 if (task == TASK_NULL) {
9643 return false;
9644 }
9645 return vm_map_is_alien(get_task_map(task));
9646 }
9647
9648
9649
9650 #if CONFIG_MACF
9651 uint8_t *
9652 mac_task_get_mach_filter_mask(task_t task)
9653 {
9654 assert(task);
9655 return task_get_mach_trap_filter_mask(task);
9656 }
9657
9658 uint8_t *
9659 mac_task_get_kobj_filter_mask(task_t task)
9660 {
9661 assert(task);
9662 return task_get_mach_kobj_filter_mask(task);
9663 }
9664
9665 /* Set the filter mask for Mach traps. */
9666 void
9667 mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr)
9668 {
9669 assert(task);
9670
9671 task_set_mach_trap_filter_mask(task, maskptr);
9672 }
9673
9674 /* Set the filter mask for kobject msgs. */
9675 void
9676 mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr)
9677 {
9678 assert(task);
9679
9680 task_set_mach_kobj_filter_mask(task, maskptr);
9681 }
9682
9683 /* Hook for mach trap/sc filter evaluation policy. */
9684 SECURITY_READ_ONLY_LATE(mac_task_mach_filter_cbfunc_t) mac_task_mach_trap_evaluate = NULL;
9685
9686 /* Hook for kobj message filter evaluation policy. */
9687 SECURITY_READ_ONLY_LATE(mac_task_kobj_filter_cbfunc_t) mac_task_kobj_msg_evaluate = NULL;
9688
9689 /* Set the callback hooks for the filtering policy. */
9690 int
9691 mac_task_register_filter_callbacks(
9692 const mac_task_mach_filter_cbfunc_t mach_cbfunc,
9693 const mac_task_kobj_filter_cbfunc_t kobj_cbfunc)
9694 {
9695 if (mach_cbfunc != NULL) {
9696 if (mac_task_mach_trap_evaluate != NULL) {
9697 return KERN_FAILURE;
9698 }
9699 mac_task_mach_trap_evaluate = mach_cbfunc;
9700 }
9701 if (kobj_cbfunc != NULL) {
9702 if (mac_task_kobj_msg_evaluate != NULL) {
9703 return KERN_FAILURE;
9704 }
9705 mac_task_kobj_msg_evaluate = kobj_cbfunc;
9706 }
9707
9708 return KERN_SUCCESS;
9709 }
9710 #endif /* CONFIG_MACF */
9711
9712 #if CONFIG_ROSETTA
9713 bool
9714 task_is_translated(task_t task)
9715 {
9716 extern boolean_t proc_is_translated(struct proc* p);
9717 return task && proc_is_translated(get_bsdtask_info(task));
9718 }
9719 #endif
9720
9721
9722
9723 #if __has_feature(ptrauth_calls)
9724 /* On FPAC, we want to deliver all PAC violations as fatal exceptions, regardless
9725 * of the enable_pac_exception boot-arg value or any other entitlements.
9726 * The only case where we allow non-fatal PAC exceptions on FPAC is for debugging,
9727 * which requires Developer Mode enabled.
9728 *
9729 * On non-FPAC hardware, we gate the decision behind entitlements and the
9730 * enable_pac_exception boot-arg.
9731 */
9732 extern int gARM_FEAT_FPAC;
9733 /*
9734 * Having the PAC_EXCEPTION_ENTITLEMENT entitlement means we always enforce all
9735 * of the PAC exception hardening: fatal exceptions and signed user state.
9736 */
9737 #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"
9738 /*
9739 * On non-FPAC hardware, when enable_pac_exception boot-arg is set to true,
9740 * processes can choose to get non-fatal PAC exception delivery by setting
9741 * the SKIP_PAC_EXCEPTION_ENTITLEMENT entitlement.
9742 */
9743 #define SKIP_PAC_EXCEPTION_ENTITLEMENT "com.apple.private.skip.pac.exception"
9744
9745 void
9746 task_set_pac_exception_fatal_flag(
9747 task_t task)
9748 {
9749 assert(task != TASK_NULL);
9750 bool pac_hardened_task = false;
9751 uint32_t set_flags = 0;
9752
9753 /*
9754 * We must not apply this security policy on tasks which have opted out of mach hardening to
9755 * avoid regressions in third party plugins and third party apps when using AMFI boot-args
9756 */
9757 bool platform_binary = task_get_platform_binary(task);
9758 #if XNU_TARGET_OS_OSX
9759 platform_binary &= !task_opted_out_mach_hardening(task);
9760 #endif /* XNU_TARGET_OS_OSX */
9761
9762 /*
9763 * On non-FPAC hardware, we allow gating PAC exceptions behind
9764 * SKIP_PAC_EXCEPTION_ENTITLEMENT and the boot-arg.
9765 */
9766 if (!gARM_FEAT_FPAC && enable_pac_exception &&
9767 IOTaskHasEntitlement(task, SKIP_PAC_EXCEPTION_ENTITLEMENT)) {
9768 return;
9769 }
9770
9771 if (IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT) || task_get_hardened_runtime(task)) {
9772 pac_hardened_task = true;
9773 set_flags |= TFRO_PAC_ENFORCE_USER_STATE;
9774 }
9775
9776 /* On non-FPAC hardware, gate the fatal property behind entitlements and boot-arg. */
9777 if (pac_hardened_task ||
9778 ((enable_pac_exception || gARM_FEAT_FPAC) && platform_binary)) {
9779 set_flags |= TFRO_PAC_EXC_FATAL;
9780 }
9781
9782 if (set_flags != 0) {
9783 task_ro_flags_set(task, set_flags);
9784 }
9785 }
9786
9787 bool
9788 task_is_pac_exception_fatal(
9789 task_t task)
9790 {
9791 assert(task != TASK_NULL);
9792 return !!(task_ro_flags_get(task) & TFRO_PAC_EXC_FATAL);
9793 }
9794 #endif /* __has_feature(ptrauth_calls) */
9795
9796 /*
9797 * FATAL_EXCEPTION_ENTITLEMENT, if present, will contain a list of
9798 * conditions for which access violations should deliver SIGKILL rather than
9799 * SIGSEGV. This is a hardening measure intended for use by applications
9800 * that are able to handle the stricter error handling behavior. Currently
9801 * this supports FATAL_EXCEPTION_ENTITLEMENT_JIT, which is documented in
9802 * user_fault_in_self_restrict_mode().
9803 */
9804 #define FATAL_EXCEPTION_ENTITLEMENT "com.apple.security.fatal-exceptions"
9805 #define FATAL_EXCEPTION_ENTITLEMENT_JIT "jit"
9806
9807 void
9808 task_set_jit_exception_fatal_flag(
9809 task_t task)
9810 {
9811 assert(task != TASK_NULL);
9812 if (IOTaskHasStringEntitlement(task, FATAL_EXCEPTION_ENTITLEMENT, FATAL_EXCEPTION_ENTITLEMENT_JIT)) {
9813 task_ro_flags_set(task, TFRO_JIT_EXC_FATAL);
9814 }
9815 }
9816
9817 bool
9818 task_is_jit_exception_fatal(
9819 __unused task_t task)
9820 {
9821 #if !defined(XNU_PLATFORM_MacOSX)
9822 return true;
9823 #else
9824 assert(task != TASK_NULL);
9825 return !!(task_ro_flags_get(task) & TFRO_JIT_EXC_FATAL);
9826 #endif
9827 }
9828
9829 bool
9830 task_needs_user_signed_thread_state(
9831 task_t task)
9832 {
9833 assert(task != TASK_NULL);
9834 return !!(task_ro_flags_get(task) & TFRO_PAC_ENFORCE_USER_STATE);
9835 }
9836
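/*
 * Mark a task (the current task if TASK_NULL is passed) with TF_TECS and
 * apply it to every thread already in the task; this is a no-op on
 * hardware where machine_csv(CPUVN_CI) reports the mitigation is not
 * applicable.
 */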
9837 void
9838 task_set_tecs(task_t task)
9839 {
9840 if (task == TASK_NULL) {
9841 task = current_task();
9842 }
9843
9844 if (!machine_csv(CPUVN_CI)) {
9845 return;
9846 }
9847
9848 LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED);
9849
9850 task_lock(task);
9851
9852 task->t_flags |= TF_TECS;
9853
9854 thread_t thread;
9855 queue_iterate(&task->threads, thread, thread_t, task_threads) {
9856 machine_tecs(thread);
9857 }
9858 task_unlock(task);
9859 }
9860
9861 kern_return_t
9862 task_test_sync_upcall(
9863 task_t task,
9864 ipc_port_t send_port)
9865 {
9866 #if DEVELOPMENT || DEBUG
9867 if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9868 return KERN_INVALID_ARGUMENT;
9869 }
9870
9871 /* Block on sync kernel upcall on the given send port */
9872 mach_test_sync_upcall(send_port);
9873
9874 ipc_port_release_send(send_port);
9875 return KERN_SUCCESS;
9876 #else
9877 (void)task;
9878 (void)send_port;
9879 return KERN_NOT_SUPPORTED;
9880 #endif
9881 }
9882
9883 kern_return_t
9884 task_test_async_upcall_propagation(
9885 task_t task,
9886 ipc_port_t send_port,
9887 int qos,
9888 int iotier)
9889 {
9890 #if DEVELOPMENT || DEBUG
9891 kern_return_t kr;
9892
9893 if (task != current_task() || !IPC_PORT_VALID(send_port)) {
9894 return KERN_INVALID_ARGUMENT;
9895 }
9896
9897 if (qos < THREAD_QOS_DEFAULT || qos > THREAD_QOS_USER_INTERACTIVE ||
9898 iotier < THROTTLE_LEVEL_START || iotier > THROTTLE_LEVEL_END) {
9899 return KERN_INVALID_ARGUMENT;
9900 }
9901
9902 struct thread_attr_for_ipc_propagation attr = {
9903 .tafip_iotier = iotier,
9904 .tafip_qos = qos
9905 };
9906
9907 /* Apply propagate attr to port */
9908 kr = ipc_port_propagate_thread_attr(send_port, attr);
9909 if (kr != KERN_SUCCESS) {
9910 return kr;
9911 }
9912
9913 thread_enable_send_importance(current_thread(), TRUE);
9914
9915 /* Perform an async kernel upcall on the given send port */
9916 mach_test_async_upcall(send_port);
9917 thread_enable_send_importance(current_thread(), FALSE);
9918
9919 ipc_port_release_send(send_port);
9920 return KERN_SUCCESS;
9921 #else
9922 (void)task;
9923 (void)send_port;
9924 (void)qos;
9925 (void)iotier;
9926 return KERN_NOT_SUPPORTED;
9927 #endif
9928 }
9929
9930 #if CONFIG_PROC_RESOURCE_LIMITS
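/*
 * Allocate a fatal port for the current task and copy a send right out
 * into the caller's IPC space, returning the resulting port name
 * (MACH_PORT_NULL on failure).
 */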
9931 mach_port_name_t
9932 current_task_get_fatal_port_name(void)
9933 {
9934 mach_port_t task_fatal_port = MACH_PORT_NULL;
9935 mach_port_name_t port_name = 0;
9936
9937 task_fatal_port = task_allocate_fatal_port();
9938
9939 if (task_fatal_port) {
9940 ipc_object_copyout(current_space(), ip_to_object(task_fatal_port), MACH_MSG_TYPE_PORT_SEND,
9941 IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &port_name);
9942 }
9943
9944 return port_name;
9945 }
9946 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
9947
9948 #if defined(__x86_64__)
9949 bool
9950 curtask_get_insn_copy_optout(void)
9951 {
9952 bool optout;
9953 task_t cur_task = current_task();
9954
9955 task_lock(cur_task);
9956 optout = (cur_task->t_flags & TF_INSN_COPY_OPTOUT) ? true : false;
9957 task_unlock(cur_task);
9958
9959 return optout;
9960 }
9961
9962 void
9963 curtask_set_insn_copy_optout(void)
9964 {
9965 task_t cur_task = current_task();
9966
9967 task_lock(cur_task);
9968
9969 cur_task->t_flags |= TF_INSN_COPY_OPTOUT;
9970
9971 thread_t thread;
9972 queue_iterate(&cur_task->threads, thread, thread_t, task_threads) {
9973 machine_thread_set_insn_copy_optout(thread);
9974 }
9975 task_unlock(cur_task);
9976 }
9977 #endif /* defined(__x86_64__) */
9978
9979 void
9980 task_get_corpse_vmobject_list(task_t task, vmobject_list_output_t* list, size_t* list_size)
9981 {
9982 assert(task);
9983 assert(list_size);
9984
9985 *list = task->corpse_vmobject_list;
9986 *list_size = (size_t)task->corpse_vmobject_list_size;
9987 }
9988
9989 __abortlike
9990 static void
9991 panic_proc_ro_task_backref_mismatch(task_t t, proc_ro_t ro)
9992 {
9993 panic("proc_ro->task backref mismatch: t=%p, ro=%p, "
9994 "proc_ro_task(ro)=%p", t, ro, proc_ro_task(ro));
9995 }
9996
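/*
 * Return the read-only proc structure backing this task, validating both
 * that it lives in the read-only zone and that its task back-reference
 * points at the task we started from.
 */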
9997 proc_ro_t
9998 task_get_ro(task_t t)
9999 {
10000 proc_ro_t ro = (proc_ro_t)t->bsd_info_ro;
10001
10002 zone_require_ro(ZONE_ID_PROC_RO, sizeof(struct proc_ro), ro);
10003 if (__improbable(proc_ro_task(ro) != t)) {
10004 panic_proc_ro_task_backref_mismatch(t, ro);
10005 }
10006
10007 return ro;
10008 }
10009
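/*
 * Accessors for the read-only task flags (TFRO_*). The flags live in the
 * proc_ro zone, so updates must go through the zalloc_ro atomic helpers
 * rather than writing the field directly.
 */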
10010 uint32_t
10011 task_ro_flags_get(task_t task)
10012 {
10013 return task_get_ro(task)->t_flags_ro;
10014 }
10015
10016 void
10017 task_ro_flags_set(task_t task, uint32_t flags)
10018 {
10019 zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
10020 t_flags_ro, ZRO_ATOMIC_OR_32, flags);
10021 }
10022
10023 void
10024 task_ro_flags_clear(task_t task, uint32_t flags)
10025 {
10026 zalloc_ro_update_field_atomic(ZONE_ID_PROC_RO, task_get_ro(task),
10027 t_flags_ro, ZRO_ATOMIC_AND_32, ~flags);
10028 }
10029
10030 task_control_port_options_t
10031 task_get_control_port_options(task_t task)
10032 {
10033 return task_get_ro(task)->task_control_port_options;
10034 }
10035
10036 void
10037 task_set_control_port_options(task_t task, task_control_port_options_t opts)
10038 {
10039 zalloc_ro_update_field(ZONE_ID_PROC_RO, task_get_ro(task),
10040 task_control_port_options, &opts);
10041 }
10042
10043 /*!
10044 * @function kdp_task_is_locked
10045 *
10046 * @abstract
10047 * Checks if task is locked.
10048 *
10049 * @discussion
10050 * NOT SAFE: To be used only by kernel debugger.
10051 *
10052 * @param task task to check
10053 *
10054 * @returns TRUE if the task is locked.
10055 */
10056 boolean_t
10057 kdp_task_is_locked(task_t task)
10058 {
10059 return kdp_lck_mtx_lock_spin_is_acquired(&task->lock);
10060 }
10061
10062 #if DEBUG || DEVELOPMENT
10063 /**
10064 *
10065 * Check if a threshold limit is valid based on the actual phys memory
10066 * limit. If they are the same, race conditions may arise, so we have to
10067 * prevent that from happening.
10068 */
10069 static diagthreshold_check_return
10070 task_check_memorythreshold_is_valid(task_t task, uint64_t new_limit, bool is_diagnostics_value)
10071 {
10072 int phys_limit_mb;
10073 kern_return_t ret_value;
10074 bool threshold_enabled;
10075 bool dummy;
10076 ret_value = ledger_is_diag_threshold_enabled(task->ledger, task_ledgers.phys_footprint, &threshold_enabled);
10077 if (ret_value != KERN_SUCCESS) {
10078 return ret_value;
10079 }
10080 if (is_diagnostics_value == true) {
10081 ret_value = task_get_phys_footprint_limit(task, &phys_limit_mb);
10082 } else {
10083 uint64_t diag_limit;
10084 ret_value = task_get_diag_footprint_limit_internal(task, &diag_limit, &dummy);
10085 phys_limit_mb = (int)(diag_limit >> 20);
10086 }
10087 if (ret_value != KERN_SUCCESS) {
10088 return ret_value;
10089 }
10090 if (phys_limit_mb == (int) new_limit) {
10091 if (threshold_enabled == false) {
10092 return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_DISABLED;
10093 } else {
10094 return THRESHOLD_IS_SAME_AS_LIMIT_FLAG_ENABLED;
10095 }
10096 }
10097 if (threshold_enabled == false) {
10098 return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_DISABLED;
10099 } else {
10100 return THRESHOLD_IS_NOT_SAME_AS_LIMIT_FLAG_ENABLED;
10101 }
10102 }
10103 #endif
10104
10105 #if CONFIG_EXCLAVES
10106 kern_return_t
10107 task_add_conclave(task_t task, void *vnode, int64_t off, const char *task_conclave_id)
10108 {
10109 /*
10110 * Only launchd or properly entitled tasks can attach tasks to
10111 * conclaves.
10112 */
10113 if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
10114 return KERN_DENIED;
10115 }
10116
10117 /*
10118 * Only entitled tasks can have conclaves attached.
10119 * Allow tasks which have the SPAWN privilege to also host conclaves.
10120 * This allows xpc proxy to add a conclave before execing a daemon.
10121 */
10122 if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST) &&
10123 !exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
10124 return KERN_DENIED;
10125 }
10126
10127 return exclaves_conclave_attach(task_conclave_id, task);
10128 }
10129
10130 kern_return_t
10131 task_launch_conclave(mach_port_name_t port __unused)
10132 {
10133 kern_return_t kr = KERN_FAILURE;
10134 assert3u(port, ==, MACH_PORT_NULL);
10135 exclaves_resource_t *conclave = task_get_conclave(current_task());
10136 if (conclave == NULL) {
10137 return kr;
10138 }
10139
10140 kr = exclaves_conclave_launch(conclave);
10141 if (kr != KERN_SUCCESS) {
10142 return kr;
10143 }
10144 task_set_conclave_taint(current_task());
10145
10146 return KERN_SUCCESS;
10147 }
10148
10149 kern_return_t
10150 task_inherit_conclave(task_t old_task, task_t new_task, void *vnode, int64_t off)
10151 {
10152 if (old_task->conclave == NULL ||
10153 !exclaves_conclave_is_attached(old_task->conclave)) {
10154 return KERN_SUCCESS;
10155 }
10156
10157 /*
10158 * Only launchd or properly entitled tasks can attach tasks to
10159 * conclaves.
10160 */
10161 if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_CONCLAVE_SPAWN)) {
10162 return KERN_DENIED;
10163 }
10164
10165 /*
10166 * Only entitled tasks can have conclaves attached.
10167 */
10168 if (!exclaves_has_priv_vnode(vnode, off, EXCLAVES_PRIV_CONCLAVE_HOST)) {
10169 return KERN_DENIED;
10170 }
10171
10172 return exclaves_conclave_inherit(old_task->conclave, old_task, new_task);
10173 }
10174
10175 void
10176 task_clear_conclave(task_t task)
10177 {
10178 if (task->exclave_crash_info) {
10179 kfree_data(task->exclave_crash_info, CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE);
10180 task->exclave_crash_info = NULL;
10181 }
10182
10183 if (task->conclave == NULL) {
10184 return;
10185 }
10186
10187 /*
10188 * XXX
10189 * This should only fail if either the conclave is in an unexpected
10190 * state (i.e. not ATTACHED) or if the wrong port is supplied.
10191 * We should re-visit this and make sure we guarantee the above
10192 * constraints.
10193 */
10194 __assert_only kern_return_t ret =
10195 exclaves_conclave_detach(task->conclave, task);
10196 assert3u(ret, ==, KERN_SUCCESS);
10197 }

void
task_stop_conclave(task_t task, bool gather_crash_bt)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL) {
		return;
	}

	if (task_should_panic_on_exit_due_to_conclave_taint(task)) {
		panic("Conclave tainted task %p terminated\n", task);
	}

	/* Stash the task on current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_stop(task->conclave, gather_crash_bt);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}
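
/*
 * Illustrative sketch, not part of the build: task_stop_conclave() above and
 * task_suspend_conclave()/task_resume_conclave() below all stash the target
 * task on the calling thread around the exclaves call so conclave teardown
 * can find it. A hypothetical common helper for that pattern would look like
 * this; the real code keeps the pattern inline.
 */
#if 0
static kern_return_t
conclave_call_with_stashed_task_sketch(task_t task,
    kern_return_t (*downcall)(exclaves_resource_t *conclave))
{
	thread_t thread = current_thread();

	/* Make the target task visible to the conclave teardown path. */
	thread->conclave_stop_task = task;
	kern_return_t kr = (*downcall)(task->conclave);
	thread->conclave_stop_task = TASK_NULL;

	return kr;
}
#endif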

void
task_suspend_conclave(task_t task)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL) {
		return;
	}

	/* Stash the task on current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_suspend(task->conclave);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

void
task_resume_conclave(task_t task)
{
	thread_t thread = current_thread();

	if (task->conclave == NULL) {
		return;
	}

	/* Stash the task on current thread for conclave teardown */
	thread->conclave_stop_task = task;

	__assert_only kern_return_t ret =
	    exclaves_conclave_resume(task->conclave);

	thread->conclave_stop_task = TASK_NULL;

	assert3u(ret, ==, KERN_SUCCESS);
}

kern_return_t
task_stop_conclave_upcall(void)
{
	task_t task = current_task();
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	return exclaves_conclave_stop_upcall(task->conclave);
}

kern_return_t
task_stop_conclave_upcall_complete(void)
{
	task_t task = current_task();
	thread_t thread = current_thread();

	if (!(thread->th_exclaves_state & TH_EXCLAVES_STOP_UPCALL_PENDING)) {
		return KERN_SUCCESS;
	}

	assert3p(task->conclave, !=, NULL);

	return exclaves_conclave_stop_upcall_complete(task->conclave, task);
}

kern_return_t
task_suspend_conclave_upcall(uint64_t *scid_list, size_t scid_list_count)
{
	task_t task = current_task();
	thread_t thread;
	int scid_count = 0;
	kern_return_t kr;
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	kr = task_hold_and_wait(task, false);

	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads)
	{
		if (thread->th_exclaves_state & TH_EXCLAVES_RPC) {
			scid_list[scid_count++] = thread->th_exclaves_ipc_ctx.scid;
			if (scid_count >= scid_list_count) {
				break;
			}
		}
	}

	task_unlock(task);
	return kr;
}
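
/*
 * Illustrative sketch, not part of the build: a hypothetical caller of
 * task_suspend_conclave_upcall() supplying a fixed-size scid array.
 * SCID_LIST_MAX_SKETCH is made up for the sketch; the function fills at most
 * scid_list_count entries with the scids of threads currently in an exclaves
 * RPC and returns the result of task_hold_and_wait() on the calling task.
 */
#if 0
#define SCID_LIST_MAX_SKETCH 64	/* hypothetical sizing, not from this file */

static void
suspend_conclave_upcall_caller_sketch(void)
{
	uint64_t scids[SCID_LIST_MAX_SKETCH] = {0};

	kern_return_t kr = task_suspend_conclave_upcall(scids,
	    SCID_LIST_MAX_SKETCH);
	(void)kr;	/* scids may be partially filled even on failure */
}
#endif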

kern_return_t
task_crash_info_conclave_upcall(task_t task, const struct conclave_sharedbuffer_t *shared_buf,
    uint32_t length)
{
	if (task->conclave == NULL) {
		return KERN_INVALID_TASK;
	}

	/* Allocate the buffer and memcpy it */
	int task_crash_info_buffer_size = 0;
	uint8_t * task_crash_info_buffer;

	if (!length) {
		printf("Conclave upcall: task_crash_info_conclave_upcall did not return any page addresses\n");
		return KERN_INVALID_ARGUMENT;
	}

	task_crash_info_buffer_size = CONCLAVE_CRASH_BUFFER_PAGECOUNT * PAGE_SIZE;
	assert3u(task_crash_info_buffer_size, >=, length);

	task_crash_info_buffer = kalloc_data(task_crash_info_buffer_size, Z_WAITOK);
	if (!task_crash_info_buffer) {
		panic("task_crash_info_conclave_upcall: cannot allocate buffer for task_info shared memory");
		return KERN_INVALID_ARGUMENT;
	}

	uint8_t * dst = task_crash_info_buffer;
	uint32_t remaining = length;
	for (size_t i = 0; i < CONCLAVE_CRASH_BUFFER_PAGECOUNT; i++) {
		if (remaining) {
			memcpy(dst, (uint8_t*)phystokv((pmap_paddr_t)shared_buf->physaddr[i]), PAGE_SIZE);
			remaining = (remaining >= PAGE_SIZE) ? remaining - PAGE_SIZE : 0;
			dst += PAGE_SIZE;
		}
	}

	task_lock(task);
	if (task->exclave_crash_info == NULL && task->active) {
		task->exclave_crash_info = task_crash_info_buffer;
		task->exclave_crash_info_length = length;
		task_crash_info_buffer = NULL;
	}
	task_unlock(task);

	if (task_crash_info_buffer) {
		kfree_data(task_crash_info_buffer, task_crash_info_buffer_size);
	}

	return KERN_SUCCESS;
}
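
/*
 * Illustrative sketch, not part of the build: the publish-under-lock pattern
 * used above. The crash buffer is handed off to the task only while the task
 * lock is held and the task is still active; if the handoff does not happen,
 * the local pointer stays non-NULL and the buffer is freed. The helper name
 * and parameters are hypothetical.
 */
#if 0
static void
publish_crash_buffer_sketch(task_t task, uint8_t *buf, vm_size_t size, uint32_t len)
{
	task_lock(task);
	if (task->exclave_crash_info == NULL && task->active) {
		task->exclave_crash_info = buf;
		task->exclave_crash_info_length = len;
		buf = NULL;	/* ownership transferred to the task */
	}
	task_unlock(task);

	if (buf != NULL) {
		kfree_data(buf, size);	/* not published; do not leak */
	}
}
#endif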

exclaves_resource_t *
task_get_conclave(task_t task)
{
	return task->conclave;
}

extern boolean_t IOPMRootDomainGetWillShutdown(void);

TUNABLE(bool, disable_conclave_taint, "disable_conclave_taint", true); /* Do not taint processes when they talk to a conclave, so the system does not panic when they exit. */

static bool
task_should_panic_on_exit_due_to_conclave_taint(task_t task)
{
	/* Check if boot-arg to disable conclave taint is set */
	if (disable_conclave_taint) {
		return false;
	}

	/* Check if the system is shutting down */
	if (IOPMRootDomainGetWillShutdown()) {
		return false;
	}

	return task_is_conclave_tainted(task);
}
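
/*
 * Illustrative sketch, not part of the build: the decision above reduces to
 * the expression below. A task panics the system on exit only when the
 * disable_conclave_taint boot-arg is off, the system is not shutting down,
 * and the task has been tainted without being marked untaintable. The helper
 * name and parameters are hypothetical.
 */
#if 0
static bool
conclave_taint_decision_sketch(bool taint_disabled, bool will_shutdown,
    uint32_t t_exclave_state)
{
	return !taint_disabled && !will_shutdown &&
	       (t_exclave_state & TES_CONCLAVE_TAINTED) != 0 &&
	       (t_exclave_state & TES_CONCLAVE_UNTAINTABLE) == 0;
}
#endif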

static bool
task_is_conclave_tainted(task_t task)
{
	return (task->t_exclave_state & TES_CONCLAVE_TAINTED) != 0 &&
	       !(task->t_exclave_state & TES_CONCLAVE_UNTAINTABLE);
}

static void
task_set_conclave_taint(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_TAINTED, relaxed);
}

void
task_set_conclave_untaintable(task_t task)
{
	os_atomic_or(&task->t_exclave_state, TES_CONCLAVE_UNTAINTABLE, relaxed);
}

void
task_add_conclave_crash_info(task_t task, void *crash_info_ptr)
{
	__block kern_return_t error = KERN_SUCCESS;
	tb_error_t tberr = TB_ERROR_SUCCESS;
	void *crash_info;
	uint32_t crash_info_length = 0;

	if (task->conclave == NULL) {
		return;
	}

	if (task->exclave_crash_info_length == 0) {
		return;
	}

	error = kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_BEGIN,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);
	if (error != KERN_SUCCESS) {
		return;
	}

	crash_info = task->exclave_crash_info;
	crash_info_length = task->exclave_crash_info_length;

	tberr = stackshot_stackshotresult__unmarshal(crash_info,
	    (uint64_t)crash_info_length, ^(stackshot_stackshotresult_s result){
		error = stackshot_exclaves_process_stackshot(&result, crash_info_ptr, false);
		if (error != KERN_SUCCESS) {
		        printf("task_add_conclave_crash_info: error processing stackshot result %d\n", error);
		}
	});
	if (tberr != TB_ERROR_SUCCESS) {
		printf("task_conclave_crash: task_add_conclave_crash_info could not unmarshal stackshot data 0x%x\n", tberr);
		error = KERN_FAILURE;
		goto error_exit;
	}

error_exit:
	kcdata_add_container_marker(crash_info_ptr, KCDATA_TYPE_CONTAINER_END,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);

	return;
}
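
/*
 * Illustrative sketch, not part of the build: the container begin/end pattern
 * used above. Exclaves crash data is bracketed by a
 * STACKSHOT_KCCONTAINER_EXCLAVES container, and the container is closed even
 * when the payload could not be produced, so consumers see a well-formed
 * kcdata stream. The helper name and its callback parameter are hypothetical.
 */
#if 0
static kern_return_t
kcdata_exclaves_container_sketch(void *kcdata, kern_return_t (*fill)(void *))
{
	kern_return_t kr = kcdata_add_container_marker(kcdata,
	    KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_EXCLAVES, 0);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kern_return_t fill_kr = (*fill)(kcdata);

	/* Always close the container, even if filling it failed. */
	kcdata_add_container_marker(kcdata, KCDATA_TYPE_CONTAINER_END,
	    STACKSHOT_KCCONTAINER_EXCLAVES, 0);

	return fill_kr;
}
#endif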

#endif /* CONFIG_EXCLAVES */

/* defined in bsd/kern/kern_proc.c */
extern void proc_name(int pid, char *buf, int size);
extern const char *proc_best_name(struct proc *p);

void
task_procname(task_t task, char *buf, int size)
{
	proc_name(task_pid(task), buf, size);
}

const char *
task_best_name(task_t task)
{
	return proc_best_name(task_get_proc_raw(task));
}