1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: thread.h
60 * Author: Avadis Tevanian, Jr.
61 *
62 * This file contains the structure definitions for threads.
63 *
64 */
65 /*
66 * Copyright (c) 1993 The University of Utah and
67 * the Computer Systems Laboratory (CSL). All rights reserved.
68 *
69 * Permission to use, copy, modify and distribute this software and its
70 * documentation is hereby granted, provided that both the copyright
71 * notice and this permission notice appear in all copies of the
72 * software, derivative works or modified versions, and any portions
73 * thereof, and that both notices appear in supporting documentation.
74 *
75 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
76 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
77 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
78 *
79 * CSL requests users of this software to return to [email protected] any
80 * improvements that they make and grant CSL redistribution rights.
81 *
82 */
83
84 #ifndef _KERN_THREAD_H_
85 #define _KERN_THREAD_H_
86
87 #include <mach/kern_return.h>
88 #include <mach/mach_types.h>
89 #include <mach/mach_param.h>
90 #include <mach/message.h>
91 #include <mach/boolean.h>
92 #include <mach/vm_param.h>
93 #include <mach/thread_info.h>
94 #include <mach/thread_status.h>
95 #include <mach/exception_types.h>
96
97 #include <kern/kern_types.h>
98 #include <vm/vm_kern.h>
99 #include <sys/cdefs.h>
100 #include <sys/_types/_size_t.h>
101
102 #ifdef MACH_KERNEL_PRIVATE
103 #include <mach_assert.h>
104 #include <mach_ldebug.h>
105
106 #include <ipc/ipc_types.h>
107
108 #include <mach/port.h>
109 #include <kern/cpu_number.h>
110 #include <kern/smp.h>
111 #include <kern/queue.h>
112
113 #include <kern/timer.h>
114 #include <kern/simple_lock.h>
115 #include <kern/locks.h>
116 #include <kern/sched.h>
117 #include <kern/sched_prim.h>
118 #include <mach/sfi_class.h>
119 #include <kern/thread_call.h>
120 #include <kern/thread_group.h>
121 #include <kern/timer_call.h>
122 #include <kern/task.h>
123 #include <kern/exception.h>
124 #include <kern/affinity.h>
125 #include <kern/debug.h>
126 #include <kern/block_hint.h>
127 #include <kern/recount.h>
128 #include <kern/turnstile.h>
129 #include <kern/mpsc_queue.h>
130
131 #if CONFIG_EXCLAVES
132 #include <mach/exclaves.h>
133 #endif /* CONFIG_EXCLAVES */
134
135 #include <kern/waitq.h>
136 #include <san/kasan.h>
137 #include <san/kcov_data.h>
138 #include <os/refcnt.h>
139
140 #include <ipc/ipc_kmsg.h>
141
142 #include <machine/atomic.h>
143 #include <machine/cpu_data.h>
144 #include <machine/thread.h>
145
146 #endif /* MACH_KERNEL_PRIVATE */
147 #ifdef XNU_KERNEL_PRIVATE
148 /* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
149 #include <kern/priority_queue.h>
150 #endif /* XNU_KERNEL_PRIVATE */
151
152 __BEGIN_DECLS
153
154 #ifdef XNU_KERNEL_PRIVATE
155 #if CONFIG_TASKWATCH
156 /* Taskwatch related. TODO: find this a better home */
157 typedef struct task_watcher task_watch_t;
158 #endif /* CONFIG_TASKWATCH */
159
160 /* Thread tags; for easy identification. */
161 __options_closed_decl(thread_tag_t, uint16_t, {
162 THREAD_TAG_MAINTHREAD = 0x01,
163 THREAD_TAG_CALLOUT = 0x02,
164 THREAD_TAG_IOWORKLOOP = 0x04,
165 THREAD_TAG_PTHREAD = 0x10,
166 THREAD_TAG_WORKQUEUE = 0x20,
167 THREAD_TAG_USER_JOIN = 0x40,
168 });
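/*
 * Illustrative sketch only: tags are set and queried through thread_set_tag()
 * and thread_get_tag(), declared later in this header. A hypothetical caller
 * marking and later identifying a workqueue thread might look like:
 *
 *	thread_set_tag(thread, THREAD_TAG_WORKQUEUE);
 *	...
 *	if (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) {
 *		// treat as a workqueue worker thread
 *	}
 */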
169
170 typedef struct thread_ro *thread_ro_t;
171
172 /*!
173 * @struct thread_ro
174 *
175 * @brief
176 * A structure allocated in a read only zone that safely
177 * represents the linkages of a thread to its cred, proc, task, ...
178 *
179 * @discussion
180 * The lifetime of a @c thread_ro structure is 1:1 with that
181  *	of a @c thread_t or a @c uthread_t, and holding a thread reference
182  *	always allows one to dereference this structure safely.
183 */
184 struct thread_ro {
185 struct thread *tro_owner;
186 #if MACH_BSD
187 __xnu_struct_group(thread_ro_creds, tro_creds, {
188 /*
189 * @c tro_cred holds the current thread credentials.
190 *
191 * For most threads, this is a cache of the proc's
192 * credentials that has been updated at the last
193 * syscall boundary via current_cached_proc_cred_update().
194 *
195 * If the thread assumed a different identity using settid(),
196 * then the proc cached credential lives in @c tro_realcred
197 * instead.
198 */
199 struct ucred *tro_cred;
200 struct ucred *tro_realcred;
201 });
202 struct proc *tro_proc;
203 struct proc_ro *tro_proc_ro;
204 #endif
205 struct task *tro_task;
206
207 struct ipc_port *tro_self_port;
208 #if CONFIG_CSR
209 struct ipc_port *tro_settable_self_port; /* send right */
210 #endif /* CONFIG_CSR */
211 struct ipc_port *tro_ports[THREAD_SELF_PORT_COUNT]; /* no right */
212
213 struct exception_action *tro_exc_actions;
214 };
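/*
 * Illustrative sketch only: because the thread_ro lifetime is 1:1 with the
 * owning thread, a caller holding a thread reference may follow the read-only
 * linkage without additional locking. get_thread_ro() is declared later in
 * this header; the helper below is hypothetical.
 *
 *	static task_t
 *	example_thread_task(thread_t thread)
 *	{
 *		return get_thread_ro(thread)->tro_task;
 *	}
 */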
215
216 /*
217 * Flags for `thread set status`.
218 */
219 __options_decl(thread_set_status_flags_t, uint32_t, {
220 TSSF_FLAGS_NONE = 0,
221
222 /* Translate the state to user. */
223 TSSF_TRANSLATE_TO_USER = 0x01,
224
225 /* Translate the state to user. Preserve flags */
226 TSSF_PRESERVE_FLAGS = 0x02,
227
228 /* Check kernel signed flag */
229 TSSF_CHECK_USER_FLAGS = 0x04,
230
231 /* Allow only user state PTRS */
232 TSSF_ALLOW_ONLY_USER_PTRS = 0x08,
233
234 /* Generate random diversifier and stash it */
235 TSSF_RANDOM_USER_DIV = 0x10,
236
237 /* Stash sigreturn token */
238 TSSF_STASH_SIGRETURN_TOKEN = 0x20,
239
240 /* Check sigreturn token */
241 TSSF_CHECK_SIGRETURN_TOKEN = 0x40,
242
243 /* Allow only matching sigreturn token */
244 TSSF_ALLOW_ONLY_MATCHING_TOKEN = 0x80,
245
246 /* Stash diversifier from thread */
247 TSSF_THREAD_USER_DIV = 0x100,
248
249 /* Check for entitlement */
250 TSSF_CHECK_ENTITLEMENT = 0x200,
251
252 /* Stash diversifier from task */
253 TSSF_TASK_USER_DIV = 0x400,
254
255 /* Only take the PC from the new thread state */
256 TSSF_ONLY_PC = 0x800,
257 });
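/*
 * Illustrative sketch only: these flags are passed to the thread state
 * setters/getters declared later in this header (thread_setstatus_from_user(),
 * thread_getstatus_to_user()). A hypothetical caller restoring user state and
 * validating a sigreturn token might combine them as:
 *
 *	thread_set_status_flags_t flags =
 *	    TSSF_CHECK_SIGRETURN_TOKEN | TSSF_ALLOW_ONLY_MATCHING_TOKEN;
 *	kr = thread_setstatus_from_user(thread, flavor, tstate, count,
 *	    old_tstate, old_count, flags);
 */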
258
259 /*
260 * Size in bits of compact thread id (ctid).
261 */
262 #define CTID_SIZE_BIT 20
263 typedef uint32_t ctid_t;
264
265 #endif /* XNU_KERNEL_PRIVATE */
266 #ifdef MACH_KERNEL_PRIVATE
267
268 extern zone_t thread_ro_zone;
269
270 __options_decl(thread_work_interval_flags_t, uint32_t, {
271 TH_WORK_INTERVAL_FLAGS_NONE = 0x0,
272 #if CONFIG_SCHED_AUTO_JOIN
273	/* Flags indicating status of the work interval the thread is currently part of */
274 TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK = 0x1,
275 #endif /* CONFIG_SCHED_AUTO_JOIN */
276 TH_WORK_INTERVAL_FLAGS_HAS_WORKLOAD_ID = 0x2,
277 TH_WORK_INTERVAL_FLAGS_RT_ALLOWED = 0x4,
278 });
279
280 #if CONFIG_EXCLAVES
281 /* Thread exclaves interrupt-safe state bits (ORd) */
282 __options_decl(thread_exclaves_intstate_flags_t, uint32_t, {
283 /* Thread is currently executing in secure kernel or exclaves userspace
284 * or was interrupted/preempted while doing so. */
285 TH_EXCLAVES_EXECUTION = 0x1,
286 });
287
288 __options_decl(thread_exclaves_state_flags_t, uint16_t, {
289 /* Thread exclaves state bits (ORd) */
290 /* Thread is handling RPC from a client in xnu or Darwin userspace (but
291 * may have returned to xnu due to an exclaves scheduler request or having
292 * upcalled). Must not re-enter exclaves via RPC or return to Darwin
293 * userspace. */
294 TH_EXCLAVES_RPC = 0x1,
295 /* Thread has made an upcall RPC request back into xnu while handling RPC
296 * into exclaves from a client in xnu or Darwin userspace. Must not
297 * re-enter exclaves via RPC or return to Darwin userspace. */
298 TH_EXCLAVES_UPCALL = 0x2,
299 /* Thread has made an exclaves scheduler request (such as a wait or wake)
300 * from the xnu scheduler while handling RPC into exclaves from a client in
301 * xnu or Darwin userspace. Must not re-enter exclaves via RPC or return to
302 * Darwin userspace. */
303 TH_EXCLAVES_SCHEDULER_REQUEST = 0x4,
304 /* Thread is calling into xnu proxy server directly (but may have
305 * returned to xnu due to an exclaves scheduler request or having
306 * upcalled). Must not re-enter exclaves or return to Darwin userspace.
307 */
308 TH_EXCLAVES_XNUPROXY = 0x8,
309 /* Thread is calling into the exclaves scheduler directly.
310 * Must not re-enter exclaves or return to Darwin userspace.
311 */
312 TH_EXCLAVES_SCHEDULER_CALL = 0x10,
313 /* Thread has called the stop upcall and once the thread returns from
314 * downcall, exit_with_reason needs to be called on the task.
315 */
316 TH_EXCLAVES_STOP_UPCALL_PENDING = 0x20,
317 /* Thread is expecting that an exclaves-side thread may be spawned.
318 */
319 TH_EXCLAVES_SPAWN_EXPECTED = 0x40,
320 /* Thread is resuming the panic thread.
321 * Must not re-enter exclaves or return to Darwin userspace.
322 */
323 TH_EXCLAVES_RESUME_PANIC_THREAD = 0x80,
324 });
325 #define TH_EXCLAVES_STATE_ANY ( \
326 TH_EXCLAVES_RPC | \
327 TH_EXCLAVES_UPCALL | \
328 TH_EXCLAVES_SCHEDULER_REQUEST | \
329 TH_EXCLAVES_XNUPROXY | \
330 TH_EXCLAVES_SCHEDULER_CALL | \
331 TH_EXCLAVES_RESUME_PANIC_THREAD)
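/*
 * Illustrative sketch only: TH_EXCLAVES_STATE_ANY collects the states in which
 * a thread must not re-enter exclaves via RPC or return to Darwin userspace.
 * A hypothetical check on the current thread could look like:
 *
 *	if (current_thread()->th_exclaves_state & TH_EXCLAVES_STATE_ANY) {
 *		// already involved in an exclaves call; do not re-enter
 *	}
 */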
332
333 __options_decl(thread_exclaves_inspection_flags_t, uint16_t, {
334 /* Thread is on Stackshot's inspection queue */
335 TH_EXCLAVES_INSPECTION_STACKSHOT = 0x1,
336 /* Thread is on Kperf's inspection queue */
337 TH_EXCLAVES_INSPECTION_KPERF = 0x2,
338	/* Thread must not be inspected (may deadlock, etc.) - set by collector thread */
339 TH_EXCLAVES_INSPECTION_NOINSPECT = 0x8000,
340 });
341
342 #endif /* CONFIG_EXCLAVES */
343
344 typedef union thread_rr_state {
345 uint32_t trr_value;
346 struct {
347 #define TRR_FAULT_NONE 0
348 #define TRR_FAULT_PENDING 1
349 #define TRR_FAULT_OBSERVED 2
350 /*
351 * Set to TRR_FAULT_PENDING with interrupts disabled
352 * by the thread when it is entering a user fault codepath.
353 *
354 * Moved to TRR_FAULT_OBSERVED from TRR_FAULT_PENDING:
355	 * - by the thread itself at IPI time,
356 * - or by task_restartable_ranges_synchronize() if the thread
357 * is interrupted (under the thread lock)
358 *
359 * Cleared by the thread when returning from a user fault
360 * codepath.
361 */
362 uint8_t trr_fault_state;
363
364 /*
365 * Set by task_restartable_ranges_synchronize()
366 * if trr_fault_state is TRR_FAULT_OBSERVED
367 * and a rendez vous at the AST is required.
368 *
369 * Set atomically if trr_fault_state == TRR_FAULT_OBSERVED,
370 * and trr_ipi_ack_pending == 0
371 */
372 uint8_t trr_sync_waiting;
373
374 /*
375 * Updated under the thread_lock(),
376 * set by task_restartable_ranges_synchronize()
377 * when the thread was IPIed and the caller is waiting
378 * for an ACK.
379 */
380 uint16_t trr_ipi_ack_pending;
381 };
382 } thread_rr_state_t;
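/*
 * Illustrative sketch only (hypothetical code, locking and IPI handling not
 * shown): per the comments above, trr_fault_state moves PENDING -> OBSERVED
 * around a user fault codepath and is cleared on the way out.
 *
 *	// entering a user fault codepath, interrupts disabled
 *	thread->t_rr_state.trr_fault_state = TRR_FAULT_PENDING;
 *	...
 *	// returning from the user fault codepath
 *	thread->t_rr_state.trr_fault_state = TRR_FAULT_NONE;
 */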
383
384 struct thread {
385 #if MACH_ASSERT
386 #define THREAD_MAGIC 0x1234ABCDDCBA4321ULL
387 /* Ensure nothing uses &thread as a queue entry */
388 uint64_t thread_magic;
389 #endif /* MACH_ASSERT */
390
391 /*
392 * NOTE: The runq field in the thread structure has an unusual
393 * locking protocol. If its value is PROCESSOR_NULL, then it is
394 * locked by the thread_lock, but if its value is something else
395 * then it is locked by the associated run queue lock. It is
396 * set to PROCESSOR_NULL without holding the thread lock, but the
397 * transition from PROCESSOR_NULL to non-null must be done
398 * under the thread lock and the run queue lock. To enforce the
399 * protocol, runq should only be accessed using the
400 * thread_get/set/clear_runq functions and locked variants below.
401 *
402 * New waitq APIs allow the 'links' and '__runq' fields to be
403 * anywhere in the thread structure.
404 */
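	/*
	 * Illustrative sketch of the protocol above (assumes the
	 * thread_get_runq()/thread_set_runq() accessors mentioned in the
	 * comment; the code is hypothetical):
	 *
	 *	thread_lock(thread);
	 *	if (thread_get_runq(thread) == PROCESSOR_NULL) {
	 *		// not enqueued: __runq is stable under the thread lock
	 *	} else {
	 *		// enqueued: the owning run queue's lock protects __runq
	 *	}
	 *	thread_unlock(thread);
	 */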
405 union {
406 queue_chain_t runq_links; /* run queue links */
407 queue_chain_t wait_links; /* wait queue links */
408 struct mpsc_queue_chain mpsc_links; /* thread daemon mpsc links */
409 struct priority_queue_entry_sched wait_prioq_links; /* priority ordered waitq links */
410 };
411
412 event64_t wait_event; /* wait queue event */
413 struct { processor_t runq; } __runq; /* internally managed run queue assignment, see above comment */
414 waitq_t waitq; /* wait queue this thread is enqueued on */
415 struct turnstile *turnstile; /* thread's turnstile, protected by primitives interlock */
416 void *inheritor; /* inheritor of the primitive the thread will block on */
417 struct priority_queue_sched_max sched_inheritor_queue; /* Inheritor queue for kernel promotion */
418 struct priority_queue_sched_max base_inheritor_queue; /* Inheritor queue for user promotion */
419
420 #if CONFIG_SCHED_EDGE
421 bool th_bound_cluster_enqueued;
422 bool th_shared_rsrc_enqueued[CLUSTER_SHARED_RSRC_TYPE_COUNT];
423 bool th_shared_rsrc_heavy_user[CLUSTER_SHARED_RSRC_TYPE_COUNT];
424 bool th_shared_rsrc_heavy_perf_control[CLUSTER_SHARED_RSRC_TYPE_COUNT];
425 #endif /* CONFIG_SCHED_EDGE */
426
427 #if CONFIG_SCHED_CLUTCH
428 /*
429 * In the clutch scheduler, the threads are maintained in runqs at the clutch_bucket
430 * level (clutch_bucket defines a unique thread group and scheduling bucket pair). The
431 * thread is linked via a couple of linkages in the clutch bucket:
432 *
433 * - A stable priority queue linkage which is the main runqueue (based on sched_pri) for the clutch bucket
434 * - A regular priority queue linkage which is based on thread's base/promoted pri (used for clutch bucket priority calculation)
435 * - A queue linkage used for timesharing operations of threads at the scheduler tick
436 */
437 struct priority_queue_entry_stable th_clutch_runq_link;
438 struct priority_queue_entry_sched th_clutch_pri_link;
439 queue_chain_t th_clutch_timeshare_link;
440 #endif /* CONFIG_SCHED_CLUTCH */
441
442 /* Data updated during assert_wait/thread_wakeup */
443 decl_simple_lock_data(, sched_lock); /* scheduling lock (thread_lock()) */
444 decl_simple_lock_data(, wake_lock); /* for thread stop / wait (wake_lock()) */
445 uint16_t options; /* options set by thread itself */
446 #define TH_OPT_INTMASK 0x0003 /* interrupt / abort level */
447 #define TH_OPT_VMPRIV 0x0004 /* may allocate reserved memory */
448 #define TH_OPT_SYSTEM_CRITICAL 0x0010 /* Thread must always be allowed to run - even under heavy load */
449 #define TH_OPT_PROC_CPULIMIT 0x0020 /* Thread has a task-wide CPU limit applied to it */
450 #define TH_OPT_PRVT_CPULIMIT 0x0040 /* Thread has a thread-private CPU limit applied to it */
451 #define TH_OPT_IDLE_THREAD 0x0080 /* Thread is a per-processor idle thread */
452 #define TH_OPT_GLOBAL_FORCED_IDLE 0x0100 /* Thread performs forced idle for thermal control */
453 #define TH_OPT_SCHED_VM_GROUP 0x0200 /* Thread belongs to special scheduler VM group */
454 #define TH_OPT_HONOR_QLIMIT 0x0400 /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */
455 #define TH_OPT_SEND_IMPORTANCE 0x0800 /* Thread will allow importance donation from kernel rpc */
456 #define TH_OPT_ZONE_PRIV 0x1000 /* Thread may use the zone replenish reserve */
457 #define TH_OPT_IPC_TG_BLOCKED 0x2000 /* Thread blocked in sync IPC and has made the thread group blocked callout */
458 #define TH_OPT_FORCED_LEDGER 0x4000 /* Thread has a forced CPU limit */
459 #define TH_IN_MACH_EXCEPTION 0x8000 /* Thread is currently handling a mach exception */
460
461 bool wake_active; /* wake event on stop */
462 bool at_safe_point; /* thread_abort_safely allowed */
463 uint8_t sched_saved_run_weight;
464 #if DEVELOPMENT || DEBUG
465 bool pmap_footprint_suspended;
466 #endif /* DEVELOPMENT || DEBUG */
467
468
469 ast_t reason; /* why we blocked */
470 uint32_t quantum_remaining;
471 wait_result_t wait_result; /* outcome of wait -
472 * may be examined by this thread
473 * WITHOUT locking */
474 thread_rr_state_t t_rr_state; /* state for restartable ranges */
475 thread_continue_t continuation; /* continue here next dispatch */
476 void *parameter; /* continuation parameter */
477
478 /* Data updated/used in thread_invoke */
479 vm_offset_t kernel_stack; /* current kernel stack */
480 vm_offset_t reserved_stack; /* reserved kernel stack */
481
482 /*** Machine-dependent state ***/
483 struct machine_thread machine;
484
485 #if KASAN
486 struct kasan_thread_data kasan_data;
487 #endif
488 #if CONFIG_KCOV
489 kcov_thread_data_t kcov_data;
490 #endif
491
492 /* Thread state: */
493 int state;
494 /*
495 * Thread states [bits or'ed]
496 * All but TH_WAIT_REPORT are encoded in SS_TH_FLAGS
497 * All are encoded in kcdata.py ('ths_state')
498 */
499 #define TH_WAIT 0x01 /* queued for waiting */
500 #define TH_SUSP 0x02 /* stopped or requested to stop */
501 #define TH_RUN 0x04 /* running or on runq */
502 #define TH_UNINT                0x08            /* waiting uninterruptibly */
503 #define TH_TERMINATE 0x10 /* halted at termination */
504 #define TH_TERMINATE2 0x20 /* added to termination queue */
505 #define TH_WAIT_REPORT 0x40 /* the wait is using the sched_call,
506 * only set if TH_WAIT is also set */
507 #define TH_IDLE 0x80 /* idling processor */
508 #define TH_WAKING 0x100 /* between waitq remove and thread_go */
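	/*
	 * Illustrative sketch only: the state bits are tested in combination,
	 * e.g. a hypothetical check for an uninterruptible wait:
	 *
	 *	if ((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT)) {
	 *		// waiting and not interruptible
	 *	}
	 */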
509
510 /* Scheduling information */
511 sched_mode_t sched_mode; /* scheduling mode */
512 sched_mode_t saved_mode; /* saved mode during forced mode demotion */
513
514 /* This thread's contribution to global sched counters */
515 sched_bucket_t th_sched_bucket;
516
517 sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */
518 sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */
519
520 uint32_t sched_flags; /* current flag bits */
521 #define TH_SFLAG_NO_SMT 0x0001 /* On an SMT CPU, this thread must be scheduled alone */
522 #define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */
523 #define TH_SFLAG_THROTTLED 0x0004 /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */
524
525 #define TH_SFLAG_PROMOTED 0x0008 /* sched pri has been promoted by kernel mutex priority promotion */
526 #define TH_SFLAG_ABORT 0x0010 /* abort interruptible waits */
527 #define TH_SFLAG_ABORTSAFELY 0x0020 /* ... but only those at safe point */
528 #define TH_SFLAG_ABORTED_MASK (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY)
529 #define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */
530 #define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */
531 #define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS)
532 /* unused TH_SFLAG_PRI_UPDATE 0x0100 */
533 #define TH_SFLAG_EAGERPREEMPT 0x0200 /* Any preemption of this thread should be treated as if AST_URGENT applied */
534 #define TH_SFLAG_RW_PROMOTED 0x0400 /* promote reason: blocking with RW lock held */
535 #define TH_SFLAG_BASE_PRI_FROZEN 0x0800 /* (effective) base_pri is frozen */
536 #define TH_SFLAG_WAITQ_PROMOTED 0x1000 /* promote reason: waitq wakeup (generally for IPC receive) */
537
538 #if __AMP__
539 #define TH_SFLAG_ECORE_ONLY 0x2000 /* (unused) Bind thread to E core processor set */
540 #define TH_SFLAG_PCORE_ONLY 0x4000 /* (unused) Bind thread to P core processor set */
541 #endif
542
543 #define TH_SFLAG_EXEC_PROMOTED 0x8000 /* promote reason: thread is in an exec */
544
545 #define TH_SFLAG_THREAD_GROUP_AUTO_JOIN 0x10000 /* thread has been auto-joined to thread group */
546 #if __AMP__
547 #define TH_SFLAG_BOUND_SOFT 0x20000 /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */
548 #endif /* __AMP__ */
549
550 #if CONFIG_PREADOPT_TG
551 #define TH_SFLAG_REEVALUTE_TG_HIERARCHY_LATER 0x40000 /* thread needs to reevaluate its TG hierarchy */
552 #endif
553
554 #define TH_SFLAG_FLOOR_PROMOTED 0x80000 /* promote reason: boost requested */
555
556 /* 'promote reasons' that request a priority floor only, not a custom priority */
557 #define TH_SFLAG_PROMOTE_REASON_MASK (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED | TH_SFLAG_FLOOR_PROMOTED)
558
559 #define TH_SFLAG_RT_DISALLOWED 0x100000 /* thread wants RT but may not have joined a work interval that allows it */
560 #define TH_SFLAG_DEMOTED_MASK (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE | TH_SFLAG_RT_DISALLOWED) /* saved_mode contains previous sched_mode */
561 #define TH_SFLAG_RT_CPULIMIT 0x200000 /* thread should have a CPU limit applied. */
562
563 int16_t sched_pri; /* scheduled (current) priority */
564 int16_t base_pri; /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
565 int16_t req_base_pri; /* requested base priority */
566 int16_t max_priority; /* copy of max base priority */
567 int16_t task_priority; /* copy of task base priority */
568 int16_t promotion_priority; /* priority thread is currently promoted to */
569	uint16_t                priority_floor_count;   /* number of pushes boosting the floor priority */
570 int16_t suspend_count; /* Kernel holds on this thread */
571
572 int iotier_override; /* atomic operations to set, cleared on ret to user */
573 os_ref_atomic_t ref_count; /* number of references to me */
574
575 uint32_t rwlock_count; /* Number of lck_rw_t locks held by thread */
576 struct smrq_slist_head smr_stack;
577 #ifdef DEBUG_RW
578 rw_lock_debug_t rw_lock_held; /* rw_locks currently held by the thread */
579 #endif /* DEBUG_RW */
580
581 integer_t importance; /* task-relative importance */
582
583 /* Priority depression expiration */
584 integer_t depress_timer_active;
585 timer_call_t depress_timer;
586
587 /* real-time parameters */
588 struct { /* see mach/thread_policy.h */
589 uint32_t period;
590 uint32_t computation;
591 uint32_t constraint;
592 bool preemptible;
593 uint8_t priority_offset; /* base_pri = BASEPRI_RTQUEUES + priority_offset */
594 uint64_t deadline;
595 } realtime;
596
597 uint64_t last_run_time; /* time when thread was switched away from */
598 uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */
599 uint64_t last_basepri_change_time; /* time when thread was last changed in basepri while runnable */
600 uint64_t same_pri_latency;
601 /*
602 * workq_quantum_deadline is the workq thread's next runtime deadline. This
603 * value is set to 0 if the thread has no such deadline applicable to it.
604 *
605	 * The synchronization for this field follows from how it is modified:
606	 * 1) It is always modified by the thread on itself, or on the thread
607	 * when it is not running/runnable
608	 * 2) A change of this field is immediately followed by a
609	 * corresponding change to the AST_KEVENT to either set or clear the
610	 * AST_KEVENT_WORKQ_QUANTUM_EXPIRED bit
611	 *
612	 * workq_quantum_deadline can be modified by the thread on itself in
613	 * interrupt context. However, per (2), because the change to the
614	 * AST_KEVENT is volatile, the compiler must preserve the order between
615	 * the write to workq_quantum_deadline and the write to the kevent field,
616	 * which guarantees the correct synchronization.
617 */
618 uint64_t workq_quantum_deadline;
619
620 #if WORKQ_QUANTUM_HISTORY_DEBUG
621
622 #define WORKQ_QUANTUM_HISTORY_COUNT 16
623 struct workq_quantum_history {
624 uint64_t time;
625 uint64_t deadline;
626 bool arm;
627 } workq_quantum_history[WORKQ_QUANTUM_HISTORY_COUNT];
628 uint64_t workq_quantum_history_index;
629
630 #define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...) ({\
631 thread_t __th = (thread); \
632	uint64_t __index = os_atomic_inc_orig(&__th->workq_quantum_history_index, relaxed); \
633 struct workq_quantum_history _wq_quantum_history = { mach_approximate_time(), __VA_ARGS__}; \
634 __th->workq_quantum_history[__index % WORKQ_QUANTUM_HISTORY_COUNT] = \
635 (struct workq_quantum_history) _wq_quantum_history; \
636 })
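/*
 * Illustrative sketch only: callers record an entry by supplying the deadline
 * and arm fields; the timestamp is taken by the macro itself. Hypothetical use:
 *
 *	WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread,
 *	    thread->workq_quantum_deadline, true);
 */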
637 #else /* WORKQ_QUANTUM_HISTORY_DEBUG */
638 #define WORKQ_QUANTUM_HISTORY_WRITE_ENTRY(thread, ...)
639 #endif /* WORKQ_QUANTUM_HISTORY_DEBUG */
640
641 #define THREAD_NOT_RUNNABLE (~0ULL)
642
643 #if CONFIG_THREAD_GROUPS
644 struct thread_group *thread_group;
645 #endif
646
647 /* Data used during setrun/dispatch */
648 processor_t bound_processor; /* bound to a processor? */
649 processor_t last_processor; /* processor last dispatched on */
650 processor_t chosen_processor; /* Where we want to run this thread */
651
652 /* Fail-safe computation since last unblock or qualifying yield */
653 uint64_t computation_metered;
654 uint64_t computation_epoch;
655 uint64_t computation_interrupt_epoch;
656 uint64_t safe_release; /* when to release fail-safe */
657
658 /* Call out from scheduler */
659 void (*sched_call)(int type, thread_t thread);
660
661 /* Statistics and timesharing calculations */
662 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
663 natural_t sched_stamp; /* last scheduler tick */
664 natural_t sched_usage; /* timesharing cpu usage [sched] */
665 natural_t pri_shift; /* usage -> priority from pset */
666 natural_t cpu_usage; /* instrumented cpu usage [%cpu] */
667 natural_t cpu_delta; /* accumulated cpu_usage delta */
668 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
669
670 uint32_t c_switch; /* total context switches */
671 uint32_t p_switch; /* total processor switches */
672 uint32_t ps_switch; /* total pset switches */
673
674 /* Timing data structures */
675 uint64_t sched_time_save; /* saved time for scheduler tick */
676 uint64_t vtimer_user_save; /* saved values for vtimers */
677 uint64_t vtimer_prof_save;
678 uint64_t vtimer_rlim_save;
679 uint64_t vtimer_qos_save;
680
681 timer_data_t runnable_timer; /* time the thread is runnable (including running) */
682
683 struct recount_thread th_recount; /* resource accounting */
684
685 #if CONFIG_SCHED_SFI
686 /* Timing for wait state */
687 uint64_t wait_sfi_begin_time; /* start time for thread waiting in SFI */
688 #endif
689
690 /*
691 * Processor/cache affinity
692 * - affinity_threads links task threads with the same affinity set
693 */
694 queue_chain_t affinity_threads;
695 affinity_set_t affinity_set;
696
697 #if CONFIG_TASKWATCH
698 task_watch_t *taskwatch; /* task watch */
699 #endif /* CONFIG_TASKWATCH */
700
701 /* Various bits of state to stash across a continuation, exclusive to the current thread block point */
702 union {
703 struct {
704 /* set before ipc_mqueue_receive() as implicit arguments */
705 mach_msg_recv_bufs_t recv_bufs; /* receive context */
706 mach_msg_option64_t option; /* 64 bits options for receive */
707 ipc_object_t object; /* object received on */
708
709 /* set by ipc_mqueue_receive() as implicit results */
710 mach_msg_return_t state; /* receive state */
711 mach_port_seqno_t seqno; /* seqno of recvd message */
712 mach_msg_size_t msize; /* actual size for the msg */
713 mach_msg_size_t asize; /* actual size for aux data */
714 mach_port_name_t receiver_name; /* the receive port name */
715 union {
716 struct ipc_kmsg *XNU_PTRAUTH_SIGNED_PTR("thread.ith_kmsg") kmsg; /* received message */
717 #if MACH_FLIPC
718 struct ipc_mqueue *XNU_PTRAUTH_SIGNED_PTR("thread.ith_peekq") peekq; /* mqueue to peek at */
719 #endif /* MACH_FLIPC */
720 };
721 } receive;
722 struct {
723 struct semaphore *waitsemaphore; /* semaphore ref */
724 struct semaphore *signalsemaphore; /* semaphore ref */
725 int options; /* semaphore options */
726 kern_return_t result; /* primary result */
727 mach_msg_continue_t continuation;
728 } sema;
729 struct {
730 #define THREAD_SAVE_IOKIT_TLS_COUNT 8
731 void *tls[THREAD_SAVE_IOKIT_TLS_COUNT];
732 } iokit;
733 } saved;
734
735	/* Only user threads can cause guard exceptions; only kernel threads can be thread call threads */
736 union {
737 /* Thread call thread's state structure, stored on its stack */
738 struct thread_call_thread_state *thc_state;
739
740 /* Structure to save information about guard exception */
741 struct {
742 mach_exception_code_t code;
743 mach_exception_subcode_t subcode;
744 } guard_exc_info;
745 };
746
747 /* User level suspensions */
748 int32_t user_stop_count;
749
750 /* IPC data structures */
751 #if IMPORTANCE_INHERITANCE
752 natural_t ith_assertions; /* assertions pending drop */
753 #endif
754 circle_queue_head_t ith_messages; /* messages to reap */
755 mach_port_t ith_kernel_reply_port; /* reply port for kernel RPCs */
756
757 /* VM Fault Tolerance */
758 bool th_vm_faults_disabled;
759
760 /* Ast/Halt data structures */
761 bool recover; /* True if page faulted in recoverable IO */
762
763 #if DEBUG || DEVELOPMENT
764 struct thread_test_context *th_test_ctx; /* thread-specific data for kernel tests */
765 #endif
766
767 queue_chain_t threads; /* global list of all threads */
768
769 /* Activation */
770 queue_chain_t task_threads;
771
772 /* Task membership */
773 #if __x86_64__ || __arm__
774 struct task *t_task;
775 #endif
776 struct thread_ro *t_tro;
777 vm_map_t map;
778 thread_t handoff_thread;
779
780 /* Timed wait expiration */
781 timer_call_t wait_timer;
782 uint16_t wait_timer_active; /* is the call running */
783 bool wait_timer_armed; /* should the wait be cleared */
784
785 /* Miscellaneous bits guarded by mutex */
786 uint32_t
787 active:1, /* Thread is active and has not been terminated */
788 ipc_active:1, /* IPC with the thread ports is allowed */
789 started:1, /* Thread has been started after creation */
790 static_param:1, /* Disallow policy parameter changes */
791 inspection:1, /* TRUE when task is being inspected by crash reporter */
792 policy_reset:1, /* Disallow policy parameter changes on terminating threads */
793 suspend_parked:1, /* thread parked in thread_suspended */
794 corpse_dup:1, /* TRUE when thread is an inactive duplicate in a corpse */
795 :0;
796
797 /* Pending thread ast(s) */
798 os_atomic(ast_t) ast;
799
800 decl_lck_mtx_data(, mutex);
801
802 struct ipc_port *ith_special_reply_port; /* ref to special reply port */
803
804 #if CONFIG_DTRACE
805 uint16_t t_dtrace_flags; /* DTrace thread states */
806 #define TH_DTRACE_EXECSUCCESS 0x01
807 uint16_t t_dtrace_inprobe; /* Executing under dtrace_probe */
808 uint32_t t_dtrace_predcache; /* DTrace per thread predicate value hint */
809 int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */
810 int64_t t_dtrace_vtime;
811 #endif
812
813 clock_sec_t t_page_creation_time;
814 uint32_t t_page_creation_count;
815 uint32_t t_page_creation_throttled;
816 #if (DEVELOPMENT || DEBUG)
817 uint64_t t_page_creation_throttled_hard;
818 uint64_t t_page_creation_throttled_soft;
819 #endif /* DEVELOPMENT || DEBUG */
820 int t_pagein_error; /* for vm_fault(), holds error from vnop_pagein() */
821
822 mach_port_name_t ith_voucher_name;
823 ipc_voucher_t ith_voucher;
824
825 #ifdef KPERF
826 /* The high 8 bits are the number of user callstack frames to sample. */
827 #define T_KPERF_CALLSTACK_DEPTH_OFFSET (24)
828 #define T_KPERF_SET_CALLSTACK_DEPTH(DEPTH) (((uint32_t)(DEPTH)) << T_KPERF_CALLSTACK_DEPTH_OFFSET)
829 #define T_KPERF_GET_CALLSTACK_DEPTH(FLAGS) ((FLAGS) >> T_KPERF_CALLSTACK_DEPTH_OFFSET)
830 #define T_KPERF_ACTIONID_OFFSET (18)
831 #define T_KPERF_SET_ACTIONID(AID) (((uint32_t)(AID)) << T_KPERF_ACTIONID_OFFSET)
832 #define T_KPERF_GET_ACTIONID(FLAGS) ((FLAGS) >> T_KPERF_ACTIONID_OFFSET)
833 #endif
834
835 #define T_KPERF_AST_CALLSTACK 0x1 /* dump a callstack on thread's next AST */
836 #define T_KPERF_AST_DISPATCH 0x2 /* dump a name on thread's next AST */
837 #define T_KPC_ALLOC 0x4 /* thread needs a kpc_buf allocated */
838
839 #define T_KPERF_AST_ALL \
840 (T_KPERF_AST_CALLSTACK | T_KPERF_AST_DISPATCH | T_KPC_ALLOC)
841 /* only go up to T_KPERF_ACTIONID_OFFSET - 1 */
842
843 #ifdef KPERF
844 uint32_t kperf_ast;
845	uint32_t                kperf_pet_gen;  /* last generation of PET that sampled this thread */
846 uint32_t kperf_c_switch; /* last dispatch detection */
847 uint32_t kperf_pet_cnt; /* how many times a thread has been sampled by PET */
848 #if CONFIG_EXCLAVES
849 uint32_t kperf_exclaves_ast;
850 #endif
851 #endif
852
853 #ifdef CONFIG_CPU_COUNTERS
854 /* accumulated performance counters for this thread */
855 uint64_t *kpc_buf;
856 #endif /* CONFIG_CPU_COUNTERS */
857
858 #if HYPERVISOR
859 /* hypervisor virtual CPU object associated with this thread */
860 void *hv_thread_target;
861 #endif /* HYPERVISOR */
862
863 /* Statistics accumulated per-thread and aggregated per-task */
864 uint32_t syscalls_unix;
865 uint32_t syscalls_mach;
866 ledger_t t_ledger;
867 ledger_t t_threadledger; /* per thread ledger */
868 ledger_t t_bankledger; /* ledger to charge someone */
869 uint64_t t_deduct_bank_ledger_time; /* cpu time to be deducted from bank ledger */
870 uint64_t t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */
871
872 uint64_t thread_id; /* system wide unique thread-id */
873 uint32_t ctid; /* system wide compact thread-id */
874 uint32_t ctsid; /* this thread ts ID */
875
876 /* policy is protected by the thread mutex */
877 struct thread_requested_policy requested_policy;
878 struct thread_effective_policy effective_policy;
879
880	/* usynch override is protected by the task lock; eventually it will be the thread mutex */
881 struct thread_qos_override {
882 struct thread_qos_override *override_next;
883 uint32_t override_contended_resource_count;
884 int16_t override_qos;
885 int16_t override_resource_type;
886 user_addr_t override_resource;
887 } *overrides;
888
889 uint32_t kevent_overrides;
890 uint8_t user_promotion_basepri;
891 uint8_t kern_promotion_schedpri;
892 _Atomic uint16_t kevent_ast_bits;
893
894 io_stat_info_t thread_io_stats; /* per-thread I/O statistics */
895
896 uint32_t thread_callout_interrupt_wakeups;
897 uint32_t thread_callout_platform_idle_wakeups;
898 uint32_t thread_timer_wakeups_bin_1;
899 uint32_t thread_timer_wakeups_bin_2;
900 thread_tag_t thread_tag;
901
902 /*
903 * callout_* fields are only set for thread call threads whereas guard_exc_fatal is set
904 * by user threads on themselves while taking a guard exception. So it's okay for them to
905 * share this bitfield.
906 */
907 uint16_t
908 callout_woken_from_icontext:1,
909 callout_woken_from_platform_idle:1,
910 callout_woke_thread:1,
911 guard_exc_fatal:1,
912 thread_bitfield_unused:12;
913
914 #define THREAD_BOUND_CLUSTER_NONE (UINT32_MAX)
915 uint32_t th_bound_cluster_id;
916
917 #if CONFIG_THREAD_GROUPS
918 #if CONFIG_PREADOPT_TG
919 /* The preadopt thread group is set on the thread
920 *
921 * a) By another thread when it is a creator and it is scheduled with the
922 * thread group on the TR
923 * b) On itself when it binds a thread request and becomes a
924 * servicer or when it rebinds to the thread request
925 * c) On itself when it processes knotes and finds the first
926 * EVFILT_MACHPORT event to deliver to userspace
927 *
928 * Note that this is a full reference owned by the thread_t and not a
929 * borrowed reference.
930 *
931 * This reference is cleared from the thread_t by the thread itself at the
932 * following times:
933 * a) When it explicitly adopts a work interval or a bank voucher
934 * b) If it still exists on the thread, after it has unbound and is about
935 * to park
936 * c) During thread termination if one still exists
937 * d) When a different preadoption thread group is set on the thread
938 *
939 * It is modified under the thread lock.
940 */
941 struct thread_group *preadopt_thread_group;
942
943 /* This field here is present in order to make sure that the t->thread_group
944 * is always pointing to a valid thread group and isn't a dangling pointer.
945 *
946 * Consider the following scenario:
947 * a) t->thread_group points to the preadoption thread group
948 * b) The preadoption thread group is modified on the thread but we are
949 * unable to resolve the hierarchy immediately due to the current state of
950 * the thread
951 *
952 * In order to make sure that t->thread_group points to a valid thread
953 * group until we can resolve the hierarchy again, we save the existing
954 * thread_group it points to in old_preadopt_thread_group. The next time a
955 * hierarchy resolution is done, we know that t->thread_group will not point
956 * to this field anymore so we can clear it.
957 *
958 * This field is always going to take the reference that was previously in
959 * preadopt_thread_group so it will have a full +1
960 */
961 struct thread_group *old_preadopt_thread_group;
962 #endif /* CONFIG_PREADOPT_TG */
963
964	/* This is a borrowed reference to the TG from the ith_voucher and is saved
965	 * here since we may not always be in the right context to be able to do the
966	 * lookups.
967	 *
968	 * It is always set on self under the thread lock */
969 struct thread_group *bank_thread_group;
970
971 /* Whether this is the autojoin thread group or the work interval thread
972 * group depends on whether the thread's sched_flags has the
973 * TH_SFLAG_THREAD_GROUP_AUTO_JOIN bit set */
974 union {
975 /* This is a borrowed reference to the auto join thread group from the
976 * work_interval. It is set with the thread lock held */
977 struct thread_group *auto_join_thread_group;
978 /* This is a borrowed reference to the explicit work_interval thread group
979 * and is always set on self */
980 struct thread_group *work_interval_thread_group;
981 };
982 #endif /* CONFIG_THREAD_GROUPS */
983
984	/* work interval (if any) associated with the thread. Only modified by the
985	 * current thread on itself, or by another thread while this thread is held
986	 * off the runq */
987 struct work_interval *th_work_interval;
988 thread_work_interval_flags_t th_work_interval_flags;
989
990 #if SCHED_TRACE_THREAD_WAKEUPS
991 uintptr_t thread_wakeup_bt[64];
992 #endif
993 turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */
994 block_hint_t pending_block_hint;
995 block_hint_t block_hint; /* What type of primitive last caused us to block. */
996 uint32_t decompressions; /* Per-thread decompressions counter to be added to per-task decompressions counter */
997 int thread_region_page_shift; /* Page shift that this thread would like to use when */
998 /* introspecting a task. This is currently being used */
999 /* by footprint which uses a thread for each task being inspected. */
1000 #if CONFIG_SCHED_RT_ALLOW
1001 /* Used when a thread is requested to set/clear its own CPU limit */
1002 uint32_t
1003 t_ledger_req_action:2,
1004 t_ledger_req_percentage:7,
1005 t_ledger_req_interval_ms:16,
1006 :0;
1007 #endif /* CONFIG_SCHED_RT_ALLOW */
1008
1009 #if CONFIG_IOSCHED
1010 void *decmp_upl;
1011 #endif /* CONFIG_IOSCHED */
1012 struct knote *ith_knote; /* knote fired for rcv */
1013
1014 #if CONFIG_SPTM
1015 /* TXM thread stack associated with this thread */
1016 uintptr_t txm_thread_stack;
1017 #endif
1018
1019 #if CONFIG_EXCLAVES
1020 /* Per-thread IPC context for exclaves communication. Only modified by the
1021 * current thread on itself. */
1022 exclaves_ctx_t th_exclaves_ipc_ctx;
1023 /* Thread exclaves interrupt-safe state. Only mutated by the current thread
1024 * on itself with interrupts disabled, and only ever read by the current
1025 * thread (with no locking), including from interrupt context, or during
1026 * debug/stackshot. */
1027 thread_exclaves_intstate_flags_t th_exclaves_intstate;
1028 /* Thread exclaves state. Only mutated by the current thread on itself, and
1029 * only ever read by the current thread (with no locking). Unsafe to read
1030 * from interrupt context. */
1031 thread_exclaves_state_flags_t th_exclaves_state;
1032 /* Thread stackshot state. Prevents returning to Exclave world until after
1033 * an external agent has triggered inspection (likely via Exclave stackshot),
1034 * and woken this thread. */
1035 thread_exclaves_inspection_flags_t _Atomic th_exclaves_inspection_state;
1036 /* Task for which conclave teardown is being called by this thread. Used
1037 * for context by conclave crash info upcall to find the task for appending
1038 * the conclave crash info. */
1039 task_t conclave_stop_task;
1040 /* Queue of threads being inspected by Stackshot.
1041 * Modified under exclaves_collect_mtx. */
1042 queue_chain_t th_exclaves_inspection_queue_stackshot;
1043 /* Queue of threads being inspected by kperf.
1044 * Modified under exclaves_collect_mtx. */
1045 queue_chain_t th_exclaves_inspection_queue_kperf;
1046 #endif /* CONFIG_EXCLAVES */
1047 };
1048
1049 #define ith_receive saved.receive
1050 /* arguments */
1051 #define ith_recv_bufs saved.receive.recv_bufs
1052 #define ith_object saved.receive.object
1053 #define ith_option saved.receive.option
1054 /* results */
1055 #define ith_state saved.receive.state
1056 #define ith_seqno saved.receive.seqno
1057 #define ith_msize saved.receive.msize
1058 #define ith_asize saved.receive.asize
1059 #define ith_receiver_name saved.receive.receiver_name
1060 #define ith_kmsg saved.receive.kmsg
1061 #if MACH_FLIPC
1062 #define ith_peekq saved.receive.peekq
1063 #endif /* MACH_FLIPC */
1064
1065 #define sth_waitsemaphore saved.sema.waitsemaphore
1066 #define sth_signalsemaphore saved.sema.signalsemaphore
1067 #define sth_options saved.sema.options
1068 #define sth_result saved.sema.result
1069 #define sth_continuation saved.sema.continuation
1070
1071 #define ITH_KNOTE_NULL ((void *)NULL)
1072 #define ITH_KNOTE_PSEUDO ((void *)0xdeadbeef)
1073 /*
1074  * The ith_knote is used during message delivery, and can safely be interpreted
1075  * only when used on one of these codepaths; that is what the test for the
1076  * msgt_name being RECEIVE or SEND_ONCE is about.
1077 */
1078 #define ITH_KNOTE_VALID(kn, msgt_name) \
1079 (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \
1080 ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \
1081 (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE))
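/*
 * Illustrative sketch only (hypothetical use): the knote is interpreted as a
 * real pointer only once the disposition is known to be a receive or
 * send-once right.
 *
 *	if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
 *		// safe to treat kn as a struct knote pointer
 *	}
 */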
1082
1083 #if MACH_ASSERT
1084 #define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \
1085 "bad thread magic 0x%llx for thread %p, expected 0x%llx", \
1086 (thread)->thread_magic, (thread), THREAD_MAGIC)
1087 #else
1088 #define assert_thread_magic(thread) do { (void)(thread); } while (0)
1089 #endif
1090
1091 extern thread_t thread_bootstrap(void);
1092
1093 extern void thread_machine_init_template(void);
1094
1095 extern void thread_init(void);
1096
1097 extern void thread_daemon_init(void);
1098
1099 extern void thread_reference(
1100 thread_t thread);
1101
1102 extern void thread_deallocate(
1103 thread_t thread);
1104
1105 extern void thread_inspect_deallocate(
1106 thread_inspect_t thread);
1107
1108 extern void thread_read_deallocate(
1109 thread_read_t thread);
1110
1111 extern kern_return_t thread_terminate(
1112 thread_t thread);
1113
1114 extern void thread_terminate_self(void);
1115
1116 extern kern_return_t thread_terminate_internal(
1117 thread_t thread);
1118
1119 extern void thread_start(
1120 thread_t thread) __attribute__ ((noinline));
1121
1122 extern void thread_start_in_assert_wait(
1123 thread_t thread,
1124 struct waitq *waitq,
1125 event64_t event,
1126 wait_interrupt_t interruptible) __attribute__ ((noinline));
1127
1128 extern void thread_terminate_enqueue(
1129 thread_t thread);
1130
1131 extern void thread_exception_enqueue(
1132 task_t task,
1133 thread_t thread,
1134 exception_type_t etype);
1135
1136 extern void thread_backtrace_enqueue(
1137 kcdata_object_t obj,
1138 exception_port_t ports[static BT_EXC_PORTS_COUNT],
1139 exception_type_t etype);
1140
1141 extern void thread_copy_resource_info(
1142 thread_t dst_thread,
1143 thread_t src_thread);
1144
1145 extern void thread_terminate_crashed_threads(void);
1146
1147 extern void thread_stack_enqueue(
1148 thread_t thread);
1149
1150 extern void thread_hold(
1151 thread_t thread);
1152
1153 extern void thread_release(
1154 thread_t thread);
1155
1156 extern void thread_corpse_continue(void) __dead2;
1157
1158 extern boolean_t thread_is_active(thread_t thread);
1159
1160 extern lck_grp_t thread_lck_grp;
1161
1162 /* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */
1163 #define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0)
1164 #define thread_lock(th) simple_lock(&(th)->sched_lock, &thread_lck_grp)
1165 #define thread_unlock(th) simple_unlock(&(th)->sched_lock)
1166 #define thread_lock_assert(th, x) simple_lock_assert(&(th)->sched_lock, (x))
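/*
 * Illustrative sketch only: per the comment above, the scheduler lock is taken
 * with interrupts disabled via splsched(). A hypothetical caller:
 *
 *	spl_t s = splsched();
 *	thread_lock(thread);
 *	// examine or update scheduler state
 *	thread_unlock(thread);
 *	splx(s);
 */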
1167
1168 #define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0)
1169 #define wake_lock(th) simple_lock(&(th)->wake_lock, &thread_lck_grp)
1170 #define wake_unlock(th) simple_unlock(&(th)->wake_lock)
1171
1172 #define thread_should_halt_fast(thread) (!(thread)->active)
1173
1174 extern void stack_alloc(
1175 thread_t thread);
1176
1177 extern void stack_handoff(
1178 thread_t from,
1179 thread_t to);
1180
1181 extern void stack_free(
1182 thread_t thread);
1183
1184 extern void stack_free_reserved(
1185 thread_t thread);
1186
1187 extern boolean_t stack_alloc_try(
1188 thread_t thread);
1189
1190 extern void stack_collect(void);
1191
1192 extern kern_return_t thread_info_internal(
1193 thread_t thread,
1194 thread_flavor_t flavor,
1195 thread_info_t thread_info_out,
1196 mach_msg_type_number_t *thread_info_count);
1197
1198 extern kern_return_t kernel_thread_create(
1199 thread_continue_t continuation,
1200 void *parameter,
1201 integer_t priority,
1202 thread_t *new_thread);
1203
1204 extern kern_return_t kernel_thread_start_priority(
1205 thread_continue_t continuation,
1206 void *parameter,
1207 integer_t priority,
1208 thread_t *new_thread);
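/*
 * Illustrative sketch only: both creation routines hand back a thread
 * reference owned by the caller. A hypothetical caller that starts a kernel
 * thread at BASEPRI_KERNEL (from kern/sched.h) and drops its reference:
 *
 *	thread_t thread;
 *	kern_return_t kr;
 *
 *	kr = kernel_thread_start_priority(my_continuation, NULL,
 *	    BASEPRI_KERNEL, &thread);
 *	if (kr == KERN_SUCCESS) {
 *		thread_deallocate(thread);
 *	}
 */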
1209
1210 extern void machine_stack_attach(
1211 thread_t thread,
1212 vm_offset_t stack);
1213
1214 extern vm_offset_t machine_stack_detach(
1215 thread_t thread);
1216
1217 extern void machine_stack_handoff(
1218 thread_t old,
1219 thread_t new);
1220
1221 extern thread_t machine_switch_context(
1222 thread_t old_thread,
1223 thread_continue_t continuation,
1224 thread_t new_thread);
1225
1226 extern void machine_load_context(
1227 thread_t thread) __attribute__((noreturn));
1228
1229 extern void machine_thread_state_initialize(
1230 thread_t thread);
1231
1232 extern kern_return_t machine_thread_set_state(
1233 thread_t thread,
1234 thread_flavor_t flavor,
1235 thread_state_t state,
1236 mach_msg_type_number_t count);
1237
1238 extern mach_vm_address_t machine_thread_pc(
1239 thread_t thread);
1240
1241 extern void machine_thread_reset_pc(
1242 thread_t thread,
1243 mach_vm_address_t pc);
1244
1245 extern boolean_t machine_thread_on_core(
1246 thread_t thread);
1247
1248 extern boolean_t machine_thread_on_core_allow_invalid(
1249 thread_t thread);
1250
1251 extern kern_return_t machine_thread_get_state(
1252 thread_t thread,
1253 thread_flavor_t flavor,
1254 thread_state_t state,
1255 mach_msg_type_number_t *count);
1256
1257 extern kern_return_t machine_thread_state_convert_from_user(
1258 thread_t thread,
1259 thread_flavor_t flavor,
1260 thread_state_t tstate,
1261 mach_msg_type_number_t count,
1262 thread_state_t old_tstate,
1263 mach_msg_type_number_t old_count,
1264 thread_set_status_flags_t tssf_flags);
1265
1266 extern kern_return_t machine_thread_state_convert_to_user(
1267 thread_t thread,
1268 thread_flavor_t flavor,
1269 thread_state_t tstate,
1270 mach_msg_type_number_t *count,
1271 thread_set_status_flags_t tssf_flags);
1272
1273 extern kern_return_t machine_thread_dup(
1274 thread_t self,
1275 thread_t target,
1276 boolean_t is_corpse);
1277
1278 extern void machine_thread_init(void);
1279
1280 extern void machine_thread_template_init(thread_t thr_template);
1281
1282 #if __has_feature(ptrauth_calls)
1283 extern bool machine_thread_state_is_debug_flavor(int flavor);
1284 #endif /* __has_feature(ptrauth_calls) */
1285
1286
1287 extern void machine_thread_create(
1288 thread_t thread,
1289 task_t task,
1290 bool first_thread);
1291
1292 extern kern_return_t machine_thread_process_signature(
1293 thread_t thread,
1294 task_t task);
1295
1296 extern void machine_thread_switch_addrmode(
1297 thread_t thread);
1298
1299 extern void machine_thread_destroy(
1300 thread_t thread);
1301
1302 extern void machine_set_current_thread(
1303 thread_t thread);
1304
1305 extern kern_return_t machine_thread_get_kern_state(
1306 thread_t thread,
1307 thread_flavor_t flavor,
1308 thread_state_t tstate,
1309 mach_msg_type_number_t *count);
1310
1311 extern kern_return_t machine_thread_inherit_taskwide(
1312 thread_t thread,
1313 task_t parent_task);
1314
1315 extern kern_return_t machine_thread_set_tsd_base(
1316 thread_t thread,
1317 mach_vm_offset_t tsd_base);
1318
1319 #define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex)
1320 #define thread_mtx_held(thread) lck_mtx_assert(&(thread)->mutex, LCK_MTX_ASSERT_OWNED)
1321
1322 extern void thread_apc_ast(thread_t thread);
1323
1324 extern void thread_update_qos_cpu_time(thread_t thread);
1325
1326 void act_machine_sv_free(thread_t, int);
1327
1328 vm_offset_t min_valid_stack_address(void);
1329 vm_offset_t max_valid_stack_address(void);
1330
1331 extern bool thread_no_smt(thread_t thread);
1332 extern bool processor_active_thread_no_smt(processor_t processor);
1333
1334 extern void thread_set_options(uint32_t thopt);
1335
1336 #if CONFIG_THREAD_GROUPS
1337 struct thread_group *thread_get_current_voucher_thread_group(thread_t thread);
1338 #endif /* CONFIG_THREAD_GROUPS */
1339
1340 #if CONFIG_COALITIONS
1341 uint64_t thread_get_current_voucher_resource_coalition_id(thread_t thread);
1342 #endif /* CONFIG_COALITIONS */
1343
1344 #endif /* MACH_KERNEL_PRIVATE */
1345 #if BSD_KERNEL_PRIVATE
1346
1347 /* Duplicated from osfmk/kern/ipc_tt.h */
1348 __options_decl(port_intrans_options_t, uint32_t, {
1349 PORT_INTRANS_OPTIONS_NONE = 0x0000,
1350 PORT_INTRANS_THREAD_IN_CURRENT_TASK = 0x0001,
1351 PORT_INTRANS_THREAD_NOT_CURRENT_THREAD = 0x0002,
1352
1353 PORT_INTRANS_SKIP_TASK_EVAL = 0x0004,
1354 PORT_INTRANS_ALLOW_CORPSE_TASK = 0x0008,
1355 });
1356
1357 extern thread_t port_name_to_thread(
1358 mach_port_name_t port_name,
1359 port_intrans_options_t options);
1360
1361 #endif /* BSD_KERNEL_PRIVATE */
1362 #ifdef XNU_KERNEL_PRIVATE
1363
1364 extern void thread_require(
1365 thread_t thread);
1366
1367 extern void thread_deallocate_safe(
1368 thread_t thread);
1369
1370 extern uint64_t thread_rettokern_addr(
1371 thread_t thread);
1372
1373 extern uint64_t thread_wqquantum_addr(
1374 thread_t thread);
1375
1376 extern integer_t thread_kern_get_pri(thread_t thr) __pure2;
1377
1378 extern void thread_kern_set_pri(thread_t thr, integer_t pri);
1379
1380 extern integer_t thread_kern_get_kernel_maxpri(void) __pure2;
1381
1382 uint16_t thread_set_tag(thread_t thread, uint16_t tag);
1383 uint16_t thread_get_tag(thread_t thread);
1384
1385 __options_decl(shared_rsrc_policy_agent_t, uint32_t, {
1386 SHARED_RSRC_POLICY_AGENT_DISPATCH = 0,
1387 SHARED_RSRC_POLICY_AGENT_SYSCTL = 1,
1388 SHARED_RSRC_POLICY_AGENT_PERFCTL_CSW = 2,
1389 SHARED_RSRC_POLICY_AGENT_PERFCTL_QUANTUM = 3,
1390 });
1391
1392 boolean_t thread_shared_rsrc_policy_get(thread_t thread, cluster_shared_rsrc_type_t type);
1393 kern_return_t thread_shared_rsrc_policy_set(thread_t thread, uint32_t index, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);
1394 kern_return_t thread_shared_rsrc_policy_clear(thread_t thread, cluster_shared_rsrc_type_t type, shared_rsrc_policy_agent_t agent);
1395
1396 #ifdef MACH_KERNEL_PRIVATE
1397 static inline thread_tag_t
1398 thread_set_tag_internal(thread_t thread, thread_tag_t tag)
1399 {
1400 return os_atomic_or_orig(&thread->thread_tag, tag, relaxed);
1401 }
1402
1403 static inline thread_tag_t
1404 thread_get_tag_internal(thread_t thread)
1405 {
1406 return thread->thread_tag;
1407 }
1408 #endif /* MACH_KERNEL_PRIVATE */
1409
1410 uint64_t thread_last_run_time(thread_t thread);
1411
1412 extern kern_return_t thread_state_initialize(
1413 thread_t thread);
1414
1415 extern kern_return_t thread_setstatus(
1416 thread_t thread,
1417 int flavor,
1418 thread_state_t tstate,
1419 mach_msg_type_number_t count);
1420
1421 extern kern_return_t thread_setstatus_from_user(
1422 thread_t thread,
1423 int flavor,
1424 thread_state_t tstate,
1425 mach_msg_type_number_t count,
1426 thread_state_t old_tstate,
1427 mach_msg_type_number_t old_count,
1428 thread_set_status_flags_t flags);
1429
1430 extern kern_return_t thread_getstatus(
1431 thread_t thread,
1432 int flavor,
1433 thread_state_t tstate,
1434 mach_msg_type_number_t *count);
1435
1436 extern void main_thread_set_immovable_pinned(thread_t thread);
1437
1438 extern kern_return_t thread_getstatus_to_user(
1439 thread_t thread,
1440 int flavor,
1441 thread_state_t tstate,
1442 mach_msg_type_number_t *count,
1443 thread_set_status_flags_t flags);
1444
1445 extern kern_return_t thread_create_with_continuation(
1446 task_t task,
1447 thread_t *new_thread,
1448 thread_continue_t continuation);
1449
1450 extern kern_return_t main_thread_create_waiting(task_t task,
1451 thread_continue_t continuation,
1452 event_t event,
1453 thread_t *new_thread);
1454
1455 extern kern_return_t thread_create_workq_waiting(
1456 task_t task,
1457 thread_continue_t thread_return,
1458 thread_t *new_thread,
1459 bool is_permanently_bound);
1460
1461 extern void thread_yield_internal(
1462 mach_msg_timeout_t interval);
1463
1464 extern void thread_yield_to_preemption(void);
1465
1466 extern void thread_depress_timer_setup(thread_t self);
1467
1468 /*
1469 * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are:
1470 *
1471 * 1) Block. Prevent CPU consumption of the thread from exceeding the limit.
1472 * 2) Exception. Generate a resource consumption exception when the limit is exceeded.
1473 * 3) Disable. Remove any existing CPU limit.
1474 */
1475 #define THREAD_CPULIMIT_BLOCK 0x1
1476 #define THREAD_CPULIMIT_EXCEPTION 0x2
1477 #define THREAD_CPULIMIT_DISABLE 0x3
1478
1479 struct _thread_ledger_indices {
1480 int cpu_time;
1481 };
1482
1483 extern struct _thread_ledger_indices thread_ledgers;
1484
1485 extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns);
1486 extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns);
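/*
 * Illustrative sketch only: a hypothetical thread limiting itself to 50% of a
 * CPU over a 100ms interval, blocking when the limit is reached:
 *
 *	thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 100 * NSEC_PER_MSEC);
 */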
1487
1488 extern uint64_t thread_cpulimit_remaining(uint64_t now);
1489 extern bool thread_cpulimit_interval_has_expired(uint64_t now);
1490 extern void thread_cpulimit_restart(uint64_t now);
1491
1492 extern void thread_read_times(
1493 thread_t thread,
1494 time_value_t *user_time,
1495 time_value_t *system_time,
1496 time_value_t *runnable_time);
1497
1498 extern void thread_read_times_unsafe(
1499 thread_t thread,
1500 time_value_t *user_time,
1501 time_value_t *system_time,
1502 time_value_t *runnable_time);
1503
1504 extern uint64_t thread_get_runtime_self(void);
1505
1506 extern void thread_setuserstack(
1507 thread_t thread,
1508 mach_vm_offset_t user_stack);
1509
1510 extern user_addr_t thread_adjuserstack(
1511 thread_t thread,
1512 int adjust);
1513
1514
1515 extern void thread_setentrypoint(
1516 thread_t thread,
1517 mach_vm_offset_t entry);
1518
1519 extern kern_return_t thread_set_tsd_base(
1520 thread_t thread,
1521 mach_vm_offset_t tsd_base);
1522
1523 extern kern_return_t thread_setsinglestep(
1524 thread_t thread,
1525 int on);
1526
1527 extern kern_return_t thread_userstack(
1528 thread_t,
1529 int,
1530 thread_state_t,
1531 unsigned int,
1532 mach_vm_offset_t *,
1533 int *,
1534 boolean_t);
1535
1536 extern kern_return_t thread_entrypoint(
1537 thread_t,
1538 int,
1539 thread_state_t,
1540 unsigned int,
1541 mach_vm_offset_t *);
1542
1543 extern kern_return_t thread_userstackdefault(
1544 mach_vm_offset_t *,
1545 boolean_t);
1546
1547 extern kern_return_t thread_wire_internal(
1548 host_priv_t host_priv,
1549 thread_t thread,
1550 boolean_t wired,
1551 boolean_t *prev_state);
1552
1553
1554 extern kern_return_t thread_dup(thread_t);
1555
1556 extern kern_return_t thread_dup2(thread_t, thread_t);
1557
1558 #if !defined(_SCHED_CALL_T_DEFINED)
1559 #define _SCHED_CALL_T_DEFINED
1560 typedef void (*sched_call_t)(
1561 int type,
1562 thread_t thread);
1563 #endif
1564
1565 #define SCHED_CALL_BLOCK 0x1
1566 #define SCHED_CALL_UNBLOCK 0x2
1567
1568 extern void thread_sched_call(
1569 thread_t thread,
1570 sched_call_t call);
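/*
 * Example (illustrative sketch; the callback name and its body are
 * hypothetical, not part of this interface): a scheduler callout registered
 * with thread_sched_call() is invoked with SCHED_CALL_BLOCK when the thread
 * blocks and SCHED_CALL_UNBLOCK when it becomes runnable again.
 *
 *	static void
 *	example_sched_call(int type, thread_t thread)
 *	{
 *		switch (type) {
 *		case SCHED_CALL_BLOCK:
 *			// thread is blocking; e.g. wake a replacement worker
 *			break;
 *		case SCHED_CALL_UNBLOCK:
 *			// thread is runnable again; e.g. account for the extra runner
 *			break;
 *		}
 *	}
 *
 *	thread_sched_call(thread, example_sched_call);
 */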

extern boolean_t thread_is_static_param(
	thread_t thread);

extern task_t get_threadtask(thread_t) __pure2;

extern task_t get_threadtask_early(thread_t) __pure2;

/*
 * Thread is running within a 64-bit address space.
 */
#define thread_is_64bit_addr(thd)       \
	task_has_64Bit_addr(get_threadtask(thd))

/*
 * Thread is using 64-bit machine state.
 */
#define thread_is_64bit_data(thd)       \
	task_has_64Bit_data(get_threadtask(thd))

struct uthread;

#if defined(__x86_64__)
extern int thread_task_has_ldt(thread_t);
#endif
extern void set_thread_pagein_error(thread_t, int);
extern event_t workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/

struct proc;
struct uthread;
struct image_params;
extern const size_t uthread_size;
extern thread_ro_t get_thread_ro_unchecked(thread_t) __pure2;
extern thread_ro_t get_thread_ro(thread_t) __pure2;
extern thread_ro_t current_thread_ro_unchecked(void) __pure2;
extern thread_ro_t current_thread_ro(void) __pure2;
extern void clear_thread_ro_proc(thread_t);
extern struct uthread *get_bsdthread_info(thread_t) __pure2;
extern thread_t get_machthread(struct uthread *) __pure2;
extern uint64_t uthread_tid(struct uthread *) __pure2;
extern user_addr_t thread_get_sigreturn_token(thread_t thread);
extern uint32_t thread_get_sigreturn_diversifier(thread_t thread);
extern void uthread_init(task_t, struct uthread *, thread_ro_t, int);
extern void uthread_cleanup_name(struct uthread *uthread);
extern void uthread_cleanup(struct uthread *, thread_ro_t);
extern void uthread_cred_ref(struct ucred *);
extern void uthread_cred_free(struct ucred *);
extern void uthread_destroy(struct uthread *);
extern void uthread_reset_proc_refcount(struct uthread *);

extern void uthread_set_exec_data(struct uthread *uth, struct image_params *imgp);
extern bool uthread_is64bit(struct uthread *uth) __pure2;
#if PROC_REF_DEBUG
extern void uthread_init_proc_refcount(struct uthread *);
extern void uthread_destroy_proc_refcount(struct uthread *);
extern void uthread_assert_zero_proc_refcount(struct uthread *);
#else
#define uthread_init_proc_refcount(uth)         ((void)(uth))
#define uthread_destroy_proc_refcount(uth)      ((void)(uth))
#define uthread_assert_zero_proc_refcount(uth)  ((void)(uth))
#endif
#if CONFIG_DEBUG_SYSCALL_REJECTION
extern uint64_t uthread_get_syscall_rejection_flags(void *);
extern uint64_t *uthread_get_syscall_rejection_mask(void *);
extern uint64_t *uthread_get_syscall_rejection_once_mask(void *);
extern bool uthread_syscall_rejection_is_enabled(void *);
#endif /* CONFIG_DEBUG_SYSCALL_REJECTION */
extern mach_port_name_t uthread_joiner_port(struct uthread *);
extern user_addr_t uthread_joiner_address(struct uthread *);
extern void uthread_joiner_wake(task_t task, struct uthread *);

extern boolean_t thread_should_halt(
	thread_t thread);

extern boolean_t thread_should_abort(
	thread_t);

extern bool current_thread_in_kernel_fault(void);

extern int is_64signalregset(void);

extern void act_set_kperf(thread_t);
extern void act_set_astledger(thread_t thread);
extern void act_set_astledger_async(thread_t thread);
extern void act_set_io_telemetry_ast(thread_t);
extern void act_set_macf_telemetry_ast(thread_t);
extern void act_set_astproc_resource(thread_t);

extern vm_offset_t thread_get_kernel_stack(thread_t);

extern kern_return_t thread_process_signature(thread_t thread, task_t task);

extern uint32_t dtrace_get_thread_predcache(thread_t);
extern int64_t dtrace_get_thread_vtime(thread_t);
extern int64_t dtrace_get_thread_tracing(thread_t);
extern uint16_t dtrace_get_thread_inprobe(thread_t);
extern int dtrace_get_thread_last_cpu_id(thread_t);
extern vm_offset_t dtrace_get_kernel_stack(thread_t);
#define dtrace_get_kernel_stack thread_get_kernel_stack
extern void dtrace_set_thread_predcache(thread_t, uint32_t);
extern void dtrace_set_thread_vtime(thread_t, int64_t);
extern void dtrace_set_thread_tracing(thread_t, int64_t);
extern void dtrace_set_thread_inprobe(thread_t, uint16_t);
extern void dtrace_thread_bootstrap(void);
extern void dtrace_thread_didexec(thread_t);

extern int64_t dtrace_calc_thread_recent_vtime(thread_t);


extern kern_return_t thread_set_wq_state32(
	thread_t thread,
	thread_state_t tstate);

extern kern_return_t thread_set_wq_state64(
	thread_t thread,
	thread_state_t tstate);

extern vm_offset_t kernel_stack_mask;
extern vm_offset_t kernel_stack_size;
extern vm_offset_t kernel_stack_depth_max;

extern void guard_ast(thread_t);
extern void fd_guard_ast(thread_t,
    mach_exception_code_t, mach_exception_subcode_t);
extern void vn_guard_ast(thread_t,
    mach_exception_code_t, mach_exception_subcode_t);
extern void mach_port_guard_ast(thread_t,
    mach_exception_code_t, mach_exception_subcode_t);
extern void virt_memory_guard_ast(thread_t,
    mach_exception_code_t, mach_exception_subcode_t);
extern void thread_guard_violation(thread_t,
    mach_exception_code_t, mach_exception_subcode_t, boolean_t);
extern void thread_update_io_stats(thread_t, int size, int io_flags);

extern kern_return_t thread_set_voucher_name(mach_port_name_t name);
extern kern_return_t thread_get_voucher_origin_pid(thread_t thread, int32_t *pid);
extern kern_return_t thread_get_voucher_origin_proximate_pid(thread_t thread,
    int32_t *origin_pid, int32_t *proximate_pid);
extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid);

extern void thread_enable_send_importance(thread_t thread, boolean_t enable);

/*
 * Translate signal context data pointer to userspace representation
 */

extern kern_return_t machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp);

extern void machine_tecs(thread_t thr);

typedef enum cpuvn {
	CPUVN_CI = 1
} cpuvn_e;

extern int machine_csv(cpuvn_e cve);
#if defined(__x86_64__)
extern void machine_thread_set_insn_copy_optout(thread_t thr);
#endif

/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

extern kern_return_t machine_thread_function_pointers_convert_from_user(
	thread_t thread,
	user_addr_t *fptrs,
	uint32_t count);

/*
 * Get the duration of the given thread's last wait.
 */
uint64_t thread_get_last_wait_duration(thread_t thread);

extern bool thread_get_no_smt(void);
#if defined(__x86_64__)
extern bool curtask_get_insn_copy_optout(void);
extern void curtask_set_insn_copy_optout(void);
#endif /* defined(__x86_64__) */

/*! @function ctid_get_thread
 *  @abstract Translates a ctid_t to a thread_t.
 *  @discussion ctids are system-wide compact thread IDs, associated with a
 *  thread_t at thread creation and recycled at thread termination. If a ctid
 *  is referenced past the corresponding thread's termination, it is
 *  considered stale and the behavior is undefined.
 *  Note that this call does not acquire a reference on the thread, so as
 *  soon as the matching thread terminates, the ctid becomes stale and may be
 *  re-used and associated with another thread. You must externally guarantee
 *  that the thread will not exit while you are using its ctid.
 *  @result The thread_t corresponding to ctid.
 */
extern thread_t ctid_get_thread(ctid_t ctid);

/*! @function ctid_get_thread_unsafe
 *  @abstract Translates a ctid_t to a thread_t without any liveness guarantee.
 *  @discussion Unsafe variant of ctid_get_thread(), to be used when the
 *  caller cannot guarantee the liveness of this ctid_t.
 *  May return NULL or a freed thread_t.
 */
extern thread_t ctid_get_thread_unsafe(ctid_t ctid);

/*!
 * @function thread_get_ctid
 * @abstract Returns the ctid of a thread.
 * @param thread The thread whose ctid is returned.
 * @discussion The returned ctid becomes stale after the matching thread
 * terminates.
 * @result The thread's ctid.
 */
extern ctid_t thread_get_ctid(thread_t thread);
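/*
 * Example (illustrative sketch; the variable names are hypothetical): a
 * subsystem holding a reference to a thread can store its compact identifier
 * and translate it back later, provided it can guarantee the thread has not
 * terminated in the meantime.
 *
 *	ctid_t ctid = thread_get_ctid(thread);
 *	...
 *	thread_t same_thread = ctid_get_thread(ctid);  // valid only while
 *	                                               // `thread` cannot exit
 */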

#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE

typedef struct thread_pri_floor {
	thread_t thread;
} thread_pri_floor_t;

#ifdef MACH_KERNEL_PRIVATE
extern void thread_floor_boost_ast(thread_t thread);
extern void thread_floor_boost_set_promotion_locked(thread_t thread);
#endif /* MACH_KERNEL_PRIVATE */

/*! @function thread_priority_floor_start
 *  @abstract Boosts the current thread's priority to a floor.
 *  @discussion Increases the priority of the current thread to at least MINPRI_FLOOR.
 *  The boost is maintained until a corresponding thread_priority_floor_end()
 *  is called. Every call to thread_priority_floor_start() must be paired with
 *  a call to thread_priority_floor_end() from the same thread.
 *  No thread can return to userspace before calling thread_priority_floor_end().
 *
 *  NOTE: avoid using this function. Prefer gate_t or sleep_with_inheritor()
 *  instead.
 *  @result A token to be passed to the corresponding thread_priority_floor_end().
 */
extern thread_pri_floor_t thread_priority_floor_start(void);

/*! @function thread_priority_floor_end
 *  @abstract Ends the floor boost.
 *  @param token The token obtained from thread_priority_floor_start().
 *  @discussion Ends the priority floor boost started with thread_priority_floor_start().
 */
extern void thread_priority_floor_end(thread_pri_floor_t *token);
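/*
 * Example (illustrative sketch; the contents of the boosted region are
 * hypothetical): the token returned by thread_priority_floor_start() is kept
 * on the caller's stack and handed back to thread_priority_floor_end() on the
 * same thread once the boosted region is done.
 *
 *	thread_pri_floor_t token = thread_priority_floor_start();
 *	// ... work that must not run below MINPRI_FLOOR ...
 *	thread_priority_floor_end(&token);
 */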

extern void thread_set_no_smt(bool set);

extern void thread_mtx_lock(thread_t thread);

extern void thread_mtx_unlock(thread_t thread);

extern uint64_t thread_dispatchqaddr(
	thread_t thread);

bool thread_is_eager_preempt(thread_t thread);
void thread_set_eager_preempt(thread_t thread);
void thread_clear_eager_preempt(thread_t thread);
void thread_set_honor_qlimit(thread_t thread);
void thread_clear_honor_qlimit(thread_t thread);
extern ipc_port_t convert_thread_to_port(thread_t);
extern ipc_port_t convert_thread_to_port_pinned(thread_t);
extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t);
extern ipc_port_t convert_thread_read_to_port(thread_read_t);
extern void convert_thread_array_to_ports(thread_act_array_t, size_t, mach_thread_flavor_t);
extern boolean_t is_external_pageout_thread(void);
extern boolean_t is_vm_privileged(void);
extern boolean_t set_vm_privilege(boolean_t);
extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name);
extern void *thread_iokit_tls_get(uint32_t index);
extern void thread_iokit_tls_set(uint32_t index, void *data);
extern int thread_self_region_page_shift(void);
extern void thread_self_region_page_shift_set(int pgshift);
extern kern_return_t thread_create_immovable(task_t task, thread_t *new_thread);
extern kern_return_t thread_terminate_pinned(thread_t thread);

struct thread_attr_for_ipc_propagation;
extern kern_return_t thread_get_ipc_propagate_attr(thread_t thread, struct thread_attr_for_ipc_propagation *attr);
extern size_t thread_get_current_exec_path(char *path, size_t size);
#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

extern void
thread_get_thread_name(thread_t th, char *name);

/* Read the runq assignment, under the thread lock. */
extern processor_t thread_get_runq(thread_t thread);

/*
 * Read the runq assignment, under both the thread lock and
 * the pset lock corresponding to the last non-null assignment.
 */
extern processor_t thread_get_runq_locked(thread_t thread);

/*
 * Set the runq assignment to a non-null value, under both the
 * thread lock and the pset lock corresponding to the new
 * assignment.
 */
extern void thread_set_runq_locked(thread_t thread, processor_t new_runq);

/*
 * Set the runq assignment to PROCESSOR_NULL, under the pset
 * lock corresponding to the current non-null assignment.
 */
extern void thread_clear_runq(thread_t thread);

/*
 * Set the runq assignment to PROCESSOR_NULL, under both the
 * thread lock and the pset lock corresponding to the current
 * non-null assignment.
 */
extern void thread_clear_runq_locked(thread_t thread);

/*
 * Assert the runq assignment to be PROCESSOR_NULL, under
 * some guarantee that the runq will not change from null to
 * non-null, such as holding the thread lock.
 */
extern void thread_assert_runq_null(thread_t thread);

/*
 * Assert the runq assignment to be non-null, under the pset
 * lock corresponding to the current non-null assignment.
 */
extern void thread_assert_runq_nonnull(thread_t thread);
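/*
 * Example (illustrative sketch; the locking is only indicated in comments and
 * the call site is hypothetical): a reader that holds the thread lock may use
 * thread_get_runq() and assert the null case it observes.
 *
 *	// with the thread lock held:
 *	processor_t runq = thread_get_runq(thread);
 *	if (runq == PROCESSOR_NULL) {
 *		thread_assert_runq_null(thread);
 *	}
 */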

extern bool thread_supports_cooperative_workqueue(thread_t thread);
extern void thread_arm_workqueue_quantum(thread_t thread);
extern void thread_disarm_workqueue_quantum(thread_t thread);

extern void thread_evaluate_workqueue_quantum_expiry(thread_t thread);
extern bool thread_has_expired_workqueue_quantum(thread_t thread, bool should_trace);

#if CONFIG_SPTM

extern void
thread_associate_txm_thread_stack(uintptr_t thread_stack);

extern void
thread_disassociate_txm_thread_stack(uintptr_t thread_stack);

extern uintptr_t
thread_get_txm_thread_stack(void);

#endif /* CONFIG_SPTM */

/* Kernel side prototypes for MIG routines */
extern kern_return_t thread_get_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors);

extern kern_return_t thread_get_special_port(
	thread_inspect_t thread,
	int which,
	ipc_port_t *portp);

#endif /* XNU_KERNEL_PRIVATE */

/*! @function thread_has_thread_name
 *  @abstract Checks if a thread has a name.
 *  @discussion Returns a boolean value indicating whether the given thread
 *  already has a name associated with it.
 *  @param th The thread to inspect.
 *  @result TRUE if the thread has a name, FALSE otherwise.
 */
extern boolean_t thread_has_thread_name(thread_t th);

/*! @function thread_set_thread_name
 *  @abstract Set a thread's name.
 *  @discussion Copies the given name to the thread so the thread can be
 *  identified more easily. If the name is longer than MAXTHREADNAMESIZE - 1
 *  characters, it is truncated.
 *  @param th The thread to be named.
 *  @param name The name to apply to the thread.
 */
extern void thread_set_thread_name(thread_t th, const char *name);
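/*
 * Example (illustrative sketch; the chosen name is arbitrary): name the
 * current thread so it is easier to pick out in debugger and spindump output.
 *
 *	if (!thread_has_thread_name(current_thread())) {
 *		thread_set_thread_name(current_thread(), "com.example.worker");
 *	}
 */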

#if !MACH_KERNEL_PRIVATE || !defined(current_thread)
extern thread_t current_thread(void) __pure2;
#endif

extern uint64_t thread_tid(thread_t thread) __pure2;

extern void thread_reference(
	thread_t thread);

extern void thread_deallocate(
	thread_t thread);

/*! @function kernel_thread_start
 *  @abstract Create a kernel thread.
 *  @discussion This function takes three parameters: a reference to the
 *  function the thread should execute, caller-specified data, and a reference
 *  used to return the newly created kernel thread. It returns KERN_SUCCESS on
 *  success or an appropriate kernel return code indicating the error. Note
 *  that the caller is responsible for explicitly releasing the reference to
 *  the created thread when it is no longer needed, by calling
 *  thread_deallocate(new_thread).
 *  @param continuation A C-function pointer where the thread will begin execution.
 *  @param parameter Caller-specified data to be passed to the new thread.
 *  @param new_thread Reference to the new thread is returned in this parameter.
 *  @result Returns KERN_SUCCESS on success or an appropriate kernel return code.
 */

extern kern_return_t kernel_thread_start(
	thread_continue_t continuation,
	void *parameter,
	thread_t *new_thread);
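/*
 * Example (illustrative sketch; the continuation name and its body are
 * hypothetical): start a kernel thread and drop the returned reference once
 * it is no longer needed.
 *
 *	static void
 *	example_thread_continue(void *parameter, wait_result_t wr)
 *	{
 *		// ... thread body ...
 *	}
 *
 *	thread_t new_thread;
 *	if (kernel_thread_start(example_thread_continue, NULL, &new_thread) == KERN_SUCCESS) {
 *		thread_deallocate(new_thread);
 *	}
 */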

__END_DECLS

#endif /* _KERN_THREAD_H_ */