1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to [email protected] any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
50 * Thread management routines
51 */
52
53 #include <sys/kdebug.h>
54 #include <mach/mach_types.h>
55 #include <mach/kern_return.h>
56 #include <mach/thread_act_server.h>
57 #include <mach/thread_act.h>
58
59 #include <kern/kern_types.h>
60 #include <kern/ast.h>
61 #include <kern/mach_param.h>
62 #include <kern/zalloc.h>
63 #include <kern/extmod_statistics.h>
64 #include <kern/thread.h>
65 #include <kern/task.h>
66 #include <kern/sched_prim.h>
67 #include <kern/misc_protos.h>
68 #include <kern/assert.h>
69 #include <kern/exception.h>
70 #include <kern/ipc_mig.h>
71 #include <kern/ipc_tt.h>
72 #include <kern/machine.h>
73 #include <kern/spl.h>
74 #include <kern/syscall_subr.h>
75 #include <kern/processor.h>
76 #include <kern/restartable.h>
77 #include <kern/timer.h>
78 #include <kern/affinity.h>
79 #include <kern/host.h>
80 #include <kern/exc_guard.h>
81 #include <ipc/port.h>
82 #include <mach/arm/thread_status.h>
83
84
85 #include <stdatomic.h>
86
87 #include <security/mac_mach_internal.h>
88 #include <libkern/coreanalytics/coreanalytics.h>
89
90 static void act_abort(thread_t thread);
91
92 static void thread_suspended(void *arg, wait_result_t result);
93 static void thread_set_apc_ast(thread_t thread);
94 static void thread_set_apc_ast_locked(thread_t thread);
95
96 extern void proc_name(int pid, char * buf, int size);
97 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
98
99 CA_EVENT(thread_set_state,
100 CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc);
101
102 static void
103 send_thread_set_state_telemetry(void)
104 {
105 ca_event_t ca_event = CA_EVENT_ALLOCATE(thread_set_state);
106 CA_EVENT_TYPE(thread_set_state) * event = ca_event->data;
107
108 proc_name(task_pid(current_task()), (char *) &event->current_proc, CA_PROCNAME_LEN);
109
110 CA_EVENT_SEND(ca_event);
111 }
112
113 /* bootarg to create lightweight corpse for thread set state lockdown */
114 TUNABLE(bool, tss_should_crash, "tss_should_crash", true);
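/*
 * Example (sketch): on DEVELOPMENT/DEBUG kernels, where the tunable above is
 * consulted, booting with "tss_should_crash=0" in boot-args turns the fatal
 * lockdown below into a no-op and thread_set_state_allowed() returns TRUE
 * for every caller.
 */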
115
116 static inline boolean_t
117 thread_set_state_allowed(thread_t thread, int flavor)
118 {
119 task_t target_task = get_threadtask(thread);
120
121 #if DEVELOPMENT || DEBUG
122 /* disable the feature if the boot-arg is disabled. */
123 if (!tss_should_crash) {
124 return TRUE;
125 }
126 #endif /* DEVELOPMENT || DEBUG */
127
128 /* hardened binaries must have entitlement - all others ok */
129 if (task_is_hardened_binary(target_task)
130 && !(thread->options & TH_IN_MACH_EXCEPTION) /* Allowed for now - rdar://103085786 */
131 && FLAVOR_MODIFIES_CORE_CPU_REGISTERS(flavor) /* only care about locking down PC/LR */
132 #if XNU_TARGET_OS_OSX
133 && !task_opted_out_mach_hardening(target_task)
134 #endif /* XNU_TARGET_OS_OSX */
135 #if CONFIG_ROSETTA
136 && !task_is_translated(target_task) /* Ignore translated tasks */
137 #endif /* CONFIG_ROSETTA */
138 && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")
139 ) {
140 /* fatal crash */
141 mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
142 send_thread_set_state_telemetry();
143 return FALSE;
144 }
145
146 #if __has_feature(ptrauth_calls)
147 /* Do not allow Fatal PAC exception binaries to set Debug state */
148 if (task_is_pac_exception_fatal(target_task)
149 && machine_thread_state_is_debug_flavor(flavor)
150 #if XNU_TARGET_OS_OSX
151 && !task_opted_out_mach_hardening(target_task)
152 #endif /* XNU_TARGET_OS_OSX */
153 #if CONFIG_ROSETTA
154 && !task_is_translated(target_task) /* Ignore translated tasks */
155 #endif /* CONFIG_ROSETTA */
156 && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")
157 ) {
158 /* fatal crash */
159 mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
160 send_thread_set_state_telemetry();
161 return FALSE;
162 }
163 #endif /* __has_feature(ptrauth_calls) */
164
165 return TRUE;
166 }
167
168 /*
169 * Internal routine to mark a thread as started.
170 * Always called with the thread mutex locked.
171 */
172 void
173 thread_start(
174 thread_t thread)
175 {
176 clear_wait(thread, THREAD_AWAKENED);
177 thread->started = TRUE;
178 }
179
180 /*
181 * Internal routine to mark a thread as waiting
182 * right after it has been created. The caller
183 * is responsible for calling wakeup()/thread_wakeup()
184 * or thread_terminate() to get it going.
185 *
186 * Always called with the thread mutex locked.
187 *
188 * Task and task_threads mutexes also held
189 * (so nobody can set the thread running before
190 * this point)
191 *
192 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
193 * to allow termination from this point forward.
194 */
195 void
196 thread_start_in_assert_wait(
197 thread_t thread,
198 struct waitq *waitq,
199 event64_t event,
200 wait_interrupt_t interruptible)
201 {
202 wait_result_t wait_result;
203 spl_t spl;
204
205 spl = splsched();
206 waitq_lock(waitq);
207
208 /* clear out startup condition (safe because thread not started yet) */
209 thread_lock(thread);
210 assert(!thread->started);
211 assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
212 thread->state &= ~(TH_WAIT | TH_UNINT);
213 thread_unlock(thread);
214
215 /* assert wait interruptibly forever */
216 wait_result = waitq_assert_wait64_locked(waitq, event,
217 interruptible,
218 TIMEOUT_URGENCY_SYS_NORMAL,
219 TIMEOUT_WAIT_FOREVER,
220 TIMEOUT_NO_LEEWAY,
221 thread);
222 assert(wait_result == THREAD_WAITING);
223
224 /* mark thread started while we still hold the waitq lock */
225 thread_lock(thread);
226 thread->started = TRUE;
227 thread_unlock(thread);
228
229 waitq_unlock(waitq);
230 splx(spl);
231 }
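/*
 * Usage sketch (hypothetical caller, illustrative names): with the thread
 * mutex and task locks held as described above, a creator can park a new,
 * not-yet-started thread on a waitq and later kick it with a wakeup on the
 * same event (or reap it with thread_terminate()):
 *
 *	thread_start_in_assert_wait(new_thread, waitq,
 *	    CAST_EVENT64_T(&some_object), THREAD_INTERRUPTIBLE);
 *	...
 *	thread_wakeup(&some_object);
 */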
232
233 /*
234 * Internal routine to terminate a thread.
235 * Sometimes called with task already locked.
236 *
237 * If thread is on core, cause AST check immediately;
238 * Otherwise, let the thread continue running in kernel
239 * until it hits AST.
240 */
241 kern_return_t
242 thread_terminate_internal(
243 thread_t thread)
244 {
245 kern_return_t result = KERN_SUCCESS;
246
247 thread_mtx_lock(thread);
248
249 if (thread->active) {
250 thread->active = FALSE;
251
252 act_abort(thread);
253
254 if (thread->started) {
255 clear_wait(thread, THREAD_INTERRUPTED);
256 } else {
257 thread_start(thread);
258 }
259 } else {
260 result = KERN_TERMINATED;
261 }
262
263 if (thread->affinity_set != NULL) {
264 thread_affinity_terminate(thread);
265 }
266
267 /* unconditionally unpin the thread in internal termination */
268 ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);
269
270 thread_mtx_unlock(thread);
271
272 if (thread != current_thread() && result == KERN_SUCCESS) {
273 thread_wait(thread, FALSE);
274 }
275
276 return result;
277 }
278
279 kern_return_t
280 thread_terminate(
281 thread_t thread)
282 {
283 task_t task;
284
285 if (thread == THREAD_NULL) {
286 return KERN_INVALID_ARGUMENT;
287 }
288
289 if (thread->state & TH_IDLE) {
290 panic("idle thread calling thread_terminate!");
291 }
292
293 task = get_threadtask(thread);
294
295 /* Kernel threads can't be terminated without their own cooperation */
296 if (task == kernel_task && thread != current_thread()) {
297 return KERN_FAILURE;
298 }
299
300 kern_return_t result = thread_terminate_internal(thread);
301
302 /*
303 * If a kernel thread is terminating itself, force-handle the AST_APC here.
304 * Kernel threads don't pass through the return-to-user AST checking code,
305 * but all threads must finish their own termination in thread_apc_ast.
306 */
307 if (task == kernel_task) {
308 assert(thread->active == FALSE);
309 thread_ast_clear(thread, AST_APC);
310 thread_apc_ast(thread);
311
312 panic("thread_terminate");
313 /* NOTREACHED */
314 }
315
316 return result;
317 }
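/*
 * Example (sketch, hypothetical kernel thread): a kernel_task thread that is
 * done with its work terminates itself and never returns; the AST_APC is
 * handled inline as described above.
 *
 *	static void
 *	my_kthread_main(void *arg, wait_result_t wr)
 *	{
 *		... do work ...
 *		thread_terminate(current_thread());
 *		// NOTREACHED
 *	}
 */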
318
319 /*
320 * [MIG Call] Terminate a thread.
321 *
322 * Cannot be used on threads managed by pthread.
323 */
324 kern_return_t
325 thread_terminate_from_user(
326 thread_t thread)
327 {
328 if (thread == THREAD_NULL) {
329 return KERN_INVALID_ARGUMENT;
330 }
331
332 if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
333 return KERN_DENIED;
334 }
335
336 return thread_terminate(thread);
337 }
338
339 /*
340 * Terminate a thread with pinned control port.
341 *
342 * Can only be used on threads managed by pthread. Exported in pthread_kern.
343 */
344 kern_return_t
345 thread_terminate_pinned(
346 thread_t thread)
347 {
348 task_t task;
349
350 if (thread == THREAD_NULL) {
351 return KERN_INVALID_ARGUMENT;
352 }
353
354 task = get_threadtask(thread);
355
356
357 assert(task != kernel_task);
358 assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));
359
360 thread_mtx_lock(thread);
361 if (task_is_pinned(task) && thread->active) {
362 assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
363 }
364 thread_mtx_unlock(thread);
365
366 kern_return_t result = thread_terminate_internal(thread);
367 return result;
368 }
369
370 /*
371 * Suspend execution of the specified thread.
372 * This is a recursive-style suspension of the thread, a count of
373 * suspends is maintained.
374 *
375 * Called with thread mutex held.
376 */
377 void
378 thread_hold(thread_t thread)
379 {
380 if (thread->suspend_count++ == 0) {
381 task_t task = get_threadtask(thread);
382 thread_set_apc_ast(thread);
383 assert(thread->suspend_parked == FALSE);
384
385 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_THREAD_SUSPEND) | DBG_FUNC_NONE,
386 thread->thread_id, thread->user_stop_count, task->pidsuspended);
387 }
388 }
389
390 /*
391 * Decrement internal suspension count, setting thread
392 * runnable when count falls to zero.
393 *
394 * Because the wait is abortsafe, we can't be guaranteed that the thread
395 * is currently actually waiting even if suspend_parked is set.
396 *
397 * Called with thread mutex held.
398 */
399 void
400 thread_release(thread_t thread)
401 {
402 assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);
403
404 /* fail-safe on non-assert builds */
405 if (thread->suspend_count == 0) {
406 return;
407 }
408
409 if (--thread->suspend_count == 0) {
410 if (!thread->started) {
411 thread_start(thread);
412 } else if (thread->suspend_parked) {
413 thread->suspend_parked = FALSE;
414 thread_wakeup_thread(&thread->suspend_count, thread);
415 }
416 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SUSPENSION, MACH_THREAD_RESUME) | DBG_FUNC_NONE, thread->thread_id);
417 }
418 }
419
420 kern_return_t
421 thread_suspend(thread_t thread)
422 {
423 kern_return_t result = KERN_SUCCESS;
424
425 if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
426 return KERN_INVALID_ARGUMENT;
427 }
428
429 thread_mtx_lock(thread);
430
431 if (thread->active) {
432 if (thread->user_stop_count++ == 0) {
433 thread_hold(thread);
434 }
435 } else {
436 result = KERN_TERMINATED;
437 }
438
439 thread_mtx_unlock(thread);
440
441 if (thread != current_thread() && result == KERN_SUCCESS) {
442 thread_wait(thread, FALSE);
443 }
444
445 return result;
446 }
447
448 kern_return_t
449 thread_resume(thread_t thread)
450 {
451 kern_return_t result = KERN_SUCCESS;
452
453 if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
454 return KERN_INVALID_ARGUMENT;
455 }
456
457 thread_mtx_lock(thread);
458
459 if (thread->active) {
460 if (thread->user_stop_count > 0) {
461 if (--thread->user_stop_count == 0) {
462 thread_release(thread);
463 }
464 } else {
465 result = KERN_FAILURE;
466 }
467 } else {
468 result = KERN_TERMINATED;
469 }
470
471 thread_mtx_unlock(thread);
472
473 return result;
474 }
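/*
 * The user-visible counting above composes as follows (sketch):
 *
 *	thread_suspend(t);	// user_stop_count 0 -> 1, thread_hold()
 *	thread_suspend(t);	// user_stop_count 1 -> 2, no extra hold
 *	thread_resume(t);	// 2 -> 1, still suspended
 *	thread_resume(t);	// 1 -> 0, thread_release(), runnable again
 *	thread_resume(t);	// nothing outstanding, KERN_FAILURE
 */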
475
476 /*
477 * thread_depress_abort_from_user:
478 *
479 * Prematurely abort priority depression if there is one.
480 */
481 kern_return_t
482 thread_depress_abort_from_user(thread_t thread)
483 {
484 kern_return_t result;
485
486 if (thread == THREAD_NULL) {
487 return KERN_INVALID_ARGUMENT;
488 }
489
490 thread_mtx_lock(thread);
491
492 if (thread->active) {
493 result = thread_depress_abort(thread);
494 } else {
495 result = KERN_TERMINATED;
496 }
497
498 thread_mtx_unlock(thread);
499
500 return result;
501 }
502
503
504 /*
505 * Indicate that the thread should run the AST_APC callback
506 * to detect an abort condition.
507 *
508 * Called with thread mutex held.
509 */
510 static void
511 act_abort(
512 thread_t thread)
513 {
514 spl_t s = splsched();
515
516 thread_lock(thread);
517
518 if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
519 thread->sched_flags |= TH_SFLAG_ABORT;
520 thread_set_apc_ast_locked(thread);
521 thread_depress_abort_locked(thread);
522 } else {
523 thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
524 }
525
526 thread_unlock(thread);
527 splx(s);
528 }
529
530 kern_return_t
531 thread_abort(
532 thread_t thread)
533 {
534 kern_return_t result = KERN_SUCCESS;
535
536 if (thread == THREAD_NULL) {
537 return KERN_INVALID_ARGUMENT;
538 }
539
540 thread_mtx_lock(thread);
541
542 if (thread->active) {
543 act_abort(thread);
544 clear_wait(thread, THREAD_INTERRUPTED);
545 } else {
546 result = KERN_TERMINATED;
547 }
548
549 thread_mtx_unlock(thread);
550
551 return result;
552 }
553
554 kern_return_t
555 thread_abort_safely(
556 thread_t thread)
557 {
558 kern_return_t result = KERN_SUCCESS;
559
560 if (thread == THREAD_NULL) {
561 return KERN_INVALID_ARGUMENT;
562 }
563
564 thread_mtx_lock(thread);
565
566 if (thread->active) {
567 spl_t s = splsched();
568
569 thread_lock(thread);
570 if (!thread->at_safe_point ||
571 clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
572 if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
573 thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
574 thread_set_apc_ast_locked(thread);
575 thread_depress_abort_locked(thread);
576 }
577 }
578 thread_unlock(thread);
579 splx(s);
580 } else {
581 result = KERN_TERMINATED;
582 }
583
584 thread_mtx_unlock(thread);
585
586 return result;
587 }
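/*
 * Note on the two variants above (informal): thread_abort() always interrupts
 * the target's current wait, while thread_abort_safely() only interrupts a
 * wait that is marked at_safe_point and otherwise just arms the abort so it
 * takes effect at the next safe opportunity.
 */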
588
589 /*** backward compatibility hacks ***/
590 #include <mach/thread_info.h>
591 #include <mach/thread_special_ports.h>
592 #include <ipc/ipc_port.h>
593
594 kern_return_t
595 thread_info(
596 thread_t thread,
597 thread_flavor_t flavor,
598 thread_info_t thread_info_out,
599 mach_msg_type_number_t *thread_info_count)
600 {
601 kern_return_t result;
602
603 if (thread == THREAD_NULL) {
604 return KERN_INVALID_ARGUMENT;
605 }
606
607 thread_mtx_lock(thread);
608
609 if (thread->active || thread->inspection) {
610 result = thread_info_internal(
611 thread, flavor, thread_info_out, thread_info_count);
612 } else {
613 result = KERN_TERMINATED;
614 }
615
616 thread_mtx_unlock(thread);
617
618 return result;
619 }
620
621 static inline kern_return_t
622 thread_get_state_internal(
623 thread_t thread,
624 int flavor,
625 thread_state_t state, /* pointer to OUT array */
626 mach_msg_type_number_t *state_count, /*IN/OUT*/
627 thread_set_status_flags_t flags)
628 {
629 kern_return_t result = KERN_SUCCESS;
630 boolean_t to_user = !!(flags & TSSF_TRANSLATE_TO_USER);
631
632 if (thread == THREAD_NULL) {
633 return KERN_INVALID_ARGUMENT;
634 }
635
636 thread_mtx_lock(thread);
637
638 if (thread->active) {
639 if (thread != current_thread()) {
640 thread_hold(thread);
641
642 thread_mtx_unlock(thread);
643
644 if (thread_stop(thread, FALSE)) {
645 thread_mtx_lock(thread);
646 result = machine_thread_get_state(
647 thread, flavor, state, state_count);
648 thread_unstop(thread);
649 } else {
650 thread_mtx_lock(thread);
651 result = KERN_ABORTED;
652 }
653
654 thread_release(thread);
655 } else {
656 result = machine_thread_get_state(
657 thread, flavor, state, state_count);
658 }
659 } else if (thread->inspection) {
660 result = machine_thread_get_state(
661 thread, flavor, state, state_count);
662 } else {
663 result = KERN_TERMINATED;
664 }
665
666 if (to_user && result == KERN_SUCCESS) {
667 result = machine_thread_state_convert_to_user(thread, flavor, state,
668 state_count, flags);
669 }
670
671 thread_mtx_unlock(thread);
672
673 return result;
674 }
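/*
 * The hold/stop pattern above (informal summary): for a thread other than the
 * caller, the target is held so it cannot return to user space, stopped so
 * its machine state is stable, the state is copied, and the thread is then
 * unstopped and released. The current thread is read directly because its
 * user state was already saved when it entered the kernel.
 */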
675
676 /* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
677
678 kern_return_t
679 thread_get_state(
680 thread_t thread,
681 int flavor,
682 thread_state_t state,
683 mach_msg_type_number_t *state_count);
684
685 kern_return_t
686 thread_get_state(
687 thread_t thread,
688 int flavor,
689 thread_state_t state, /* pointer to OUT array */
690 mach_msg_type_number_t *state_count) /*IN/OUT*/
691 {
692 return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
693 }
694
695 kern_return_t
696 thread_get_state_to_user(
697 thread_t thread,
698 int flavor,
699 thread_state_t state, /* pointer to OUT array */
700 mach_msg_type_number_t *state_count) /*IN/OUT*/
701 {
702 return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
703 }
704
705 /*
706 * Change thread's machine-dependent state. Called with nothing
707 * locked. Returns same way.
708 */
709 static inline kern_return_t
710 thread_set_state_internal(
711 thread_t thread,
712 int flavor,
713 thread_state_t state,
714 mach_msg_type_number_t state_count,
715 thread_state_t old_state,
716 mach_msg_type_number_t old_state_count,
717 thread_set_status_flags_t flags)
718 {
719 kern_return_t result = KERN_SUCCESS;
720 boolean_t from_user = !!(flags & TSSF_TRANSLATE_TO_USER);
721
722 if (thread == THREAD_NULL) {
723 return KERN_INVALID_ARGUMENT;
724 }
725
726 if ((flags & TSSF_CHECK_ENTITLEMENT) &&
727 !thread_set_state_allowed(thread, flavor)) {
728 return KERN_NO_ACCESS;
729 }
730
731 thread_mtx_lock(thread);
732
733 if (thread->active) {
734 if (from_user) {
735 result = machine_thread_state_convert_from_user(thread, flavor,
736 state, state_count, old_state, old_state_count, flags);
737 if (result != KERN_SUCCESS) {
738 goto out;
739 }
740 }
741 if (thread != current_thread()) {
742 thread_hold(thread);
743
744 thread_mtx_unlock(thread);
745
746 if (thread_stop(thread, TRUE)) {
747 thread_mtx_lock(thread);
748 result = machine_thread_set_state(
749 thread, flavor, state, state_count);
750 thread_unstop(thread);
751 } else {
752 thread_mtx_lock(thread);
753 result = KERN_ABORTED;
754 }
755
756 thread_release(thread);
757 } else {
758 result = machine_thread_set_state(
759 thread, flavor, state, state_count);
760 }
761 } else {
762 result = KERN_TERMINATED;
763 }
764
765 if ((result == KERN_SUCCESS) && from_user) {
766 extmod_statistics_incr_thread_set_state(thread);
767 }
768
769 out:
770 thread_mtx_unlock(thread);
771
772 return result;
773 }
774
775 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
776 kern_return_t
777 thread_set_state(
778 thread_t thread,
779 int flavor,
780 thread_state_t state,
781 mach_msg_type_number_t state_count);
782
783 kern_return_t
784 thread_set_state(
785 thread_t thread,
786 int flavor,
787 thread_state_t state,
788 mach_msg_type_number_t state_count)
789 {
790 return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
791 }
792
793 kern_return_t
794 thread_set_state_from_user(
795 thread_t thread,
796 int flavor,
797 thread_state_t state,
798 mach_msg_type_number_t state_count)
799 {
800 return thread_set_state_internal(thread, flavor, state, state_count, NULL,
801 0, TSSF_TRANSLATE_TO_USER | TSSF_CHECK_ENTITLEMENT);
802 }
803
804 kern_return_t
805 thread_convert_thread_state(
806 thread_t thread,
807 int direction,
808 thread_state_flavor_t flavor,
809 thread_state_t in_state, /* pointer to IN array */
810 mach_msg_type_number_t in_state_count,
811 thread_state_t out_state, /* pointer to OUT array */
812 mach_msg_type_number_t *out_state_count) /*IN/OUT*/
813 {
814 kern_return_t kr;
815 thread_t to_thread = THREAD_NULL;
816 thread_t from_thread = THREAD_NULL;
817 mach_msg_type_number_t state_count = in_state_count;
818
819 if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
820 direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
821 return KERN_INVALID_ARGUMENT;
822 }
823
824 if (thread == THREAD_NULL) {
825 return KERN_INVALID_ARGUMENT;
826 }
827
828 if (state_count > *out_state_count) {
829 return KERN_INSUFFICIENT_BUFFER_SIZE;
830 }
831
832 if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
833 to_thread = thread;
834 from_thread = current_thread();
835 } else {
836 to_thread = current_thread();
837 from_thread = thread;
838 }
839
840 /* Authenticate and convert thread state to kernel representation */
841 kr = machine_thread_state_convert_from_user(from_thread, flavor,
842 in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);
843
844 /* Return early if one of the threads has JOP disabled while the other doesn't */
845 if (kr != KERN_SUCCESS) {
846 return kr;
847 }
848
849 /* Convert thread state to target thread user representation */
850 kr = machine_thread_state_convert_to_user(to_thread, flavor,
851 in_state, &state_count, TSSF_PRESERVE_FLAGS);
852
853 if (kr == KERN_SUCCESS) {
854 if (state_count <= *out_state_count) {
855 memcpy(out_state, in_state, state_count * sizeof(uint32_t));
856 *out_state_count = state_count;
857 } else {
858 kr = KERN_INSUFFICIENT_BUFFER_SIZE;
859 }
860 }
861
862 return kr;
863 }
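/*
 * Direction sketch (hypothetical caller; arm64 flavor chosen purely for
 * illustration):
 *
 *	// Convert state produced by `target` into the caller's representation:
 *	thread_convert_thread_state(target, THREAD_CONVERT_THREAD_STATE_TO_SELF,
 *	    ARM_THREAD_STATE64, in_state, in_count, out_state, &out_count);
 *
 *	// Convert caller-produced state so it can later be applied to `target`:
 *	thread_convert_thread_state(target, THREAD_CONVERT_THREAD_STATE_FROM_SELF,
 *	    ARM_THREAD_STATE64, in_state, in_count, out_state, &out_count);
 */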
864
865 /*
866 * Kernel-internal "thread" interfaces used outside this file:
867 */
868
869 /* Initialize (or re-initialize) a thread's state. Called from execve
870 * with nothing locked, returns same way.
871 */
872 kern_return_t
873 thread_state_initialize(
874 thread_t thread)
875 {
876 kern_return_t result = KERN_SUCCESS;
877
878 if (thread == THREAD_NULL) {
879 return KERN_INVALID_ARGUMENT;
880 }
881
882 thread_mtx_lock(thread);
883
884 if (thread->active) {
885 if (thread != current_thread()) {
886 /* Thread created in exec should be blocked in UNINT wait */
887 assert(!(thread->state & TH_RUN));
888 }
889 machine_thread_state_initialize( thread );
890 } else {
891 result = KERN_TERMINATED;
892 }
893
894 thread_mtx_unlock(thread);
895
896 return result;
897 }
898
899 kern_return_t
900 thread_dup(
901 thread_t target)
902 {
903 thread_t self = current_thread();
904 kern_return_t result = KERN_SUCCESS;
905
906 if (target == THREAD_NULL || target == self) {
907 return KERN_INVALID_ARGUMENT;
908 }
909
910 thread_mtx_lock(target);
911
912 if (target->active) {
913 thread_hold(target);
914
915 thread_mtx_unlock(target);
916
917 if (thread_stop(target, TRUE)) {
918 thread_mtx_lock(target);
919 result = machine_thread_dup(self, target, FALSE);
920
921 if (self->affinity_set != AFFINITY_SET_NULL) {
922 thread_affinity_dup(self, target);
923 }
924 thread_unstop(target);
925 } else {
926 thread_mtx_lock(target);
927 result = KERN_ABORTED;
928 }
929
930 thread_release(target);
931 } else {
932 result = KERN_TERMINATED;
933 }
934
935 thread_mtx_unlock(target);
936
937 return result;
938 }
939
940
941 kern_return_t
942 thread_dup2(
943 thread_t source,
944 thread_t target)
945 {
946 kern_return_t result = KERN_SUCCESS;
947 uint32_t active = 0;
948
949 if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
950 return KERN_INVALID_ARGUMENT;
951 }
952
953 thread_mtx_lock(source);
954 active = source->active;
955 thread_mtx_unlock(source);
956
957 if (!active) {
958 return KERN_TERMINATED;
959 }
960
961 thread_mtx_lock(target);
962
963 if (target->active || target->inspection) {
964 thread_hold(target);
965
966 thread_mtx_unlock(target);
967
968 if (thread_stop(target, TRUE)) {
969 thread_mtx_lock(target);
970 result = machine_thread_dup(source, target, TRUE);
971 if (source->affinity_set != AFFINITY_SET_NULL) {
972 thread_affinity_dup(source, target);
973 }
974 thread_unstop(target);
975 } else {
976 thread_mtx_lock(target);
977 result = KERN_ABORTED;
978 }
979
980 thread_release(target);
981 } else {
982 result = KERN_TERMINATED;
983 }
984
985 thread_mtx_unlock(target);
986
987 return result;
988 }
989
990 /*
991 * thread_setstatus:
992 *
993 * Set the status of the specified thread.
994 * Called with (and returns with) no locks held.
995 */
996 kern_return_t
997 thread_setstatus(
998 thread_t thread,
999 int flavor,
1000 thread_state_t tstate,
1001 mach_msg_type_number_t count)
1002 {
1003 return thread_set_state(thread, flavor, tstate, count);
1004 }
1005
1006 kern_return_t
1007 thread_setstatus_from_user(
1008 thread_t thread,
1009 int flavor,
1010 thread_state_t tstate,
1011 mach_msg_type_number_t count,
1012 thread_state_t old_tstate,
1013 mach_msg_type_number_t old_count,
1014 thread_set_status_flags_t flags)
1015 {
1016 return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
1017 old_count, flags | TSSF_TRANSLATE_TO_USER);
1018 }
1019
1020 /*
1021 * thread_getstatus:
1022 *
1023 * Get the status of the specified thread.
1024 */
1025 kern_return_t
1026 thread_getstatus(
1027 thread_t thread,
1028 int flavor,
1029 thread_state_t tstate,
1030 mach_msg_type_number_t *count)
1031 {
1032 return thread_get_state(thread, flavor, tstate, count);
1033 }
1034
1035 kern_return_t
1036 thread_getstatus_to_user(
1037 thread_t thread,
1038 int flavor,
1039 thread_state_t tstate,
1040 mach_msg_type_number_t *count,
1041 thread_set_status_flags_t flags)
1042 {
1043 return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
1044 }
1045
1046 /*
1047 * Change thread's machine-dependent userspace TSD base.
1048 * Called with nothing locked. Returns same way.
1049 */
1050 kern_return_t
1051 thread_set_tsd_base(
1052 thread_t thread,
1053 mach_vm_offset_t tsd_base)
1054 {
1055 kern_return_t result = KERN_SUCCESS;
1056
1057 if (thread == THREAD_NULL) {
1058 return KERN_INVALID_ARGUMENT;
1059 }
1060
1061 thread_mtx_lock(thread);
1062
1063 if (thread->active) {
1064 if (thread != current_thread()) {
1065 thread_hold(thread);
1066
1067 thread_mtx_unlock(thread);
1068
1069 if (thread_stop(thread, TRUE)) {
1070 thread_mtx_lock(thread);
1071 result = machine_thread_set_tsd_base(thread, tsd_base);
1072 thread_unstop(thread);
1073 } else {
1074 thread_mtx_lock(thread);
1075 result = KERN_ABORTED;
1076 }
1077
1078 thread_release(thread);
1079 } else {
1080 result = machine_thread_set_tsd_base(thread, tsd_base);
1081 }
1082 } else {
1083 result = KERN_TERMINATED;
1084 }
1085
1086 thread_mtx_unlock(thread);
1087
1088 return result;
1089 }
1090
1091 /*
1092 * thread_set_apc_ast:
1093 *
1094 * Register the AST_APC callback that handles suspension and
1095 * termination, if it hasn't been installed already.
1096 *
1097 * Called with the thread mutex held.
1098 */
1099 static void
1100 thread_set_apc_ast(thread_t thread)
1101 {
1102 spl_t s = splsched();
1103
1104 thread_lock(thread);
1105 thread_set_apc_ast_locked(thread);
1106 thread_unlock(thread);
1107
1108 splx(s);
1109 }
1110
1111 /*
1112 * thread_set_apc_ast_locked:
1113 *
1114 * Do the work of registering for the AST_APC callback.
1115 *
1116 * Called with the thread mutex and scheduling lock held.
1117 */
1118 static void
1119 thread_set_apc_ast_locked(thread_t thread)
1120 {
1121 thread_ast_set(thread, AST_APC);
1122
1123 if (thread == current_thread()) {
1124 ast_propagate(thread);
1125 } else {
1126 processor_t processor = thread->last_processor;
1127
1128 if (processor != PROCESSOR_NULL &&
1129 processor->state == PROCESSOR_RUNNING &&
1130 processor->active_thread == thread) {
1131 cause_ast_check(processor);
1132 }
1133 }
1134 }
1135
1136 /*
1137 * Activation control support routines internal to this file:
1138 *
1139 */
1140
1141 /*
1142 * thread_suspended
1143 *
1144 * Continuation routine for thread suspension. It checks
1145 * to see whether there have been any new suspensions. If so, it
1146 * installs the AST_APC handler again.
1147 */
1148 __attribute__((noreturn))
1149 static void
1150 thread_suspended(__unused void *parameter, wait_result_t result)
1151 {
1152 thread_t thread = current_thread();
1153
1154 thread_mtx_lock(thread);
1155
1156 if (result == THREAD_INTERRUPTED) {
1157 thread->suspend_parked = FALSE;
1158 } else {
1159 assert(thread->suspend_parked == FALSE);
1160 }
1161
1162 if (thread->suspend_count > 0) {
1163 thread_set_apc_ast(thread);
1164 }
1165
1166 thread_mtx_unlock(thread);
1167
1168 thread_exception_return();
1169 /*NOTREACHED*/
1170 }
1171
1172 /*
1173 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
1174 * Called with nothing locked. Returns (if it returns) the same way.
1175 */
1176 void
1177 thread_apc_ast(thread_t thread)
1178 {
1179 thread_mtx_lock(thread);
1180
1181 assert(thread->suspend_parked == FALSE);
1182
1183 spl_t s = splsched();
1184 thread_lock(thread);
1185
1186 /* TH_SFLAG_POLLDEPRESS is OK to have here */
1187 assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);
1188
1189 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
1190 thread_unlock(thread);
1191 splx(s);
1192
1193 if (!thread->active) {
1194 /* Thread is ready to terminate, time to tear it down */
1195 thread_mtx_unlock(thread);
1196
1197 thread_terminate_self();
1198 /*NOTREACHED*/
1199 }
1200
1201 /* If we're suspended, go to sleep and wait for someone to wake us up. */
1202 if (thread->suspend_count > 0) {
1203 thread->suspend_parked = TRUE;
1204 assert_wait(&thread->suspend_count,
1205 THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
1206 thread_mtx_unlock(thread);
1207
1208 thread_block(thread_suspended);
1209 /*NOTREACHED*/
1210 }
1211
1212 thread_mtx_unlock(thread);
1213 }
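/*
 * Putting the pieces together (informal): a suspension travels
 *
 *	thread_hold()       suspend_count 0 -> 1, arms AST_APC
 *	thread_apc_ast()    sees suspend_count > 0, sets suspend_parked and
 *	                    blocks with thread_suspended as the continuation
 *	thread_release()    suspend_count 1 -> 0, wakes the parked thread
 *	thread_suspended()  re-arms AST_APC if the count is still non-zero,
 *	                    then returns to user space
 */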
1214
1215 #if CONFIG_ROSETTA
1216 extern kern_return_t
1217 exception_deliver(
1218 thread_t thread,
1219 exception_type_t exception,
1220 mach_exception_data_t code,
1221 mach_msg_type_number_t codeCnt,
1222 struct exception_action *excp,
1223 lck_mtx_t *mutex);
1224
1225 kern_return_t
1226 thread_raise_exception(
1227 thread_t thread,
1228 exception_type_t exception,
1229 natural_t code_count,
1230 int64_t code,
1231 int64_t sub_code)
1232 {
1233 task_t task;
1234
1235 if (thread == THREAD_NULL) {
1236 return KERN_INVALID_ARGUMENT;
1237 }
1238
1239 task = get_threadtask(thread);
1240
1241 if (task != current_task()) {
1242 return KERN_FAILURE;
1243 }
1244
1245 if (!task_is_translated(task)) {
1246 return KERN_FAILURE;
1247 }
1248
1249 if (exception == EXC_CRASH) {
1250 return KERN_INVALID_ARGUMENT;
1251 }
1252
1253 int64_t codes[] = { code, sub_code };
1254 host_priv_t host_priv = host_priv_self();
1255 kern_return_t kr = exception_deliver(thread, exception, codes, code_count, host_priv->exc_actions, &host_priv->lock);
1256 if (kr != KERN_SUCCESS) {
1257 return kr;
1258 }
1259
1260 return thread_resume(thread);
1261 }
1262 #endif
1263
1264 void
1265 thread_debug_return_to_user_ast(
1266 thread_t thread)
1267 {
1268 #pragma unused(thread)
1269 #if MACH_ASSERT
1270 if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
1271 thread->rwlock_count > 0) {
1272 panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
1273 }
1274
1275 if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
1276 thread->priority_floor_count > 0) {
1277 panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
1278 }
1279
1280 if (thread->th_vm_faults_disabled) {
1281 panic("Returning to userspace with vm faults disabled, thread %p", thread);
1282 }
1283
1284 #if CONFIG_EXCLAVES
1285 assert3u(thread->th_exclaves_state & TH_EXCLAVES_STATE_ANY, ==, 0);
1286 #endif /* CONFIG_EXCLAVES */
1287
1288 #endif /* MACH_ASSERT */
1289 }
1290
1291
1292 /* Prototype, see justification above */
1293 kern_return_t
1294 act_set_state(
1295 thread_t thread,
1296 int flavor,
1297 thread_state_t state,
1298 mach_msg_type_number_t count);
1299
1300 kern_return_t
1301 act_set_state(
1302 thread_t thread,
1303 int flavor,
1304 thread_state_t state,
1305 mach_msg_type_number_t count)
1306 {
1307 if (thread == current_thread()) {
1308 return KERN_INVALID_ARGUMENT;
1309 }
1310
1311 return thread_set_state(thread, flavor, state, count);
1312 }
1313
1314 kern_return_t
1315 act_set_state_from_user(
1316 thread_t thread,
1317 int flavor,
1318 thread_state_t state,
1319 mach_msg_type_number_t count)
1320 {
1321 if (thread == current_thread()) {
1322 return KERN_INVALID_ARGUMENT;
1323 }
1324
1325 return thread_set_state_from_user(thread, flavor, state, count);
1326 }
1327
1328 /* Prototype, see justification above */
1329 kern_return_t
1330 act_get_state(
1331 thread_t thread,
1332 int flavor,
1333 thread_state_t state,
1334 mach_msg_type_number_t *count);
1335
1336 kern_return_t
1337 act_get_state(
1338 thread_t thread,
1339 int flavor,
1340 thread_state_t state,
1341 mach_msg_type_number_t *count)
1342 {
1343 if (thread == current_thread()) {
1344 return KERN_INVALID_ARGUMENT;
1345 }
1346
1347 return thread_get_state(thread, flavor, state, count);
1348 }
1349
1350 kern_return_t
1351 act_get_state_to_user(
1352 thread_t thread,
1353 int flavor,
1354 thread_state_t state,
1355 mach_msg_type_number_t *count)
1356 {
1357 if (thread == current_thread()) {
1358 return KERN_INVALID_ARGUMENT;
1359 }
1360
1361 return thread_get_state_to_user(thread, flavor, state, count);
1362 }
1363
1364 static void
1365 act_set_ast(
1366 thread_t thread,
1367 ast_t ast)
1368 {
1369 spl_t s = splsched();
1370
1371 if (thread == current_thread()) {
1372 thread_ast_set(thread, ast);
1373 ast_propagate(thread);
1374 } else {
1375 processor_t processor;
1376
1377 thread_lock(thread);
1378 thread_ast_set(thread, ast);
1379 processor = thread->last_processor;
1380 if (processor != PROCESSOR_NULL &&
1381 processor->state == PROCESSOR_RUNNING &&
1382 processor->active_thread == thread) {
1383 cause_ast_check(processor);
1384 }
1385 thread_unlock(thread);
1386 }
1387
1388 splx(s);
1389 }
1390
1391 /*
1392 * set AST on thread without causing an AST check
1393 * and without taking the thread lock
1394 *
1395 * If thread is not the current thread, then it may take
1396 * up until the next context switch or quantum expiration
1397 * on that thread for it to notice the AST.
1398 */
1399 static void
1400 act_set_ast_async(thread_t thread,
1401 ast_t ast)
1402 {
1403 thread_ast_set(thread, ast);
1404
1405 if (thread == current_thread()) {
1406 spl_t s = splsched();
1407 ast_propagate(thread);
1408 splx(s);
1409 }
1410 }
1411
1412 void
1413 act_set_debug_assert(void)
1414 {
1415 thread_t thread = current_thread();
1416 if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1417 thread_ast_set(thread, AST_DEBUG_ASSERT);
1418 }
1419 if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1420 spl_t s = splsched();
1421 ast_propagate(thread);
1422 splx(s);
1423 }
1424 }
1425
1426 void
1427 act_set_astbsd(thread_t thread)
1428 {
1429 act_set_ast(thread, AST_BSD);
1430 }
1431
1432 void
1433 act_set_astkevent(thread_t thread, uint16_t bits)
1434 {
1435 os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);
1436
1437 /* kevent AST shouldn't send immediate IPIs */
1438 act_set_ast_async(thread, AST_KEVENT);
1439 }
1440
1441 uint16_t
1442 act_clear_astkevent(thread_t thread, uint16_t bits)
1443 {
1444 /*
1445 * avoid the atomic operation if none of the bits is set,
1446 * which will be the common case.
1447 */
1448 uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1449 if (cur & bits) {
1450 cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1451 }
1452 return cur & bits;
1453 }
1454
1455 bool
1456 act_set_ast_reset_pcs(task_t task, thread_t thread)
1457 {
1458 processor_t processor;
1459 bool needs_wait = false;
1460 spl_t s;
1461
1462 s = splsched();
1463
1464 if (thread == current_thread()) {
1465 /*
1466 * this is called from the signal code,
1467 * just set the AST and move on
1468 */
1469 thread_ast_set(thread, AST_RESET_PCS);
1470 ast_propagate(thread);
1471 } else {
1472 thread_lock(thread);
1473
1474 assert(thread->t_rr_state.trr_ipi_ack_pending == 0);
1475 assert(thread->t_rr_state.trr_sync_waiting == 0);
1476
1477 processor = thread->last_processor;
1478 if (!thread->active) {
1479 /*
1480 * ->active is being set before the thread is added
1481 * to the thread list (under the task lock which
1482 * the caller holds), and is reset before the thread
1483 * lock is being taken by thread_terminate_self().
1484 *
1485 * The result is that this will never fail to
1486 * set the AST on a thread that is active,
1487 * but will not set it past thread_terminate_self().
1488 */
1489 } else if (processor != PROCESSOR_NULL &&
1490 processor->state == PROCESSOR_RUNNING &&
1491 processor->active_thread == thread) {
1492 thread->t_rr_state.trr_ipi_ack_pending = true;
1493 needs_wait = true;
1494 thread_ast_set(thread, AST_RESET_PCS);
1495 cause_ast_check(processor);
1496 } else if (thread_reset_pcs_in_range(task, thread)) {
1497 if (thread->t_rr_state.trr_fault_state) {
1498 thread->t_rr_state.trr_fault_state =
1499 TRR_FAULT_OBSERVED;
1500 needs_wait = true;
1501 }
1502 thread_ast_set(thread, AST_RESET_PCS);
1503 }
1504 thread_unlock(thread);
1505 }
1506
1507 splx(s);
1508
1509 return needs_wait;
1510 }
1511
1512 void
1513 act_set_kperf(thread_t thread)
1514 {
1515 /* safety check */
1516 if (thread != current_thread()) {
1517 if (!ml_get_interrupts_enabled()) {
1518 panic("unsafe act_set_kperf operation");
1519 }
1520 }
1521
1522 act_set_ast(thread, AST_KPERF);
1523 }
1524
1525 #if CONFIG_MACF
1526 void
1527 act_set_astmacf(
1528 thread_t thread)
1529 {
1530 act_set_ast( thread, AST_MACF);
1531 }
1532 #endif
1533
1534 void
1535 act_set_astledger(thread_t thread)
1536 {
1537 act_set_ast(thread, AST_LEDGER);
1538 }
1539
1540 /*
1541 * The ledger AST may need to be set while already holding
1542 * the thread lock. This routine skips sending the IPI,
1543 * allowing us to avoid the lock hold.
1544 *
1545 * However, it means the targeted thread must context switch
1546 * to recognize the ledger AST.
1547 */
1548 void
1549 act_set_astledger_async(thread_t thread)
1550 {
1551 act_set_ast_async(thread, AST_LEDGER);
1552 }
1553
1554 void
1555 act_set_io_telemetry_ast(thread_t thread)
1556 {
1557 act_set_ast(thread, AST_TELEMETRY_IO);
1558 }
1559
1560 void
1561 act_set_macf_telemetry_ast(thread_t thread)
1562 {
1563 act_set_ast(thread, AST_TELEMETRY_MACF);
1564 }
1565
1566 void
1567 act_set_astproc_resource(thread_t thread)
1568 {
1569 act_set_ast(thread, AST_PROC_RESOURCE);
1570 }
1571