/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define PTHREAD_INTERNAL 1

#include <stdatomic.h>
#include <kern/debug.h>
#include <kern/mach_param.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/affinity.h>
#include <kern/zalloc.h>
#include <kern/policy_internal.h>
#include <kern/sync_sema.h>

#include <machine/machine_routines.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <sys/param.h>
#include <sys/eventvar.h>
#include <sys/pthread_shims.h>
#include <pthread/workqueue_internal.h>
#include <sys/cdefs.h>
#include <sys/proc_info.h>
#include <sys/proc_internal.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ulock.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_protos.h>
#include <kern/kcdata.h>

/* version number of the in-kernel shims given to pthread.kext */
#define PTHREAD_SHIMS_VERSION 1

#define PTHREAD_CALLBACK_MEMBER kevent_workq_internal

/* compile time asserts to check the length of structures in pthread_shims.h */
static_assert((sizeof(struct pthread_functions_s) - offsetof(struct pthread_functions_s, psynch_rw_yieldwrlock) - sizeof(void*)) == (sizeof(void*) * 100));
static_assert((sizeof(struct pthread_callbacks_s) - offsetof(struct pthread_callbacks_s, PTHREAD_CALLBACK_MEMBER) - sizeof(void*)) == (sizeof(void*) * 100));
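/*
 * Each assert pins the number of pointer-sized slots that follow the named
 * member, the intent being that an incompatible change to these shared
 * structures shows up as a build failure rather than a mismatched table at
 * runtime.
 */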

/* old pthread code had definitions for these as they don't exist in headers */
extern kern_return_t mach_port_deallocate(ipc_space_t, mach_port_name_t);
extern void thread_deallocate_safe(thread_t thread);

#define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \
	static rettype \
	get(structtype x) { \
	        return (x)->member; \
	} \
	static void \
	set(structtype x, rettype y) { \
	        (x)->member = y; \
	}
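
/*
 * For example, the first invocation below expands to roughly:
 *
 *	static user_addr_t
 *	proc_get_threadstart(struct proc *x) { return (x)->p_threadstart; }
 *	static void
 *	proc_set_threadstart(struct proc *x, user_addr_t y) { (x)->p_threadstart = y; }
 */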

PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart, user_addr_t, struct proc*, p_threadstart);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthsize, proc_set_pthsize, int, struct proc*, p_pthsize);
PTHREAD_STRUCT_ACCESSOR(proc_get_wqthread, proc_set_wqthread, user_addr_t, struct proc*, p_wqthread);
PTHREAD_STRUCT_ACCESSOR(proc_get_stack_addr_hint, proc_set_stack_addr_hint, user_addr_t, struct proc *, p_stack_addr_hint);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthread_tsd_offset, proc_set_pthread_tsd_offset, uint32_t, struct proc *, p_pth_tsd_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_mach_thread_self_tsd_offset, proc_set_mach_thread_self_tsd_offset, uint64_t, struct proc *, p_mach_thread_self_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthhash, proc_set_pthhash, void*, struct proc*, p_pthhash);

#define WQPTR_IS_INITING_VALUE ((void *)~(uintptr_t)0)

static void
proc_set_dispatchqueue_offset(struct proc *p, uint64_t offset)
{
	p->p_dispatchqueue_offset = offset;
}

static void
proc_set_workqueue_quantum_offset(struct proc *p, uint64_t offset)
{
	p->p_pthread_wq_quantum_offset = offset;
}

static void
proc_set_return_to_kernel_offset(struct proc *p, uint64_t offset)
{
	p->p_return_to_kernel_offset = offset;
}

static user_addr_t
proc_get_user_stack(struct proc *p)
{
	return p->user_stack;
}

static void
uthread_set_returnval(struct uthread *uth, int retval)
{
	uth->uu_rval[0] = retval;
}

__attribute__((noreturn))
static void
pthread_returning_to_userspace(void)
{
	thread_exception_return();
}

__attribute__((noreturn))
static void
pthread_bootstrap_return(void)
{
	thread_bootstrap_return();
}

static uint32_t
get_task_threadmax(void)
{
	return task_threadmax;
}

static uint64_t
proc_get_register(struct proc *p)
{
	return p->p_lflag & P_LREGISTER;
}

static void
proc_set_register(struct proc *p)
{
	proc_setregister(p);
}

static void*
uthread_get_uukwe(struct uthread *t)
{
	return &t->uu_save.uus_kwe;
}

static int
uthread_is_cancelled(struct uthread *t)
{
	return (t->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL;
}

static vm_map_t
_current_map(void)
{
	return current_map();
}

static boolean_t
qos_main_thread_active(void)
{
	return TRUE;
}

static int
proc_usynch_get_requested_thread_qos(struct uthread *uth)
{
	thread_t thread = uth ? get_machthread(uth) : current_thread();
	int      requested_qos;

	requested_qos = proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS);

	/*
	 * For the purposes of userspace synchronization, it doesn't make sense to
	 * place an override of UNSPECIFIED on another thread, if the current thread
	 * doesn't have any QoS set. In these cases, upgrade to
	 * THREAD_QOS_USER_INTERACTIVE.
	 */
	if (requested_qos == THREAD_QOS_UNSPECIFIED) {
		requested_qos = THREAD_QOS_USER_INTERACTIVE;
	}

	return requested_qos;
}

static boolean_t
proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth,
    uint64_t tid, int override_qos, boolean_t first_override_for_resource,
    user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? get_machthread(uth) : THREAD_NULL;

	return proc_thread_qos_add_override(task, thread, tid, override_qos,
	           first_override_for_resource, resource, resource_type) == 0;
}

static boolean_t
proc_usynch_thread_qos_remove_override_for_resource(task_t task,
    struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? get_machthread(uth) : THREAD_NULL;

	return proc_thread_qos_remove_override(task, thread, tid, resource,
	           resource_type) == 0;
}


static wait_result_t
psynch_wait_prepare(uintptr_t kwq, struct turnstile **tstore,
    thread_t owner, block_hint_t block_hint, uint64_t deadline)
{
	struct turnstile *ts;
	wait_result_t wr;

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);

		turnstile_update_inheritor(ts, owner,
		    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = waitq_assert_wait64_leeway(&ts->ts_waitq, (event64_t)kwq,
		    THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	} else {
		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = assert_wait_deadline_with_leeway((event_t)kwq, THREAD_ABORTSAFE,
		    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	}

	return wr;
}

static void
psynch_wait_update_complete(struct turnstile *ts)
{
	assert(ts);
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
}

static void
psynch_wait_complete(uintptr_t kwq, struct turnstile **tstore)
{
	assert(tstore);
	turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
}

static void
psynch_wait_update_owner(uintptr_t kwq, thread_t owner,
    struct turnstile **tstore)
{
	struct turnstile *ts;

	ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
	    TURNSTILE_PTHREAD_MUTEX);

	turnstile_update_inheritor(ts, owner,
	    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
	turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
}

static void
psynch_wait_cleanup(void)
{
	turnstile_cleanup();
}

static kern_return_t
psynch_wait_wakeup(uintptr_t kwq, struct ksyn_waitq_element *kwe,
    struct turnstile **tstore)
{
	struct thread *th;
	struct turnstile *ts;
	kern_return_t kr;

	th = get_machthread(__container_of(kwe, struct uthread, uu_save.uus_kwe));

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);
		turnstile_update_inheritor(ts, th,
		    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));

		kr = waitq_wakeup64_thread(&ts->ts_waitq, (event64_t)kwq, th,
		    THREAD_AWAKENED);

		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
		turnstile_complete(kwq, tstore, NULL, TURNSTILE_PTHREAD_MUTEX);
	} else {
		kr = thread_wakeup_thread((event_t)kwq, th);
	}

	return kr;
}

/* kernel (core) to kext shims */

void
pthread_init(void)
{
	if (!pthread_functions) {
		panic("pthread kernel extension not loaded (function table is NULL).");
	}
	pthread_functions->pthread_init();
}

void
pth_proc_hashinit(proc_t p)
{
	pthread_functions->pth_proc_hashinit(p);
}

void
pth_proc_hashdelete(proc_t p)
{
	pthread_functions->pth_proc_hashdelete(p);
}

/* syscall shims */
int
bsdthread_create(struct proc *p, struct bsdthread_create_args *uap, user_addr_t *retval)
{
	return pthread_functions->bsdthread_create(p, uap->func, uap->func_arg, uap->stack, uap->pthread, uap->flags, retval);
}

int
bsdthread_register(struct proc *p, struct bsdthread_register_args *uap, __unused int32_t *retval)
{
	kern_return_t kr;
	static_assert(offsetof(struct bsdthread_register_args, threadstart) + sizeof(user_addr_t) ==
	    offsetof(struct bsdthread_register_args, wqthread));
	kr = machine_thread_function_pointers_convert_from_user(current_thread(), &uap->threadstart, 2);
	assert(kr == KERN_SUCCESS);

	if (pthread_functions->version >= 1) {
		return pthread_functions->bsdthread_register2(p, uap->threadstart,
		           uap->wqthread, uap->flags, uap->stack_addr_hint,
		           uap->targetconc_ptr, uap->dispatchqueue_offset,
		           uap->tsd_offset, retval);
	} else {
		return pthread_functions->bsdthread_register(p, uap->threadstart,
		           uap->wqthread, uap->flags, uap->stack_addr_hint,
		           uap->targetconc_ptr, uap->dispatchqueue_offset,
		           retval);
	}
}

int
bsdthread_terminate(struct proc *p, struct bsdthread_terminate_args *uap, int32_t *retval)
{
	thread_t th = current_thread();
	uthread_t uth = current_uthread();
	struct _bsdthread_terminate *bts = &uth->uu_save.uus_bsdthread_terminate;
	mach_port_name_t sem = (mach_port_name_t)uap->sema_or_ulock;
	mach_port_name_t thp = uap->port;

	if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
		workq_thread_terminate(p, get_bsdthread_info(th));
	}

	/*
	 * Gross compatibility hack: ports end in 0x3 and ulocks are aligned.
	 * If the `semaphore` value doesn't look like a port, then it is
	 * a ulock address that will be woken by uthread_joiner_wake()
	 *
	 * We also need to delay destroying the thread port so that
	 * pthread_join()'s ulock_wait() can resolve the thread until
	 * uthread_joiner_wake() has run.
	 */
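	/*
	 * That is: a non-zero value that ipc_entry_name_mask() would alter does
	 * not look like a port name, so treat it as the joiner's ulock address
	 * and hold on to the kernel port for uthread_joiner_wake().
	 */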
	if (uap->sema_or_ulock && uap->sema_or_ulock != ipc_entry_name_mask(sem)) {
		thread_set_tag(th, THREAD_TAG_USER_JOIN);
		bts->ulock_addr = uap->sema_or_ulock;
		bts->kport = thp;

		sem = thp = MACH_PORT_NULL;
	}

	return pthread_functions->bsdthread_terminate(p, uap->stackaddr, uap->freesize, thp, sem, retval);
}

int
thread_selfid(struct proc *p, __unused struct thread_selfid_args *uap, uint64_t *retval)
{
	return pthread_functions->thread_selfid(p, retval);
}

/* pthread synchroniser syscalls */

int
psynch_mutexwait(proc_t p, struct psynch_mutexwait_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexwait(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}

int
psynch_mutexdrop(proc_t p, struct psynch_mutexdrop_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexdrop(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}

int
psynch_cvbroad(proc_t p, struct psynch_cvbroad_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvbroad(p, uap->cv, uap->cvlsgen, uap->cvudgen, uap->flags, uap->mutex, uap->mugen, uap->tid, retval);
}

int
psynch_cvsignal(proc_t p, struct psynch_cvsignal_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvsignal(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->thread_port, uap->mutex, uap->mugen, uap->tid, uap->flags, retval);
}

int
psynch_cvwait(proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_cvwait(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->mutex, uap->mugen, uap->flags, uap->sec, uap->nsec, retval);
}

int
psynch_cvclrprepost(proc_t p, struct psynch_cvclrprepost_args * uap, int *retval)
{
	return pthread_functions->psynch_cvclrprepost(p, uap->cv, uap->cvgen, uap->cvugen, uap->cvsgen, uap->prepocnt, uap->preposeq, uap->flags, retval);
}

int
psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_longrdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_rdlock(proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_rw_rdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_unlock(proc_t p, struct psynch_rw_unlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_unlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args *uap, __unused uint32_t *retval)
{
	return ENOTSUP;
}

int
psynch_rw_wrlock(proc_t p, struct psynch_rw_wrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_wrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_yieldwrlock(proc_t p, struct psynch_rw_yieldwrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_yieldwrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t *retval)
{
	return 0;
}

int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int *retval)
{
	return 0;
}

void
kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo)
{
	if (pthread_functions->pthread_find_owner) {
		pthread_functions->pthread_find_owner(thread, waitinfo);
	}
}

void *
kdp_pthread_get_thread_kwq(thread_t thread)
{
	if (pthread_functions->pthread_get_thread_kwq) {
		return pthread_functions->pthread_get_thread_kwq(thread);
	}

	return NULL;
}

void
thread_will_park_or_terminate(__unused thread_t thread)
{
}

static bool
proc_get_jit_entitled(struct proc *t)
{
	task_t task = proc_task(t);
	if (!task) {
		return false;
	}

	pmap_t pmap = get_task_pmap(task);
	return pmap_get_jit_entitled(pmap);
}

/*
 * The callbacks structure (defined in pthread_shims.h) contains a collection
 * of kernel functions that were not deemed sensible to expose as a KPI to all
 * kernel extensions. So the kext is given them in the form of a structure of
 * function pointers.
 */
static const struct pthread_callbacks_s pthread_callbacks = {
	.version = PTHREAD_SHIMS_VERSION,
	.config_thread_max = CONFIG_THREAD_MAX,
	.get_task_threadmax = get_task_threadmax,

	.proc_get_threadstart = proc_get_threadstart,
	.proc_set_threadstart = proc_set_threadstart,
	.proc_get_pthsize = proc_get_pthsize,
	.proc_set_pthsize = proc_set_pthsize,
	.proc_get_wqthread = proc_get_wqthread,
	.proc_set_wqthread = proc_set_wqthread,
	.proc_set_dispatchqueue_offset = proc_set_dispatchqueue_offset,
	.proc_set_workqueue_quantum_offset = proc_set_workqueue_quantum_offset,
	.proc_get_pthhash = proc_get_pthhash,
	.proc_set_pthhash = proc_set_pthhash,
	.proc_get_register = proc_get_register,
	.proc_set_register = proc_set_register,
	.proc_get_jit_entitled = proc_get_jit_entitled,
	.proc_get_pthread_jit_allowlist2 = proc_get_pthread_jit_allowlist,

	/* kernel IPI interfaces */
	.task_get_ipcspace = get_task_ipcspace,
	.vm_map_page_info = vm_map_page_info,
	.ipc_port_copyout_send_pinned = ipc_port_copyout_send_pinned,
	.thread_set_wq_state32 = thread_set_wq_state32,
	.thread_set_wq_state64 = thread_set_wq_state64,

	.uthread_get_uukwe = uthread_get_uukwe,
	.uthread_set_returnval = uthread_set_returnval,
	.uthread_is_cancelled = uthread_is_cancelled,

	.thread_exception_return = pthread_returning_to_userspace,
	.thread_bootstrap_return = pthread_bootstrap_return,
	.unix_syscall_return = unix_syscall_return,

	.get_bsdthread_info = get_bsdthread_info,
	.thread_policy_set_internal = thread_policy_set_internal,
	.thread_policy_get = thread_policy_get,

	.__pthread_testcancel = __pthread_testcancel,

	.mach_port_deallocate = mach_port_deallocate,
	.semaphore_signal_internal_trap = semaphore_signal_internal_trap,
	.current_map = _current_map,

	.thread_create_immovable = thread_create_immovable,
	.thread_terminate_pinned = thread_terminate_pinned,
	.thread_resume = thread_resume,

	.kevent_workq_internal = kevent_workq_internal,

	.convert_thread_to_port_pinned = convert_thread_to_port_pinned,

	.proc_get_stack_addr_hint = proc_get_stack_addr_hint,
	.proc_set_stack_addr_hint = proc_set_stack_addr_hint,
	.proc_get_pthread_tsd_offset = proc_get_pthread_tsd_offset,
	.proc_set_pthread_tsd_offset = proc_set_pthread_tsd_offset,
	.proc_get_mach_thread_self_tsd_offset = proc_get_mach_thread_self_tsd_offset,
	.proc_set_mach_thread_self_tsd_offset = proc_set_mach_thread_self_tsd_offset,

	.thread_set_tsd_base = thread_set_tsd_base,

	.proc_usynch_get_requested_thread_qos = proc_usynch_get_requested_thread_qos,

	.qos_main_thread_active = qos_main_thread_active,
	.thread_set_voucher_name = thread_set_voucher_name,

	.proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
	.proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,

	.thread_set_tag = thread_set_tag,
	.thread_get_tag = thread_get_tag,

	.proc_set_return_to_kernel_offset = proc_set_return_to_kernel_offset,
	.thread_will_park_or_terminate = thread_will_park_or_terminate,

	.proc_get_user_stack = proc_get_user_stack,
	.task_findtid = task_findtid,
	.thread_deallocate_safe = thread_deallocate_safe,

	.psynch_wait_prepare = psynch_wait_prepare,
	.psynch_wait_update_complete = psynch_wait_update_complete,
	.psynch_wait_complete = psynch_wait_complete,
	.psynch_wait_cleanup = psynch_wait_cleanup,
	.psynch_wait_wakeup = psynch_wait_wakeup,
	.psynch_wait_update_owner = psynch_wait_update_owner,
};
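
/*
 * Illustrative kext-side usage (local names here are hypothetical): once
 * registered, pthread.kext reaches back into the kernel through the pointer
 * it received, whose fields are the functions initialized above, e.g.
 *
 *	pthread_callbacks_t kern_funcs;   // filled in during pthread_kext_register()
 *	int pthsize = kern_funcs->proc_get_pthsize(p);
 *	kern_funcs->proc_set_dispatchqueue_offset(p, dq_offset);
 */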

pthread_callbacks_t pthread_kern = &pthread_callbacks;
pthread_functions_t pthread_functions = NULL;

/*
 * pthread_kext_register is called by pthread.kext upon load; it has to provide
 * us with a function pointer table of pthread internal calls. In return, this
 * file provides it with the table of function pointers it needs.
 */

void
pthread_kext_register(pthread_functions_t fns, pthread_callbacks_t *callbacks)
{
	if (pthread_functions != NULL) {
		panic("Re-initialisation of pthread kext callbacks.");
	}

	if (callbacks != NULL) {
		*callbacks = &pthread_callbacks;
	} else {
		panic("pthread_kext_register called without callbacks pointer.");
	}

	if (fns) {
		pthread_functions = fns;
	}
}
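
/*
 * Illustrative sketch of the kext side of this handshake (the names below are
 * assumed, not defined in this file): pthread.kext's start routine would do
 * roughly
 *
 *	static struct pthread_functions_s pthread_internal_functions = { ... };
 *	pthread_callbacks_t pthread_kern;
 *
 *	pthread_kext_register(&pthread_internal_functions, &pthread_kern);
 *
 * after which the kernel dispatches through pthread_functions and the kext
 * calls back through the returned callback table.
 */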