/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2018 Apple, Inc. All Rights Reserved */

#include <sys/cdefs.h>

#include <kern/assert.h>
#include <kern/ast.h>
#include <kern/clock.h>
#include <kern/cpu_data.h>
#include <kern/kern_types.h>
#include <kern/policy_internal.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>    /* for thread_exception_return */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/zalloc.h>
#include <kern/work_interval.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/mach_port.h>
#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <mach/thread_act.h>    /* for thread_resume */
#include <mach/thread_policy.h>
#include <mach/thread_status.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>
#include <machine/smp.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/eventvar.h>
#include <sys/kdebug.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/param.h>
#include <sys/proc_info.h>      /* for fill_procworkqueue */
#include <sys/proc_internal.h>
#include <sys/pthread_shims.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ulock.h>          /* for ulock_owner_value_to_port_name */

#include <pthread/bsdthread_private.h>
#include <pthread/workqueue_syscalls.h>
#include <pthread/workqueue_internal.h>
#include <pthread/workqueue_trace.h>

#include <os/log.h>

static void workq_unpark_continue(void *uth, wait_result_t wr) __dead2;

static void workq_bound_thread_unpark_continue(void *uth, wait_result_t wr) __dead2;

static void workq_bound_thread_initialize_and_unpark_continue(void *uth, wait_result_t wr) __dead2;

static void workq_bound_thread_setup_and_run(struct uthread *uth, int setup_flags) __dead2;

static void workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags);

static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req);

static uint32_t workq_constrained_allowance(struct workqueue *wq,
    thread_qos_t at_qos, struct uthread *uth,
    bool may_start_timer, bool record_failed_allowance);

static bool _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq);

static bool workq_thread_is_busy(uint64_t cur_ts,
    _Atomic uint64_t *lastblocked_tsp);

static int workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS;

static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags);

static inline void
workq_lock_spin(struct workqueue *wq);

static inline void
workq_unlock(struct workqueue *wq);

#pragma mark globals

struct workq_usec_var {
	uint32_t usecs;
	uint64_t abstime;
};

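/*
 * WORKQ_SYSCTL_USECS(var, init) declares a workq_usec_var and a read-write
 * "kern.<var>_usecs" sysctl backed by it; workq_sysctl_handle_usecs (below)
 * recomputes the cached abstime whenever the microsecond value is rewritten.
 * For example, WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS)
 * publishes kern.wq_stalled_window_usecs.
 */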
#define WORKQ_SYSCTL_USECS(var, init) \
	static struct workq_usec_var var = { .usecs = init }; \
	SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
	    workq_sysctl_handle_usecs, "I", "")

static LCK_GRP_DECLARE(workq_lck_grp, "workq");
os_refgrp_decl(static, workq_refgrp, "workq", NULL);

static ZONE_DEFINE(workq_zone_workqueue, "workq.wq",
    sizeof(struct workqueue), ZC_NONE);
static ZONE_DEFINE(workq_zone_threadreq, "workq.threadreq",
    sizeof(struct workq_threadreq_s), ZC_CACHING);

static struct mpsc_daemon_queue workq_deallocate_queue;

WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_max_timer_interval, WQ_MAX_TIMER_INTERVAL_USECS);
static uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS;
static uint32_t wq_max_constrained_threads = WORKQUEUE_MAXTHREADS / 8;
static uint32_t wq_init_constrained_limit = 1;
static uint16_t wq_death_max_load;
static uint32_t wq_max_parallelism[WORKQ_NUM_QOS_BUCKETS];

/*
 * This is not a hard limit but the maximum size we aim to hit across the
 * entire cooperative pool. We can oversubscribe the pool due to
 * non-cooperative workers, and the most we will oversubscribe it by is
 * wq_max_cooperative_threads * WORKQ_NUM_QOS_BUCKETS in total.
 */
static uint32_t wq_max_cooperative_threads;

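/*
 * Per-QoS-bucket cap for the cooperative pool: when the process has opted
 * into the strict per-QoS limit (see the kern.wq_limit_cooperative_threads
 * sysctl below), the pool is capped at one thread per bucket; otherwise the
 * global wq_max_cooperative_threads target applies.
 */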
static inline uint32_t
wq_cooperative_queue_max_size(struct workqueue *wq)
{
	return wq->wq_cooperative_queue_has_limited_max_size ? 1 : wq_max_cooperative_threads;
}

#pragma mark sysctls

static int
workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
	struct workq_usec_var *v = arg1;
	int error = sysctl_handle_int(oidp, &v->usecs, 0, req);
	if (error || !req->newptr) {
		return error;
	}
	clock_interval_to_absolutetime_interval(v->usecs, NSEC_PER_USEC,
	    &v->abstime);
	return 0;
}

SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
    &wq_max_threads, 0, "");

SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
    &wq_max_constrained_threads, 0, "");

static int
wq_limit_cooperative_threads_for_proc SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	int input_pool_size = 0;
	int changed;
	int error = 0;

	error = sysctl_io_number(req, 0, sizeof(int), &input_pool_size, &changed);
	if (error || !changed) {
		return error;
	}

#define WQ_COOPERATIVE_POOL_SIZE_DEFAULT 0
#define WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS -1
	/* Not available currently, but the sysctl interface is designed to allow
	 * these extra parameters:
	 * WQ_COOPERATIVE_POOL_SIZE_STRICT : -2 (across all buckets)
	 * WQ_COOPERATIVE_POOL_SIZE_CUSTOM : [1, 512]
	 */

	if (input_pool_size != WQ_COOPERATIVE_POOL_SIZE_DEFAULT
	    && input_pool_size != WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS) {
		error = EINVAL;
		goto out;
	}

	proc_t p = req->p;
	struct workqueue *wq = proc_get_wqptr(p);

	if (wq != NULL) {
		workq_lock_spin(wq);
		if (wq->wq_reqcount > 0 || wq->wq_nthreads > 0) {
			// Hackily enforce that the workqueue is still new (no requests or
			// threads)
			error = ENOTSUP;
		} else {
			wq->wq_cooperative_queue_has_limited_max_size = (input_pool_size == WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS);
		}
		workq_unlock(wq);
	} else {
		/* This process has no workqueue, calling this sysctl makes no sense */
		return ENOTSUP;
	}

out:
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, wq_limit_cooperative_threads,
    CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_WR | CTLFLAG_LOCKED | CTLTYPE_INT, 0, 0,
    wq_limit_cooperative_threads_for_proc,
    "I", "Modify the max pool size of the cooperative pool");

#pragma mark p_wqptr

#define WQPTR_IS_INITING_VALUE ((struct workqueue *)~(uintptr_t)0)

static struct workqueue *
proc_get_wqptr_fast(struct proc *p)
{
	return os_atomic_load(&p->p_wqptr, relaxed);
}

struct workqueue *
proc_get_wqptr(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	return wq == WQPTR_IS_INITING_VALUE ? NULL : wq;
}

static void
proc_set_wqptr(struct proc *p, struct workqueue *wq)
{
	wq = os_atomic_xchg(&p->p_wqptr, wq, release);
	if (wq == WQPTR_IS_INITING_VALUE) {
		proc_lock(p);
		thread_wakeup(&p->p_wqptr);
		proc_unlock(p);
	}
}

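/*
 * Initialization handshake for p_wqptr (a sketch of the contract, inferred
 * from the code below): returns true when the caller won the race and must
 * allocate the workqueue and publish it with proc_set_wqptr(); returns false
 * when another thread is initializing (after blocking until it finishes) or
 * has already initialized, in which case the caller should re-check p_wqptr.
 */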
static bool
proc_init_wqptr_or_wait(struct proc *p)
{
	struct workqueue *wq;

	proc_lock(p);
	wq = os_atomic_load(&p->p_wqptr, relaxed);

	if (wq == NULL) {
		os_atomic_store(&p->p_wqptr, WQPTR_IS_INITING_VALUE, relaxed);
		proc_unlock(p);
		return true;
	}

	if (wq == WQPTR_IS_INITING_VALUE) {
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		proc_unlock(p);
	}
	return false;
}

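/*
 * Each parked thread waits on a per-thread event: the address of its
 * uu_workq_stackaddr field. This keeps wakeups targeted, so
 * workq_thread_wakeup() can wake exactly one chosen thread.
 */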
static inline event_t
workq_parked_wait_event(struct uthread *uth)
{
	return (event_t)&uth->uu_workq_stackaddr;
}

static inline void
workq_thread_wakeup(struct uthread *uth)
{
	thread_wakeup_thread(workq_parked_wait_event(uth), get_machthread(uth));
}

#pragma mark wq_thactive

#if defined(__LP64__)
// Layout is:
//   127 - 115 : 13 bits of zeroes
//   114 - 112 : best QoS among all pending constrained requests
//   111 -   0 : MGR, AUI, UI, IN, DF, UT, BG+MT buckets every 16 bits
#define WQ_THACTIVE_BUCKET_WIDTH 16
#define WQ_THACTIVE_QOS_SHIFT    (7 * WQ_THACTIVE_BUCKET_WIDTH)
#else
// Layout is:
//   63 - 61 : best QoS among all pending constrained requests
//        60 : Manager bucket (0 or 1)
//   59 -  0 : AUI, UI, IN, DF, UT, BG+MT buckets every 10 bits
#define WQ_THACTIVE_BUCKET_WIDTH 10
#define WQ_THACTIVE_QOS_SHIFT    (6 * WQ_THACTIVE_BUCKET_WIDTH + 1)
#endif
#define WQ_THACTIVE_BUCKET_MASK  ((1U << WQ_THACTIVE_BUCKET_WIDTH) - 1)
#define WQ_THACTIVE_BUCKET_HALF  (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1))

static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3,
    "Make sure we have space to encode a QoS");

static inline wq_thactive_t
_wq_thactive(struct workqueue *wq)
{
	return os_atomic_load_wide(&wq->wq_thactive, relaxed);
}

static inline uint8_t
_wq_bucket(thread_qos_t qos)
{
	// Map both BG and MT to the same bucket by over-shifting down and
	// clamping MT and BG together.
	switch (qos) {
	case THREAD_QOS_MAINTENANCE:
		return 0;
	default:
		return qos - 2;
	}
}

#define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
	((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT))

static inline thread_qos_t
_wq_thactive_best_constrained_req_qos(struct workqueue *wq)
{
	// Avoid expensive atomic operations: the three bits we're loading are in
	// a single byte, and always updated under the workqueue lock
	wq_thactive_t v = *(wq_thactive_t *)&wq->wq_thactive;
	return WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(v);
}

static void
_wq_thactive_refresh_best_constrained_req_qos(struct workqueue *wq)
{
	thread_qos_t old_qos, new_qos;
	workq_threadreq_t req;

	req = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);
	new_qos = req ? req->tr_qos : THREAD_QOS_UNSPECIFIED;
	old_qos = _wq_thactive_best_constrained_req_qos(wq);
	if (old_qos != new_qos) {
		long delta = (long)new_qos - (long)old_qos;
		wq_thactive_t v = (wq_thactive_t)delta << WQ_THACTIVE_QOS_SHIFT;
		/*
		 * We can do an atomic add relative to the initial load because updates
		 * to this qos are always serialized under the workqueue lock.
		 */
		v = os_atomic_add(&wq->wq_thactive, v, relaxed);
#ifdef __LP64__
		WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, (uint64_t)v,
		    (uint64_t)(v >> 64), 0);
#else
		WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, v, 0, 0);
#endif
	}
}

static inline wq_thactive_t
_wq_thactive_offset_for_qos(thread_qos_t qos)
{
	uint8_t bucket = _wq_bucket(qos);
	__builtin_assume(bucket < WORKQ_NUM_BUCKETS);
	return (wq_thactive_t)1 << (bucket * WQ_THACTIVE_BUCKET_WIDTH);
}

static inline wq_thactive_t
_wq_thactive_inc(struct workqueue *wq, thread_qos_t qos)
{
	wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
	return os_atomic_add_orig(&wq->wq_thactive, v, relaxed);
}

static inline wq_thactive_t
_wq_thactive_dec(struct workqueue *wq, thread_qos_t qos)
{
	wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
	return os_atomic_sub_orig(&wq->wq_thactive, v, relaxed);
}

static inline void
_wq_thactive_move(struct workqueue *wq,
    thread_qos_t old_qos, thread_qos_t new_qos)
{
	wq_thactive_t v = _wq_thactive_offset_for_qos(new_qos) -
	    _wq_thactive_offset_for_qos(old_qos);
	os_atomic_add(&wq->wq_thactive, v, relaxed);
	wq->wq_thscheduled_count[_wq_bucket(old_qos)]--;
	wq->wq_thscheduled_count[_wq_bucket(new_qos)]++;
}

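/*
 * Counts the active threads at or above `qos` by summing the per-bucket
 * fields of `v` from _wq_bucket(qos) upward. When `busycount` is requested,
 * it also reports how many of those buckets have a scheduled-but-inactive
 * thread that blocked recently enough to still be considered busy.
 */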
static inline uint32_t
_wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v,
    thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount)
{
	uint32_t count = 0, active;
	uint64_t curtime;

	assert(WORKQ_THREAD_QOS_MIN <= qos && qos <= WORKQ_THREAD_QOS_MAX);

	if (busycount) {
		curtime = mach_absolute_time();
		*busycount = 0;
	}
	if (max_busycount) {
		*max_busycount = THREAD_QOS_LAST - qos;
	}

	uint8_t i = _wq_bucket(qos);
	v >>= i * WQ_THACTIVE_BUCKET_WIDTH;
	for (; i < WORKQ_NUM_QOS_BUCKETS; i++, v >>= WQ_THACTIVE_BUCKET_WIDTH) {
		active = v & WQ_THACTIVE_BUCKET_MASK;
		count += active;

		if (busycount && wq->wq_thscheduled_count[i] > active) {
			if (workq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i])) {
				/*
				 * We only consider the last blocked thread for a given bucket
				 * as busy because we don't want to take the list lock in each
				 * sched callback. However this is an approximation that could
				 * contribute to thread creation storms.
				 */
				(*busycount)++;
			}
		}
	}

	return count;
}

/* The input qos here should be the requested QoS of the thread, not accounting
 * for any overrides */
static inline void
_wq_cooperative_queue_scheduled_count_dec(struct workqueue *wq, thread_qos_t qos)
{
	__assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]--;
	assert(old_scheduled_count > 0);
}

/* The input qos here should be the requested QoS of the thread, not accounting
 * for any overrides */
static inline void
_wq_cooperative_queue_scheduled_count_inc(struct workqueue *wq, thread_qos_t qos)
{
	__assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]++;
	assert(old_scheduled_count < UINT8_MAX);
}

#pragma mark wq_flags

static inline uint32_t
_wq_flags(struct workqueue *wq)
{
	return os_atomic_load(&wq->wq_flags, relaxed);
}

static inline bool
_wq_exiting(struct workqueue *wq)
{
	return _wq_flags(wq) & WQ_EXITING;
}

bool
workq_is_exiting(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);
	return !wq || _wq_exiting(wq);
}


#pragma mark workqueue lock

static bool
workq_lock_is_acquired_kdp(struct workqueue *wq)
{
	return kdp_lck_ticket_is_acquired(&wq->wq_lock);
}

static inline void
workq_lock_spin(struct workqueue *wq)
{
	lck_ticket_lock(&wq->wq_lock, &workq_lck_grp);
}

static inline void
workq_lock_held(struct workqueue *wq)
{
	LCK_TICKET_ASSERT_OWNED(&wq->wq_lock);
}

static inline bool
workq_lock_try(struct workqueue *wq)
{
	return lck_ticket_lock_try(&wq->wq_lock, &workq_lck_grp);
}

static inline void
workq_unlock(struct workqueue *wq)
{
	lck_ticket_unlock(&wq->wq_lock);
}

#pragma mark idle thread lists

#define WORKQ_POLICY_INIT(qos) \
	(struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos }

static inline thread_qos_t
workq_pri_bucket(struct uu_workq_policy req)
{
	return MAX(MAX(req.qos_req, req.qos_max), req.qos_override);
}

static inline thread_qos_t
workq_pri_override(struct uu_workq_policy req)
{
	return MAX(workq_pri_bucket(req), req.qos_bucket);
}

static inline bool
workq_thread_needs_params_change(workq_threadreq_t req, struct uthread *uth)
{
	workq_threadreq_param_t cur_trp, req_trp = { };

	cur_trp.trp_value = uth->uu_save.uus_workq_park_data.workloop_params;
	if (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
		req_trp = kqueue_threadreq_workloop_param(req);
	}

	/*
	 * CPU percent flags are handled separately from policy changes, so
	 * ignore them for all of these checks.
	 */
	uint16_t cur_flags = (cur_trp.trp_flags & ~TRP_CPUPERCENT);
	uint16_t req_flags = (req_trp.trp_flags & ~TRP_CPUPERCENT);

	if (!req_flags && !cur_flags) {
		return false;
	}

	if (req_flags != cur_flags) {
		return true;
	}

	if ((req_flags & TRP_PRIORITY) && req_trp.trp_pri != cur_trp.trp_pri) {
		return true;
	}

	if ((req_flags & TRP_POLICY) && req_trp.trp_pol != cur_trp.trp_pol) {
		return true;
	}

	return false;
}

static inline bool
workq_thread_needs_priority_change(workq_threadreq_t req, struct uthread *uth)
{
	if (workq_thread_needs_params_change(req, uth)) {
		return true;
	}

	if (req->tr_qos != workq_pri_override(uth->uu_workq_pri)) {
		return true;
	}

#if CONFIG_PREADOPT_TG
	thread_group_qos_t tg = kqr_preadopt_thread_group(req);
	if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
		/*
		 * Ideally, we'd add a check here to see if the thread's preadopt TG
		 * is the same as the thread request's thread group and short-circuit
		 * if that is the case. But in the interest of keeping the code clean
		 * and not taking the thread lock here, we're going to skip this. We
		 * will eventually short-circuit once we try to set the preadoption
		 * thread group on the thread.
		 */
		return true;
	}
#endif

	return false;
}

/* Input thread must be self. Called during self override, when resetting
 * overrides, or while processing kevents.
 *
 * Called with the workq lock held; sometimes also with the thread mutex.
 */
static void
workq_thread_update_bucket(proc_t p, struct workqueue *wq, struct uthread *uth,
    struct uu_workq_policy old_pri, struct uu_workq_policy new_pri,
    bool force_run)
{
	assert(uth == current_uthread());

	thread_qos_t old_bucket = old_pri.qos_bucket;
	thread_qos_t new_bucket = workq_pri_bucket(new_pri);

	if ((old_bucket != new_bucket) &&
	    !workq_thread_is_permanently_bound(uth)) {
		_wq_thactive_move(wq, old_bucket, new_bucket);
	}

	new_pri.qos_bucket = new_bucket;
	uth->uu_workq_pri = new_pri;

	if (old_pri.qos_override != new_pri.qos_override) {
		thread_set_workq_override(get_machthread(uth), new_pri.qos_override);
	}

	if (wq->wq_reqcount &&
	    !workq_thread_is_permanently_bound(uth) &&
	    (old_bucket > new_bucket || force_run)) {
		int flags = WORKQ_THREADREQ_CAN_CREATE_THREADS;
		if (old_bucket > new_bucket) {
			/*
			 * When lowering our bucket, we may unblock a thread request,
			 * but we can't drop our priority before we have evaluated
			 * whether this is the case, and if we ever drop the workqueue lock
			 * that would cause a priority inversion.
			 *
			 * We hence have to disallow thread creation in that case.
			 */
			flags = 0;
		}
		workq_schedule_creator(p, wq, flags);
	}
}

/*
 * Sets/resets the cpu percent limits on the current thread. We can't set
 * these limits from outside of the current thread, so this function needs
 * to be called when we're executing on the intended thread.
 */
static void
workq_thread_reset_cpupercent(workq_threadreq_t req, struct uthread *uth)
{
	assert(uth == current_uthread());
	workq_threadreq_param_t trp = { };

	if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
		trp = kqueue_threadreq_workloop_param(req);
	}

	if (uth->uu_workq_flags & UT_WORKQ_CPUPERCENT) {
		/*
		 * Going through disable when we have an existing CPU percent limit
		 * set will force the ledger to refill the token bucket of the current
		 * thread. Removing any penalty applied by previous thread use.
		 */
		thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
		uth->uu_workq_flags &= ~UT_WORKQ_CPUPERCENT;
	}

	if (trp.trp_flags & TRP_CPUPERCENT) {
		thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, trp.trp_cpupercent,
		    (uint64_t)trp.trp_refillms * NSEC_PER_SEC);
		uth->uu_workq_flags |= UT_WORKQ_CPUPERCENT;
	}
}

/*
 * This function is always called with the workq lock held, except for the
 * permanently bound workqueue thread, which instead requires the kqlock.
 * See the locking model for the bound thread's uu_workq_flags.
 */
static void
workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req, bool unpark)
{
	thread_t th = get_machthread(uth);
	thread_qos_t qos = req ? req->tr_qos : WORKQ_THREAD_QOS_CLEANUP;
	workq_threadreq_param_t trp = { };
	int priority = 31;
	int policy = POLICY_TIMESHARE;

	if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
		trp = kqueue_threadreq_workloop_param(req);
	}

	uth->uu_workq_pri = WORKQ_POLICY_INIT(qos);
	uth->uu_workq_flags &= ~UT_WORKQ_OUTSIDE_QOS;

	if (unpark) {
		uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
		// qos sent out to userspace (may differ from uu_workq_pri on param threads)
		uth->uu_save.uus_workq_park_data.qos = qos;
	}

	if (qos == WORKQ_THREAD_QOS_MANAGER) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;
		assert(trp.trp_value == 0); // manager qos and thread policy don't mix

		if (_pthread_priority_has_sched_pri(mgr_pri)) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
			thread_set_workq_pri(th, THREAD_QOS_UNSPECIFIED, mgr_pri,
			    POLICY_TIMESHARE);
			return;
		}

		qos = _pthread_priority_thread_qos(mgr_pri);
	} else {
		if (trp.trp_flags & TRP_PRIORITY) {
			qos = THREAD_QOS_UNSPECIFIED;
			priority = trp.trp_pri;
			uth->uu_workq_flags |= UT_WORKQ_OUTSIDE_QOS;
		}

		if (trp.trp_flags & TRP_POLICY) {
			policy = trp.trp_pol;
		}
	}

#if CONFIG_PREADOPT_TG
	if (req && (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP)) {
		/*
		 * For a kqwl permanently configured with a thread group, we can
		 * safely borrow the +1 ref from kqwl_preadopt_tg. A thread then takes
		 * an additional +1 ref for itself via
		 * thread_set_preadopt_thread_group.
		 *
		 * In all other cases, we cannot safely read and borrow the reference
		 * from the kqwl since it can disappear from under us at any time due
		 * to the max-ing logic in kqueue_set_preadopted_thread_group.
		 *
		 * As such, we do the following dance:
		 *
		 * 1) cmpxchg and steal the kqwl's preadopt thread group, leaving
		 *    (NULL + QoS) behind. At this point, we have the reference
		 *    to the thread group from the kqwl.
		 * 2) Have the thread set the preadoption thread group on itself.
		 * 3) cmpxchg from (NULL + QoS), which we set earlier in (1), back to
		 *    thread_group + QoS, i.e. we try to give the reference back to
		 *    the kqwl. If we fail, that's because a higher QoS thread group
		 *    was set on the kqwl in kqueue_set_preadopted_thread_group, in
		 *    which case we need to go back to (1).
		 */

		_Atomic(struct thread_group *) * tg_loc = kqr_preadopt_thread_group_addr(req);

		thread_group_qos_t old_tg, new_tg;
		int ret = 0;
again:
		ret = os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
			if ((!KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) ||
			    KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) {
				os_atomic_rmw_loop_give_up(break);
			}

			/*
			 * Leave the QoS behind - kqueue_set_preadopted_thread_group will
			 * only modify it if there is a higher QoS thread group to attach
			 */
			new_tg = (thread_group_qos_t) ((uintptr_t) old_tg & KQWL_PREADOPT_TG_QOS_MASK);
		});

		if (ret) {
			/*
			 * We successfully took the ref from the kqwl so set it on the
			 * thread now
			 */
			thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));

			thread_group_qos_t thread_group_to_expect = new_tg;
			thread_group_qos_t thread_group_to_set = old_tg;

			os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
				if (old_tg != thread_group_to_expect) {
					/*
					 * There was an intervening write to the kqwl_preadopt_tg,
					 * and it has a higher QoS than what we are working with
					 * here. Abandon our current adopted thread group and redo
					 * the full dance
					 */
					thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(thread_group_to_set));
					os_atomic_rmw_loop_give_up(goto again);
				}

				new_tg = thread_group_to_set;
			});
		} else {
			if (KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) {
				thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));
			} else {
				/* Nothing valid on the kqwl, just clear what's on the thread */
				thread_set_preadopt_thread_group(th, NULL);
			}
		}
	} else {
		/* Not even a kqwl, clear what's on the thread */
		thread_set_preadopt_thread_group(th, NULL);
	}
#endif
	thread_set_workq_pri(th, qos, priority, policy);
}

/*
 * Called by kevent with the NOTE_WL_THREAD_REQUEST knote lock held,
 * every time a servicer is being told about a new max QoS.
 */
void
workq_thread_set_max_qos(struct proc *p, workq_threadreq_t kqr)
{
	struct uu_workq_policy old_pri, new_pri;
	struct uthread *uth = current_uthread();
	struct workqueue *wq = proc_get_wqptr_fast(p);
	thread_qos_t qos = kqr->tr_kq_qos_index;

	if (uth->uu_workq_pri.qos_max == qos) {
		return;
	}

	workq_lock_spin(wq);
	old_pri = new_pri = uth->uu_workq_pri;
	new_pri.qos_max = qos;
	workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	workq_unlock(wq);
}

#pragma mark idle threads accounting and handling

static inline struct uthread *
workq_oldest_killable_idle_thread(struct workqueue *wq)
{
	struct uthread *uth = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);

	if (uth && !uth->uu_save.uus_workq_park_data.has_stack) {
		uth = TAILQ_PREV(uth, workq_uthread_head, uu_workq_entry);
		if (uth) {
			assert(uth->uu_save.uus_workq_park_data.has_stack);
		}
	}
	return uth;
}

static inline uint64_t
workq_kill_delay_for_idle_thread(struct workqueue *wq)
{
	uint64_t delay = wq_reduce_pool_window.abstime;
	uint16_t idle = wq->wq_thidlecount;

	/*
	 * If we have fewer than wq_death_max_load threads, use a 5s timer.
	 *
	 * For the next wq_max_constrained_threads ones, decay linearly
	 * from 5s to 50ms.
	 */
	if (idle <= wq_death_max_load) {
		return delay;
	}

	if (wq_max_constrained_threads > idle - wq_death_max_load) {
		delay *= (wq_max_constrained_threads - (idle - wq_death_max_load));
	}
	return delay / wq_max_constrained_threads;
}
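
/*
 * Worked example for the decay above (a sketch; the tunable values here
 * are illustrative, not the actual defaults): with
 * wq_reduce_pool_window = 5s, wq_death_max_load = 4 and
 * wq_max_constrained_threads = 100, writing excess = idle - wq_death_max_load:
 *
 *   idle =   4  ->  excess =   0  ->  delay = 5s
 *   idle =  54  ->  excess =  50  ->  delay = 5s * (100 - 50) / 100 = 2.5s
 *   idle = 104  ->  excess = 100  ->  delay = 5s / 100              = 50ms
 */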

static inline bool
workq_should_kill_idle_thread(struct workqueue *wq, struct uthread *uth,
    uint64_t now)
{
	uint64_t delay = workq_kill_delay_for_idle_thread(wq);
	return now - uth->uu_save.uus_workq_park_data.idle_stamp > delay;
}

static void
workq_death_call_schedule(struct workqueue *wq, uint64_t deadline)
{
	uint32_t wq_flags = os_atomic_load(&wq->wq_flags, relaxed);

	if (wq_flags & (WQ_EXITING | WQ_DEATH_CALL_SCHEDULED)) {
		return;
	}
	os_atomic_or(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);

	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_NONE, wq, 1, 0, 0);

	/*
	 * <rdar://problem/13139182> Due to how long term timers work, the leeway
	 * can't be too short, so use 500ms which is long enough that we will not
	 * wake up the CPU for killing threads, but short enough that it doesn't
	 * fall into long-term timer list shenanigans.
	 */
	thread_call_enter_delayed_with_leeway(wq->wq_death_call, NULL, deadline,
	    wq_reduce_pool_window.abstime / 10,
	    THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND);
}
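
/*
 * Illustrative arithmetic for the leeway above, assuming the documented
 * 5s value of wq_reduce_pool_window: the leeway passed to
 * thread_call_enter_delayed_with_leeway() is 5s / 10 = 500ms, which is
 * where the 500ms figure in the comment comes from.
 */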

/*
 * `decrement` is set to the number of threads that are no longer dying:
 * - because they have been resuscitated just in time (workq_pop_idle_thread)
 * - or have been killed (workq_thread_terminate).
 */
static void
workq_death_policy_evaluate(struct workqueue *wq, uint16_t decrement)
{
	struct uthread *uth;

	assert(wq->wq_thdying_count >= decrement);
	if ((wq->wq_thdying_count -= decrement) > 0) {
		return;
	}

	if (wq->wq_thidlecount <= 1) {
		return;
	}

	if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) {
		return;
	}

	uint64_t now = mach_absolute_time();
	uint64_t delay = workq_kill_delay_for_idle_thread(wq);

	if (now - uth->uu_save.uus_workq_park_data.idle_stamp > delay) {
		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
		    wq, wq->wq_thidlecount, 0, 0);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
		if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) == 0) {
			workq_thread_wakeup(uth);
		}
		return;
	}

	workq_death_call_schedule(wq,
	    uth->uu_save.uus_workq_park_data.idle_stamp + delay);
}

void
workq_thread_terminate(struct proc *p, struct uthread *uth)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);
	if (!workq_thread_is_permanently_bound(uth)) {
		TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
		if (uth->uu_workq_flags & UT_WORKQ_DYING) {
			WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_END,
			    wq, wq->wq_thidlecount, 0, 0);
			workq_death_policy_evaluate(wq, 1);
		}
	}
	if (wq->wq_nthreads-- == wq_max_threads) {
		/*
		 * We got under the thread limit again, which may have prevented
		 * thread creation from happening; redrive if there are pending requests
		 */
		if (wq->wq_reqcount) {
			workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
		}
	}
	workq_unlock(wq);

	thread_deallocate(get_machthread(uth));
}

static void
workq_kill_old_threads_call(void *param0, void *param1 __unused)
{
	struct workqueue *wq = param0;

	workq_lock_spin(wq);
	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_START, wq, 0, 0, 0);
	os_atomic_andnot(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);
	workq_death_policy_evaluate(wq, 0);
	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_END, wq, 0, 0, 0);
	workq_unlock(wq);
}

static struct uthread *
workq_pop_idle_thread(struct workqueue *wq, uint16_t uu_flags,
    bool *needs_wakeup)
{
	struct uthread *uth;

	if ((uth = TAILQ_FIRST(&wq->wq_thidlelist))) {
		TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
	} else {
		uth = TAILQ_FIRST(&wq->wq_thnewlist);
		TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
	}
	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);

	assert((uth->uu_workq_flags & UT_WORKQ_RUNNING) == 0);
	uth->uu_workq_flags |= UT_WORKQ_RUNNING | uu_flags;

	/* A thread is never woken up as part of the cooperative pool */
	assert((uu_flags & UT_WORKQ_COOPERATIVE) == 0);

	if ((uu_flags & UT_WORKQ_OVERCOMMIT) == 0) {
		wq->wq_constrained_threads_scheduled++;
	}
	wq->wq_threads_scheduled++;
	wq->wq_thidlecount--;

	if (__improbable(uth->uu_workq_flags & UT_WORKQ_DYING)) {
		uth->uu_workq_flags ^= UT_WORKQ_DYING;
		workq_death_policy_evaluate(wq, 1);
		*needs_wakeup = false;
	} else if (uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) {
		*needs_wakeup = false;
	} else {
		*needs_wakeup = true;
	}
	return uth;
}

/*
 * Called by thread_create_workq_waiting() during thread initialization, before
 * assert_wait, before the thread has been started.
 */
event_t
workq_thread_init_and_wq_lock(task_t task, thread_t th)
{
	struct uthread *uth = get_bsdthread_info(th);

	uth->uu_workq_flags = UT_WORKQ_NEW;
	uth->uu_workq_pri = WORKQ_POLICY_INIT(THREAD_QOS_LEGACY);
	uth->uu_workq_thport = MACH_PORT_NULL;
	uth->uu_workq_stackaddr = 0;
	uth->uu_workq_pthread_kill_allowed = 0;

	thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
	thread_reset_workq_qos(th, THREAD_QOS_LEGACY);

	workq_lock_spin(proc_get_wqptr_fast(get_bsdtask_info(task)));
	return workq_parked_wait_event(uth);
}

/**
 * Try to add a new workqueue thread.
 *
 * - called with workq lock held
 * - dropped and retaken around thread creation
 * - returns with workq lock held
 */
static kern_return_t
workq_add_new_idle_thread(
	proc_t p,
	struct workqueue *wq,
	thread_continue_t continuation,
	bool is_permanently_bound,
	thread_t *new_thread)
{
	mach_vm_offset_t th_stackaddr;
	kern_return_t kret;
	thread_t th;

	wq->wq_nthreads++;

	workq_unlock(wq);

	vm_map_t vmap = get_task_map(proc_task(p));

	kret = pthread_functions->workq_create_threadstack(p, vmap, &th_stackaddr);
	if (kret != KERN_SUCCESS) {
		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
		    kret, 1, 0);
		goto out;
	}

	kret = thread_create_workq_waiting(proc_task(p),
	    continuation,
	    &th,
	    is_permanently_bound);
	if (kret != KERN_SUCCESS) {
		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
		    kret, 0, 0);
		pthread_functions->workq_destroy_threadstack(p, vmap, th_stackaddr);
		goto out;
	}

	// thread_create_workq_waiting() will return with the wq lock held
	// on success, because it calls workq_thread_init_and_wq_lock() above

	struct uthread *uth = get_bsdthread_info(th);
	uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr;

	wq->wq_creations++;
	if (!is_permanently_bound) {
		wq->wq_thidlecount++;
		TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
	}

	if (new_thread) {
		*new_thread = th;
	}

	WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0);
	return kret;

out:
	workq_lock_spin(wq);
	/*
	 * Do not redrive here if we went under wq_max_threads again,
	 * it is the responsibility of the callers of this function
	 * to do so when it fails.
	 */
	wq->wq_nthreads--;
	return kret;
}
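
/*
 * Sketch of the caller contract documented above (a hypothetical caller;
 * the continuation name and the redrive policy are assumptions for
 * illustration, not lifted from an actual call site):
 *
 *	workq_lock_spin(wq);
 *	kern_return_t kret = workq_add_new_idle_thread(p, wq,
 *	    workq_unpark_continue, false, NULL);
 *	// the workq lock is held again here, on success and on failure
 *	if (kret != KERN_SUCCESS && wq->wq_reqcount) {
 *		// redrive is the caller's responsibility when creation fails
 *		workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
 *	}
 *	workq_unlock(wq);
 */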

static inline bool
workq_thread_is_overcommit(struct uthread *uth)
{
	return (uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0;
}

static inline bool
workq_thread_is_nonovercommit(struct uthread *uth)
{
	return (uth->uu_workq_flags & (UT_WORKQ_OVERCOMMIT |
	    UT_WORKQ_COOPERATIVE)) == 0;
}

static inline bool
workq_thread_is_cooperative(struct uthread *uth)
{
	return (uth->uu_workq_flags & UT_WORKQ_COOPERATIVE) != 0;
}

bool
workq_thread_is_permanently_bound(struct uthread *uth)
{
	return (uth->uu_workq_flags & UT_WORKQ_PERMANENT_BIND) != 0;
}

static inline void
workq_thread_set_type(struct uthread *uth, uint16_t flags)
{
	uth->uu_workq_flags &= ~(UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
	uth->uu_workq_flags |= flags;
}


#define WORKQ_UNPARK_FOR_DEATH_WAS_IDLE 0x1

__attribute__((noreturn, noinline))
static void
workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t death_flags, uint32_t setup_flags)
{
	thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
	bool first_use = uth->uu_workq_flags & UT_WORKQ_NEW;

	if (qos > WORKQ_THREAD_QOS_CLEANUP) {
		workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
		qos = WORKQ_THREAD_QOS_CLEANUP;
	}

	workq_thread_reset_cpupercent(NULL, uth);

	if (death_flags & WORKQ_UNPARK_FOR_DEATH_WAS_IDLE) {
		wq->wq_thidlecount--;
		if (first_use) {
			TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
		} else {
			TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
		}
	}
	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);

	workq_unlock(wq);

	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);
	}

	uint32_t flags = WQ_FLAG_THREAD_NEWSPI | qos | WQ_FLAG_THREAD_PRIO_QOS;
	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if (!first_use) {
		flags |= WQ_FLAG_THREAD_REUSE;
	}

	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, flags);
	__builtin_unreachable();
}

bool
workq_is_current_thread_updating_turnstile(struct workqueue *wq)
{
	return wq->wq_turnstile_updater == current_thread();
}

__attribute__((always_inline))
static inline void
workq_perform_turnstile_operation_locked(struct workqueue *wq,
    void (^operation)(void))
{
	workq_lock_held(wq);
	wq->wq_turnstile_updater = current_thread();
	operation();
	wq->wq_turnstile_updater = THREAD_NULL;
}

static void
workq_turnstile_update_inheritor(struct workqueue *wq,
    turnstile_inheritor_t inheritor,
    turnstile_update_flags_t flags)
{
	if (wq->wq_inheritor == inheritor) {
		return;
	}
	wq->wq_inheritor = inheritor;
	workq_perform_turnstile_operation_locked(wq, ^{
		turnstile_update_inheritor(wq->wq_turnstile, inheritor,
		    flags | TURNSTILE_IMMEDIATE_UPDATE);
		turnstile_update_inheritor_complete(wq->wq_turnstile,
		    TURNSTILE_INTERLOCK_HELD);
	});
}

static void
workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth,
    uint32_t setup_flags)
{
	uint64_t now = mach_absolute_time();
	bool is_creator = (uth == wq->wq_creator);

	if (workq_thread_is_cooperative(uth)) {
		assert(!is_creator);

		thread_qos_t thread_qos = uth->uu_workq_pri.qos_req;
		_wq_cooperative_queue_scheduled_count_dec(wq, thread_qos);

		/* Before we get here, we always go through
		 * workq_select_threadreq_or_park_and_unlock. If we got here, it means
		 * that we went through the logic in workq_threadreq_select which
		 * did the refresh for the next best cooperative qos while
		 * excluding the current thread - we shouldn't need to do it again.
		 */
		assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
	} else if (workq_thread_is_nonovercommit(uth)) {
		assert(!is_creator);

		wq->wq_constrained_threads_scheduled--;
	}

	uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING | UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
	TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
	wq->wq_threads_scheduled--;

	if (is_creator) {
		wq->wq_creator = NULL;
		WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0,
		    uth->uu_save.uus_workq_park_data.yields);
	}

	if (wq->wq_inheritor == get_machthread(uth)) {
		assert(wq->wq_creator == NULL);
		if (wq->wq_reqcount) {
			workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
		} else {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
	}

	if (uth->uu_workq_flags & UT_WORKQ_NEW) {
		assert(is_creator || (_wq_flags(wq) & WQ_EXITING));
		TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
		wq->wq_thidlecount++;
		return;
	}

	if (!is_creator) {
		_wq_thactive_dec(wq, uth->uu_workq_pri.qos_bucket);
		wq->wq_thscheduled_count[_wq_bucket(uth->uu_workq_pri.qos_bucket)]--;
		uth->uu_workq_flags |= UT_WORKQ_IDLE_CLEANUP;
	}

	uth->uu_save.uus_workq_park_data.idle_stamp = now;

	struct uthread *oldest = workq_oldest_killable_idle_thread(wq);
	uint16_t cur_idle = wq->wq_thidlecount;

	if (cur_idle >= wq_max_constrained_threads ||
	    (wq->wq_thdying_count == 0 && oldest &&
	    workq_should_kill_idle_thread(wq, oldest, now))) {
		/*
		 * Immediately kill threads if we have too many of them.
		 *
		 * And swap "place" with the oldest one we'd have woken up.
		 * This is a relatively desperate situation where we really
		 * need to kill threads quickly, and it's better to kill
		 * the one that's currently on core than to context switch.
		 */
		if (oldest) {
			oldest->uu_save.uus_workq_park_data.idle_stamp = now;
			TAILQ_REMOVE(&wq->wq_thidlelist, oldest, uu_workq_entry);
			TAILQ_INSERT_HEAD(&wq->wq_thidlelist, oldest, uu_workq_entry);
		}

		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
		    wq, cur_idle, 0, 0);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
		uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
		workq_unpark_for_death_and_unlock(p, wq, uth, 0, setup_flags);
		__builtin_unreachable();
	}

	struct uthread *tail = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);

	cur_idle += 1;
	wq->wq_thidlecount = cur_idle;

	if (cur_idle >= wq_death_max_load && tail &&
	    tail->uu_save.uus_workq_park_data.has_stack) {
		uth->uu_save.uus_workq_park_data.has_stack = false;
		TAILQ_INSERT_TAIL(&wq->wq_thidlelist, uth, uu_workq_entry);
	} else {
		uth->uu_save.uus_workq_park_data.has_stack = true;
		TAILQ_INSERT_HEAD(&wq->wq_thidlelist, uth, uu_workq_entry);
	}

	if (!tail) {
		uint64_t delay = workq_kill_delay_for_idle_thread(wq);
		workq_death_call_schedule(wq, now + delay);
	}
}

#pragma mark thread requests

static inline bool
workq_tr_is_overcommit(workq_tr_flags_t tr_flags)
{
	return (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) != 0;
}

static inline bool
workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)
{
	return (tr_flags & (WORKQ_TR_FLAG_OVERCOMMIT |
	    WORKQ_TR_FLAG_COOPERATIVE |
	    WORKQ_TR_FLAG_PERMANENT_BIND)) == 0;
}

static inline bool
workq_tr_is_cooperative(workq_tr_flags_t tr_flags)
{
	return (tr_flags & WORKQ_TR_FLAG_COOPERATIVE) != 0;
}

#define workq_threadreq_is_overcommit(req) workq_tr_is_overcommit((req)->tr_flags)
#define workq_threadreq_is_nonovercommit(req) workq_tr_is_nonovercommit((req)->tr_flags)
#define workq_threadreq_is_cooperative(req) workq_tr_is_cooperative((req)->tr_flags)

static inline int
workq_priority_for_req(workq_threadreq_t req)
{
	thread_qos_t qos = req->tr_qos;

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
		assert(trp.trp_flags & TRP_PRIORITY);
		return trp.trp_pri;
	}
	return thread_workq_pri_for_qos(qos);
}

static inline struct priority_queue_sched_max *
workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req)
{
	assert(!workq_tr_is_cooperative(req->tr_flags));

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		return &wq->wq_special_queue;
	} else if (workq_tr_is_overcommit(req->tr_flags)) {
		return &wq->wq_overcommit_queue;
	} else {
		return &wq->wq_constrained_queue;
	}
}

/* Calculates the number of threads scheduled >= the input QoS */
static uint64_t
workq_num_cooperative_threads_scheduled_to_qos_internal(struct workqueue *wq, thread_qos_t qos)
{
	uint64_t num_cooperative_threads = 0;

	for (thread_qos_t cur_qos = WORKQ_THREAD_QOS_MAX; cur_qos >= qos; cur_qos--) {
		uint8_t bucket = _wq_bucket(cur_qos);
		num_cooperative_threads += wq->wq_cooperative_queue_scheduled_count[bucket];
	}

	return num_cooperative_threads;
}
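
/*
 * Example with illustrative counts (not taken from a real trace): if
 * wq_cooperative_queue_scheduled_count holds { UI: 1, IN: 2, UT: 3 } and
 * every other bucket is 0, a query for THREAD_QOS_USER_INITIATED walks
 * UI then IN and returns 1 + 2 = 3, while a query for THREAD_QOS_UTILITY
 * returns 1 + 2 + 3 = 6.
 */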

/* Calculates the number of threads scheduled >= the input QoS */
static uint64_t
workq_num_cooperative_threads_scheduled_to_qos_locked(struct workqueue *wq, thread_qos_t qos)
{
	workq_lock_held(wq);
	return workq_num_cooperative_threads_scheduled_to_qos_internal(wq, qos);
}

static uint64_t
workq_num_cooperative_threads_scheduled_total(struct workqueue *wq)
{
	return workq_num_cooperative_threads_scheduled_to_qos_locked(wq, WORKQ_THREAD_QOS_MIN);
}

static bool
workq_has_cooperative_thread_requests(struct workqueue *wq)
{
	for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
		uint8_t bucket = _wq_bucket(qos);
		if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
			return true;
		}
	}

	return false;
}

/*
 * Determines which QoS bucket we should service next in the cooperative
 * pool. This function will always return a QoS for the cooperative pool as
 * long as there are requests to be serviced.
 *
 * Unlike the other thread pools, the cooperative pool's next best request
 * depends on the scheduled counts of its buckets as well as on the pending
 * requests.
 *
 * This function is called in the following contexts:
 *
 * a) When determining the best thread QoS for the cooperative bucket for
 * creator/thread reuse
 *
 * b) Once (a) has happened and a thread has bound to a thread request,
 * figuring out whether the next best request for this pool has changed so
 * that the creator can be scheduled.
 *
 * Returns true if the cooperative queue's best qos changed from its
 * previous value.
 */
static bool
_wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq)
{
	workq_lock_held(wq);

	thread_qos_t old_best_req_qos = wq->wq_cooperative_queue_best_req_qos;

	/* We determine the next best cooperative thread request based on the
	 * following:
	 *
	 * 1. Take the MAX of the following:
	 *    a) Highest qos with pending TRs such that number of scheduled
	 *       threads so far with >= qos is < wq_max_cooperative_threads
	 *    b) Highest qos bucket with pending TRs but no scheduled threads for that bucket
	 *
	 * 2. If the result of (1) is UN, then we pick the highest priority amongst
	 *    pending thread requests in the pool.
	 */
	thread_qos_t highest_qos_with_no_scheduled = THREAD_QOS_UNSPECIFIED;
	thread_qos_t highest_qos_req_with_width = THREAD_QOS_UNSPECIFIED;

	thread_qos_t highest_qos_req = THREAD_QOS_UNSPECIFIED;

	int scheduled_count_till_qos = 0;

	for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
		uint8_t bucket = _wq_bucket(qos);
		uint8_t scheduled_count_for_bucket = wq->wq_cooperative_queue_scheduled_count[bucket];
		scheduled_count_till_qos += scheduled_count_for_bucket;

		if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
			if (qos > highest_qos_req) {
				highest_qos_req = qos;
			}
			/*
			 * The pool isn't saturated for threads at and above this QoS, and
			 * this qos bucket has pending requests
			 */
			if (scheduled_count_till_qos < wq_cooperative_queue_max_size(wq)) {
				if (qos > highest_qos_req_with_width) {
					highest_qos_req_with_width = qos;
				}
			}

			/*
			 * There are no threads scheduled for this bucket but there
			 * is work pending, give it at least 1 thread
			 */
			if (scheduled_count_for_bucket == 0) {
				if (qos > highest_qos_with_no_scheduled) {
					highest_qos_with_no_scheduled = qos;
				}
			}
		}
	}

	wq->wq_cooperative_queue_best_req_qos = MAX(highest_qos_with_no_scheduled, highest_qos_req_with_width);
	if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
		wq->wq_cooperative_queue_best_req_qos = highest_qos_req;
	}
#if MACH_ASSERT
	/* Assert that if we report the next best req as UN, there really is
	 * no pending thread request in any cooperative pool bucket */
	if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
		assert(!workq_has_cooperative_thread_requests(wq));
	}
#endif

	return old_best_req_qos != wq->wq_cooperative_queue_best_req_qos;
}
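
/*
 * Worked example for the refresh rule above (illustrative sizes, not
 * defaults): assume wq_cooperative_queue_max_size(wq) == 4, scheduled
 * counts of { UI: 3, IN: 1 }, and pending requests at IN and UT.
 *
 *   UI: cumulative count 3, no pending requests.
 *   IN: cumulative count 4; pending, but 4 < 4 fails the width check (1a),
 *       and the bucket already has a scheduled thread so (1b) does not apply.
 *   UT: cumulative count 4; pending, width check fails, but the bucket has
 *       no scheduled thread, so (1b) selects it.
 *
 * Result: best_req_qos becomes UT even though IN also has pending work;
 * the starved bucket is guaranteed at least one thread.
 */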

/*
 * Returns whether or not the input thread (or creator thread if uth is NULL)
 * should be allowed to work as part of the cooperative pool for the <input qos>
 * bucket.
 *
 * This function is called in a bunch of places:
 * a) When the quantum expires for a thread that is part of the cooperative
 * pool
 * b) When trying to pick a thread request for the creator thread to
 * represent.
 * c) When a thread is trying to pick a thread request to actually bind to
 * and service.
 *
 * Called with workq lock held.
 */

#define WQ_COOPERATIVE_POOL_UNSATURATED 1
#define WQ_COOPERATIVE_BUCKET_UNSERVICED 2
#define WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS 3

static bool
workq_cooperative_allowance(struct workqueue *wq, thread_qos_t qos, struct uthread *uth,
    bool may_start_timer)
{
	workq_lock_held(wq);

	bool exclude_thread_as_scheduled = false;
	bool passed_admissions = false;
	uint8_t bucket = _wq_bucket(qos);

	if (uth && workq_thread_is_cooperative(uth)) {
		exclude_thread_as_scheduled = true;
		_wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);
	}

	/*
	 * We have not saturated the pool yet, let this thread continue
	 */
	uint64_t total_cooperative_threads;
	total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
	if (total_cooperative_threads < wq_cooperative_queue_max_size(wq)) {
		passed_admissions = true;
		WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
		    total_cooperative_threads, qos, passed_admissions,
		    WQ_COOPERATIVE_POOL_UNSATURATED);
		goto out;
	}

	/*
	 * Without this thread, nothing is servicing the bucket which has pending
	 * work
	 */
	uint64_t bucket_scheduled = wq->wq_cooperative_queue_scheduled_count[bucket];
	if (bucket_scheduled == 0 &&
	    !STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
		passed_admissions = true;
		WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
		    total_cooperative_threads, qos, passed_admissions,
		    WQ_COOPERATIVE_BUCKET_UNSERVICED);
		goto out;
	}

	/*
	 * If the number of threads scheduled at QoS buckets >= the input QoS
	 * would exceed the max we want for the pool, deny this thread
	 */
	uint64_t aggregate_down_to_qos = workq_num_cooperative_threads_scheduled_to_qos_locked(wq, qos);
	passed_admissions = (aggregate_down_to_qos < wq_cooperative_queue_max_size(wq));
	WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE, aggregate_down_to_qos,
	    qos, passed_admissions, WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS);

	if (!passed_admissions && may_start_timer) {
		workq_schedule_delayed_thread_creation(wq, 0);
	}

out:
	if (exclude_thread_as_scheduled) {
		_wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
	}
	return passed_admissions;
}
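
/*
 * Illustrative admission outcomes (assuming, for the sake of the example,
 * wq_cooperative_queue_max_size(wq) == 4):
 *
 *   3 cooperative threads scheduled in total
 *       -> admitted (WQ_COOPERATIVE_POOL_UNSATURATED)
 *   4 scheduled, but the requested bucket has pending work and no thread
 *       -> admitted (WQ_COOPERATIVE_BUCKET_UNSERVICED)
 *   4 scheduled at or above the requested QoS and the bucket is serviced
 *       -> denied (WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS), optionally
 *          arming the delayed thread creation timer
 */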

/*
 * returns true if the best request for the pool changed as a result of
 * enqueuing this thread request.
 */
static bool
workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req)
{
	assert(req->tr_state == WORKQ_TR_STATE_NEW);

	req->tr_state = WORKQ_TR_STATE_QUEUED;
	wq->wq_reqcount += req->tr_count;

	if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
		assert(wq->wq_event_manager_threadreq == NULL);
		assert(req->tr_flags & WORKQ_TR_FLAG_KEVENT);
		assert(req->tr_count == 1);
		wq->wq_event_manager_threadreq = req;
		return true;
	}

	if (workq_threadreq_is_cooperative(req)) {
		assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
		assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);

		struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
		STAILQ_INSERT_TAIL(bucket, req, tr_link);

		return _wq_cooperative_queue_refresh_best_req_qos(wq);
	}

	struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req);

	priority_queue_entry_set_sched_pri(q, &req->tr_entry,
	    workq_priority_for_req(req), false);

	if (priority_queue_insert(q, &req->tr_entry)) {
		if (workq_threadreq_is_nonovercommit(req)) {
			_wq_thactive_refresh_best_constrained_req_qos(wq);
		}
		return true;
	}
	return false;
}

/*
 * returns true if one of the following is true (so as to update creator if
 * needed):
 *
 * (a) the next highest request of the pool we dequeued the request from
 *     changed
 * (b) the next highest request of the pool the current thread used to be
 *     a part of changed
 *
 * For the overcommit, special and constrained pools, the next highest QoS
 * for each pool is just the MAX of the pending requests, so tracking (a) is
 * sufficient.
 *
 * But for the cooperative thread pool, the next highest QoS depends on the
 * scheduled counts in the pool as well. So if the current thread was
 * cooperative in its previous logical run, i.e. (b), that can also affect
 * the cooperative pool's next best QoS request.
 */
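
/*
 * Hypothetical illustration of case (b): a thread whose previous logical
 * run was cooperative dequeues an overcommit request. Case (a) only
 * re-evaluates the overcommit priority queue, but because the thread's
 * scheduled count has been removed from its old cooperative bucket, the
 * cooperative pool's best request may have changed as well; this is why
 * callers pass cooperative_sched_count_changed independently of the type
 * of the dequeued request.
 */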
static bool
workq_threadreq_dequeue(struct workqueue *wq, workq_threadreq_t req,
    bool cooperative_sched_count_changed)
{
	wq->wq_reqcount--;

	bool next_highest_request_changed = false;

	if (--req->tr_count == 0) {
		if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
			assert(wq->wq_event_manager_threadreq == req);
			assert(req->tr_count == 0);
			wq->wq_event_manager_threadreq = NULL;

			/* If a cooperative thread was the one which picked up the manager
			 * thread request, we need to reevaluate the cooperative pool
			 * anyway.
			 */
			if (cooperative_sched_count_changed) {
				_wq_cooperative_queue_refresh_best_req_qos(wq);
			}
			return true;
		}

		if (workq_threadreq_is_cooperative(req)) {
			assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
			assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);
			/* Account for the fact that BG and MT are coalesced when
			 * calculating the best request for the cooperative pool.
			 */
			assert(_wq_bucket(req->tr_qos) == _wq_bucket(wq->wq_cooperative_queue_best_req_qos));

			struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
			__assert_only workq_threadreq_t head = STAILQ_FIRST(bucket);

			assert(head == req);
			STAILQ_REMOVE_HEAD(bucket, tr_link);

			/*
			 * If the request we're dequeueing is cooperative, then the sched
			 * counts definitely changed.
			 */
			assert(cooperative_sched_count_changed);
		}

		/*
		 * We want to do the cooperative pool refresh after dequeueing a
		 * cooperative thread request, if any (to combine both effects into a
		 * single refresh operation).
		 */
		if (cooperative_sched_count_changed) {
			next_highest_request_changed = _wq_cooperative_queue_refresh_best_req_qos(wq);
		}

		if (!workq_threadreq_is_cooperative(req)) {
			/*
			 * All other types of requests are enqueued in priority queues.
			 */
			if (priority_queue_remove(workq_priority_queue_for_req(wq, req),
			    &req->tr_entry)) {
				next_highest_request_changed |= true;
				if (workq_threadreq_is_nonovercommit(req)) {
					_wq_thactive_refresh_best_constrained_req_qos(wq);
				}
			}
		}
	}

	return next_highest_request_changed;
}
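
/*
 * Worked example (illustrative, under the semantics described above): suppose
 * a thread that was cooperative in its previous logical run dequeues an
 * overcommit request. The request is removed from the overcommit priority
 * queue (case (a)), and because the thread leaves the cooperative pool for
 * this run, its old bucket's scheduled count drops, which can promote a
 * pending cooperative request (case (b)). Both effects funnel through the
 * single _wq_cooperative_queue_refresh_best_req_qos() call above, so the
 * caller gets one boolean covering both.
 */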

static void
workq_threadreq_destroy(proc_t p, workq_threadreq_t req)
{
	req->tr_state = WORKQ_TR_STATE_CANCELED;
	if (req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT)) {
		kqueue_threadreq_cancel(p, req);
	} else {
		zfree(workq_zone_threadreq, req);
	}
}
#pragma mark workqueue thread creation thread calls

static inline bool
workq_thread_call_prepost(struct workqueue *wq, uint32_t sched, uint32_t pend,
    uint32_t fail_mask)
{
	uint32_t old_flags, new_flags;

	os_atomic_rmw_loop(&wq->wq_flags, old_flags, new_flags, acquire, {
		if (__improbable(old_flags & (WQ_EXITING | sched | pend | fail_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		if (__improbable(old_flags & WQ_PROC_SUSPENDED)) {
			new_flags = old_flags | pend;
		} else {
			new_flags = old_flags | sched;
		}
	});

	return (old_flags & WQ_PROC_SUSPENDED) == 0;
}
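
/*
 * Summary of the prepost outcomes above for a delayed call (an illustrative
 * table, restating the code):
 *
 *	flags before                     result
 *	-------------------------------  -----------------------------------
 *	neither bit set, not suspended   WQ_DELAYED_CALL_SCHEDULED set, true
 *	WQ_PROC_SUSPENDED                WQ_DELAYED_CALL_PENDED set, false
 *	WQ_EXITING / already scheduled,
 *	pended, or a fail_mask bit set   unchanged, false
 *
 * A "false" return means the caller must not arm the thread call; a pended
 * bit is redriven by workq_proc_resumed() below.
 */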

#define WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART 0x1

static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags)
{
	assert(!preemption_enabled());

	if (!workq_thread_call_prepost(wq, WQ_DELAYED_CALL_SCHEDULED,
	    WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED |
	    WQ_IMMEDIATE_CALL_SCHEDULED)) {
		return false;
	}

	uint64_t now = mach_absolute_time();

	if (flags & WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART) {
		/* do not change the window */
	} else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) {
		wq->wq_timer_interval *= 2;
		if (wq->wq_timer_interval > wq_max_timer_interval.abstime) {
			wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime;
		}
	} else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) {
		wq->wq_timer_interval /= 2;
		if (wq->wq_timer_interval < wq_stalled_window.abstime) {
			wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
		}
	}

	WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
	    _wq_flags(wq), wq->wq_timer_interval);

	thread_call_t call = wq->wq_delayed_call;
	uintptr_t arg = WQ_DELAYED_CALL_SCHEDULED;
	uint64_t deadline = now + wq->wq_timer_interval;
	if (thread_call_enter1_delayed(call, (void *)arg, deadline)) {
		panic("delayed_call was already enqueued");
	}
	return true;
}
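
/*
 * Worked example of the adaptive window above (numbers are assumptions; the
 * real bounds come from the wq_stalled_window and wq_max_timer_interval
 * tunables): with a window W, if the previous timer fired within W of this
 * call, thread creation is running hot and W doubles, capped at the max
 * interval; if more than 2*W has elapsed, the workqueue has gone quiet and W
 * halves, floored at the stalled window. E.g. W = 100us and a last run 60us
 * ago gives W = 200us; W = 100us and a last run 250us ago gives W = 50us,
 * clamped back up to the floor if 50us is below it.
 */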

static void
workq_schedule_immediate_thread_creation(struct workqueue *wq)
{
	assert(!preemption_enabled());

	if (workq_thread_call_prepost(wq, WQ_IMMEDIATE_CALL_SCHEDULED,
	    WQ_IMMEDIATE_CALL_PENDED, 0)) {
		WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
		    _wq_flags(wq), 0);

		uintptr_t arg = WQ_IMMEDIATE_CALL_SCHEDULED;
		if (thread_call_enter1(wq->wq_immediate_call, (void *)arg)) {
			panic("immediate_call was already enqueued");
		}
	}
}

void
workq_proc_suspended(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);

	if (wq) {
		os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed);
	}
}

void
workq_proc_resumed(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t wq_flags;

	if (!wq) {
		return;
	}

	wq_flags = os_atomic_andnot_orig(&wq->wq_flags, WQ_PROC_SUSPENDED |
	    WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED, relaxed);
	if ((wq_flags & WQ_EXITING) == 0) {
		disable_preemption();
		if (wq_flags & WQ_IMMEDIATE_CALL_PENDED) {
			workq_schedule_immediate_thread_creation(wq);
		} else if (wq_flags & WQ_DELAYED_CALL_PENDED) {
			workq_schedule_delayed_thread_creation(wq,
			    WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART);
		}
		enable_preemption();
	}
}
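
/*
 * Illustrative timeline (an assumption about a typical caller, e.g. a
 * SIGSTOP/SIGCONT pair, not a literal trace):
 *
 *	workq_proc_suspended(p)      -> WQ_PROC_SUSPENDED set
 *	workq_schedule_*_creation()  -> prepost sees the suspend bit and only
 *	                                records WQ_*_CALL_PENDED
 *	workq_proc_resumed(p)        -> atomically clears the suspend and
 *	                                pended bits, then re-arms the call,
 *	                                preferring the immediate one
 */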

/**
 * Returns whether lastblocked_tsp is within wq_stalled_window usecs of now.
 */
static bool
workq_thread_is_busy(uint64_t now, _Atomic uint64_t *lastblocked_tsp)
{
	uint64_t lastblocked_ts = os_atomic_load_wide(lastblocked_tsp, relaxed);
	if (now <= lastblocked_ts) {
		/*
		 * Because the update of the timestamp when a thread blocks
		 * isn't serialized against us looking at it (i.e. we don't hold
		 * the workq lock), it's possible to have a timestamp that matches
		 * the current time or that even looks to be in the future relative
		 * to when we grabbed the current time...
		 *
		 * Just treat this as a busy thread since it must have just blocked.
		 */
		return true;
	}
	return (now - lastblocked_ts) < wq_stalled_window.abstime;
}
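
/*
 * Example (illustrative numbers in abstime units): with a stalled window of
 * 200, now = 1000 and lastblocked_ts = 900 reports busy (delta 100 < 200),
 * while lastblocked_ts = 700 reports stalled (delta 300 >= 200). A
 * lastblocked_ts of 1005, possible due to the unserialized store described
 * above, also reports busy.
 */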

static void
workq_add_new_threads_call(void *_p, void *flags)
{
	proc_t p = _p;
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t my_flag = (uint32_t)(uintptr_t)flags;

	/*
	 * workq_exit() will set the workqueue to NULL before
	 * it cancels thread calls.
	 */
	if (!wq) {
		return;
	}

	assert((my_flag == WQ_DELAYED_CALL_SCHEDULED) ||
	    (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED));

	WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, _wq_flags(wq),
	    wq->wq_nthreads, wq->wq_thidlecount);

	workq_lock_spin(wq);

	wq->wq_thread_call_last_run = mach_absolute_time();
	os_atomic_andnot(&wq->wq_flags, my_flag, release);

	/* This can drop the workqueue lock, and take it again */
	workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);

	workq_unlock(wq);

	WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0,
	    wq->wq_nthreads, wq->wq_thidlecount);
}

#pragma mark thread state tracking

static void
workq_sched_callback(int type, thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	struct uthread *uth = get_bsdthread_info(thread);
	struct workqueue *wq = proc_get_wqptr(tro->tro_proc);
	thread_qos_t req_qos, qos = uth->uu_workq_pri.qos_bucket;
	wq_thactive_t old_thactive;
	bool start_timer = false;

	if (qos == WORKQ_THREAD_QOS_MANAGER) {
		return;
	}

	switch (type) {
	case SCHED_CALL_BLOCK:
		old_thactive = _wq_thactive_dec(wq, qos);
		req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);

		/*
		 * Remember the timestamp of the last thread that blocked in this
		 * bucket; it is used by admission checks to ignore one thread
		 * being inactive if this timestamp is recent enough.
		 *
		 * If we collide with another thread trying to update the
		 * last_blocked (really unlikely since another thread would have to
		 * get scheduled and then block after we start down this path), it's
		 * not a problem. Either timestamp is adequate, so no need to retry.
		 */
		os_atomic_store_wide(&wq->wq_lastblocked_ts[_wq_bucket(qos)],
		    thread_last_run_time(thread), relaxed);

		if (req_qos == THREAD_QOS_UNSPECIFIED) {
			/*
			 * No pending request at the moment we could unblock, move on.
			 */
		} else if (qos < req_qos) {
			/*
			 * The blocking thread is at a lower QoS than the highest currently
			 * pending constrained request, nothing has to be redriven.
			 */
		} else {
			uint32_t max_busycount, old_req_count;
			old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
			    req_qos, NULL, &max_busycount);
			/*
			 * If it is possible that may_start_constrained_thread had refused
			 * admission due to being over the max concurrency, we may need to
			 * spin up a new thread.
			 *
			 * We take into account the maximum number of busy threads
			 * that can affect may_start_constrained_thread, as looking at the
			 * actual number may_start_constrained_thread will see is racy.
			 *
			 * IOW at NCPU = 4, for IN (req_qos = 1), if the old req count is
			 * between NCPU (4) and NCPU - 2 (2) we need to redrive.
			 */
			uint32_t conc = wq_max_parallelism[_wq_bucket(qos)];
			if (old_req_count <= conc && conc <= old_req_count + max_busycount) {
				start_timer = workq_schedule_delayed_thread_creation(wq, 0);
			}
		}
		if (__improbable(kdebug_enable)) {
			__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
			    old_thactive, qos, NULL, NULL);
			WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq,
			    old - 1, qos | (req_qos << 8),
			    wq->wq_reqcount << 1 | start_timer);
		}
		break;

	case SCHED_CALL_UNBLOCK:
		/*
		 * We cannot take the workqueue_lock here...
		 * an UNBLOCK can occur from a timer event which
		 * is run from an interrupt context... if the workqueue_lock
		 * is already held by this processor, we'll deadlock...
		 * the thread lock for the thread being UNBLOCKED
		 * is also held.
		 */
		old_thactive = _wq_thactive_inc(wq, qos);
		if (__improbable(kdebug_enable)) {
			__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
			    old_thactive, qos, NULL, NULL);
			req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
			WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq,
			    old + 1, qos | (req_qos << 8),
			    wq->wq_threads_scheduled);
		}
		break;
	}
}
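
/*
 * Worked example of the redrive test above (numbers illustrative): with
 * conc = 4 for the bucket, old_req_count = 3 and max_busycount = 2, the
 * range [3, 5] contains 4, so the thread that just blocked may have been the
 * reason a constrained request was refused admission, and the delayed
 * creation timer is armed. With old_req_count = 6, admission was over the
 * limit regardless of this thread; with old_req_count = 1, there was already
 * headroom. Neither case redrives.
 */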

#pragma mark workq lifecycle

void
workq_reference(struct workqueue *wq)
{
	os_ref_retain(&wq->wq_refcnt);
}

static void
workq_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct workqueue *wq;
	struct turnstile *ts;

	wq = mpsc_queue_element(e, struct workqueue, wq_destroy_link);
	assert(dq == &workq_deallocate_queue);

	turnstile_complete((uintptr_t)wq, &wq->wq_turnstile, &ts, TURNSTILE_WORKQS);
	assert(ts);
	turnstile_cleanup();
	turnstile_deallocate(ts);

	lck_ticket_destroy(&wq->wq_lock, &workq_lck_grp);
	zfree(workq_zone_workqueue, wq);
}

static void
workq_deallocate(struct workqueue *wq)
{
	if (os_ref_release_relaxed(&wq->wq_refcnt) == 0) {
		workq_deallocate_queue_invoke(&wq->wq_destroy_link,
		    &workq_deallocate_queue);
	}
}

void
workq_deallocate_safe(struct workqueue *wq)
{
	if (__improbable(os_ref_release_relaxed(&wq->wq_refcnt) == 0)) {
		mpsc_daemon_enqueue(&workq_deallocate_queue, &wq->wq_destroy_link,
		    MPSC_QUEUE_DISABLE_PREEMPTION);
	}
}
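
/*
 * Design note with an illustrative rule of thumb (an assumption, not a
 * formal contract): workq_deallocate() tears down the turnstile inline when
 * the last reference drops, so it suits callers that can block;
 * workq_deallocate_safe() defers that teardown to the MPSC daemon queue,
 * making it the variant for contexts holding spinlocks or running with
 * preemption disabled.
 */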

/**
 * Set up per-process state for the workqueue.
 */
int
workq_open(struct proc *p, __unused struct workq_open_args *uap,
    __unused int32_t *retval)
{
	struct workqueue *wq;
	int error = 0;

	if ((p->p_lflag & P_LREGISTER) == 0) {
		return EINVAL;
	}

	if (wq_init_constrained_limit) {
		uint32_t limit, num_cpus = ml_wait_max_cpus();

		/*
		 * Set up the limit for the constrained pool.
		 * This is a virtual pool in that we don't
		 * maintain it on a separate idle and run list.
		 */
		limit = num_cpus * WORKQUEUE_CONSTRAINED_FACTOR;

		if (limit > wq_max_constrained_threads) {
			wq_max_constrained_threads = limit;
		}

		if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) {
			wq_max_threads = WQ_THACTIVE_BUCKET_HALF;
		}
		if (wq_max_threads > CONFIG_THREAD_MAX - 20) {
			wq_max_threads = CONFIG_THREAD_MAX - 20;
		}

		wq_death_max_load = (uint16_t)fls(num_cpus) + 1;

		for (thread_qos_t qos = WORKQ_THREAD_QOS_MIN; qos <= WORKQ_THREAD_QOS_MAX; qos++) {
			wq_max_parallelism[_wq_bucket(qos)] =
			    qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL);
		}

		wq_max_cooperative_threads = num_cpus;

		wq_init_constrained_limit = 0;
	}

	if (proc_get_wqptr(p) == NULL) {
		if (proc_init_wqptr_or_wait(p) == FALSE) {
			assert(proc_get_wqptr(p) != NULL);
			goto out;
		}

		wq = zalloc_flags(workq_zone_workqueue, Z_WAITOK | Z_ZERO);

		os_ref_init_count(&wq->wq_refcnt, &workq_refgrp, 1);

		// Start the event manager at the priority hinted at by the policy engine
		thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task());
		pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0);
		wq->wq_event_manager_priority = (uint32_t)pp;
		wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
		wq->wq_proc = p;
		turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(),
		    TURNSTILE_WORKQS);

		TAILQ_INIT(&wq->wq_thrunlist);
		TAILQ_INIT(&wq->wq_thnewlist);
		TAILQ_INIT(&wq->wq_thidlelist);
		priority_queue_init(&wq->wq_overcommit_queue);
		priority_queue_init(&wq->wq_constrained_queue);
		priority_queue_init(&wq->wq_special_queue);
		for (int bucket = 0; bucket < WORKQ_NUM_QOS_BUCKETS; bucket++) {
			STAILQ_INIT(&wq->wq_cooperative_queue[bucket]);
		}

		/* We are only using the delayed thread call for the constrained pool,
		 * which can't have work at >= UI QoS, so we can be fine with a
		 * UI QoS thread call.
		 */
		wq->wq_delayed_call = thread_call_allocate_with_qos(
			workq_add_new_threads_call, p, THREAD_QOS_USER_INTERACTIVE,
			THREAD_CALL_OPTIONS_ONCE);
		wq->wq_immediate_call = thread_call_allocate_with_options(
			workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
			THREAD_CALL_OPTIONS_ONCE);
		wq->wq_death_call = thread_call_allocate_with_options(
			workq_kill_old_threads_call, wq,
			THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);

		lck_ticket_init(&wq->wq_lock, &workq_lck_grp);

		WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq,
		    VM_KERNEL_ADDRHIDE(wq), 0, 0);
		proc_set_wqptr(p, wq);
	}
out:

	return error;
}
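
/*
 * Worked example of the one-time sizing above (illustrative; the actual
 * WORKQUEUE_CONSTRAINED_FACTOR value is defined elsewhere in this file):
 * with 8 logical CPUs and a factor F, the constrained pool admits up to
 * 8 * F threads, wq_death_max_load becomes fls(8) + 1 = 5, and the
 * cooperative pool is capped at 8 threads, one per CPU.
 */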

/*
 * Routine:	workq_mark_exiting
 *
 * Function:	Mark the work queue such that new threads will not be added to
 *		the work queue after we return.
 *
 * Conditions:	Called against the current process.
 */
void
workq_mark_exiting(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t wq_flags;
	workq_threadreq_t mgr_req;

	if (!wq) {
		return;
	}

	WQ_TRACE_WQ(TRACE_wq_pthread_exit | DBG_FUNC_START, wq, 0, 0, 0);

	workq_lock_spin(wq);

	wq_flags = os_atomic_or_orig(&wq->wq_flags, WQ_EXITING, relaxed);
	if (__improbable(wq_flags & WQ_EXITING)) {
		panic("workq_mark_exiting called twice");
	}

	/*
	 * Opportunistically try to cancel thread calls that are likely in flight.
	 * workq_exit() will do the proper cleanup.
	 */
	if (wq_flags & WQ_IMMEDIATE_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_immediate_call);
	}
	if (wq_flags & WQ_DELAYED_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_delayed_call);
	}
	if (wq_flags & WQ_DEATH_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_death_call);
	}

	mgr_req = wq->wq_event_manager_threadreq;
	wq->wq_event_manager_threadreq = NULL;
	wq->wq_reqcount = 0; /* workq_schedule_creator must not look at queues */
	wq->wq_creator = NULL;
	workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);

	workq_unlock(wq);

	if (mgr_req) {
		kqueue_threadreq_cancel(p, mgr_req);
	}
	/*
	 * No one touches the priority queues once WQ_EXITING is set.
	 * It is hence safe to do the tear down without holding any lock.
	 */
	priority_queue_destroy(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});
	priority_queue_destroy(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});
	priority_queue_destroy(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});

	WQ_TRACE(TRACE_wq_pthread_exit | DBG_FUNC_END, 0, 0, 0, 0);
}

/*
 * Routine:	workq_exit
 *
 * Function:	Clean up the work queue structure(s) now that there are no
 *		threads left running inside the work queue (except possibly
 *		current_thread).
 *
 * Conditions:	Called by the last thread in the process.
 *		Called against the current process.
 */
void
workq_exit(struct proc *p)
{
	struct workqueue *wq;
	struct uthread *uth, *tmp;

	wq = os_atomic_xchg(&p->p_wqptr, NULL, relaxed);
	if (wq != NULL) {
		thread_t th = current_thread();

		WQ_TRACE_WQ(TRACE_wq_workqueue_exit | DBG_FUNC_START, wq, 0, 0, 0);

		if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
			/*
			 * <rdar://problem/40111515> Make sure we will no longer call the
			 * sched call, if we ever block this thread, which the cancel_wait
			 * below can do.
			 */
			thread_sched_call(th, NULL);
		}

		/*
		 * Thread calls are always scheduled by the proc itself or under the
		 * workqueue spinlock if WQ_EXITING is not yet set.
		 *
		 * Either way, when this runs, the proc has no threads left beside
		 * the one running this very code, so we know no thread call can be
		 * dispatched anymore.
		 */
		thread_call_cancel_wait(wq->wq_delayed_call);
		thread_call_cancel_wait(wq->wq_immediate_call);
		thread_call_cancel_wait(wq->wq_death_call);
		thread_call_free(wq->wq_delayed_call);
		thread_call_free(wq->wq_immediate_call);
		thread_call_free(wq->wq_death_call);

		/*
		 * Clean up workqueue data structures for threads that exited and
		 * didn't get a chance to clean up after themselves.
		 *
		 * Idle/new threads should have been interrupted and died on their own.
		 */
		TAILQ_FOREACH_SAFE(uth, &wq->wq_thrunlist, uu_workq_entry, tmp) {
			thread_t mth = get_machthread(uth);
			thread_sched_call(mth, NULL);
			thread_deallocate(mth);
		}
		assert(TAILQ_EMPTY(&wq->wq_thnewlist));
		assert(TAILQ_EMPTY(&wq->wq_thidlelist));

		WQ_TRACE_WQ(TRACE_wq_destroy | DBG_FUNC_END, wq,
		    VM_KERNEL_ADDRHIDE(wq), 0, 0);

		workq_deallocate(wq);

		WQ_TRACE(TRACE_wq_workqueue_exit | DBG_FUNC_END, 0, 0, 0, 0);
	}
}

#pragma mark bsd thread control

bool
bsdthread_part_of_cooperative_workqueue(struct uthread *uth)
{
	return (workq_thread_is_cooperative(uth) || workq_thread_is_nonovercommit(uth)) &&
	       (uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER) &&
	       (!workq_thread_is_permanently_bound(uth));
}

static bool
_pthread_priority_to_policy(pthread_priority_t priority,
    thread_qos_policy_data_t *data)
{
	data->qos_tier = _pthread_priority_thread_qos(priority);
	data->tier_importance = _pthread_priority_relpri(priority);
	if (data->qos_tier == THREAD_QOS_UNSPECIFIED || data->tier_importance > 0 ||
	    data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
		return false;
	}
	return true;
}
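
/*
 * Illustrative decode (an assumption about the encoding helpers, which are
 * defined in the pthread shims): a pthread_priority_t carrying utility QoS
 * with a relative priority of -5 yields data->qos_tier = THREAD_QOS_UTILITY
 * and data->tier_importance = -5, which passes the checks above; an
 * unspecified QoS or a positive relpri is rejected, so it never reaches
 * thread_policy_set_internal().
 */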

static int
bsdthread_set_self(proc_t p, thread_t th, pthread_priority_t priority,
    mach_port_name_t voucher, enum workq_set_self_flags flags)
{
	struct uthread *uth = get_bsdthread_info(th);
	struct workqueue *wq = proc_get_wqptr(p);

	kern_return_t kr;
	int unbind_rv = 0, qos_rv = 0, voucher_rv = 0, fixedpri_rv = 0;
	bool is_wq_thread = (thread_get_tag(th) & THREAD_TAG_WORKQUEUE);

	assert(th == current_thread());
	if (flags & WORKQ_SET_SELF_WQ_KEVENT_UNBIND) {
		if (!is_wq_thread) {
			unbind_rv = EINVAL;
			goto qos;
		}

		if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
			unbind_rv = EINVAL;
			goto qos;
		}

		workq_threadreq_t kqr = uth->uu_kqr_bound;
		if (kqr == NULL) {
			unbind_rv = EALREADY;
			goto qos;
		}

		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			unbind_rv = EINVAL;
			goto qos;
		}

		kqueue_threadreq_unbind(p, kqr);
	}

qos:
	if (flags & (WORKQ_SET_SELF_QOS_FLAG | WORKQ_SET_SELF_QOS_OVERRIDE_FLAG)) {
		assert(flags & WORKQ_SET_SELF_QOS_FLAG);

		thread_qos_policy_data_t new_policy;
		thread_qos_t qos_override = THREAD_QOS_UNSPECIFIED;

		if (!_pthread_priority_to_policy(priority, &new_policy)) {
			qos_rv = EINVAL;
			goto voucher;
		}

		if (flags & WORKQ_SET_SELF_QOS_OVERRIDE_FLAG) {
			/*
			 * If WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is set, we definitely
			 * should have an override QoS in the pthread_priority_t, and we
			 * should only come into this path for cooperative thread requests.
			 */
			if (!_pthread_priority_has_override_qos(priority) ||
			    !_pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}
			qos_override = _pthread_priority_thread_override_qos(priority);
		} else {
			/*
			 * If WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is not set, we definitely
			 * should not have an override QoS in the pthread_priority_t.
			 */
			if (_pthread_priority_has_override_qos(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}
		}

		if (!is_wq_thread) {
			/*
			 * Threads opted out of QoS can't change QoS.
			 */
			if (!thread_has_qos_policy(th)) {
				qos_rv = EPERM;
				goto voucher;
			}
		} else if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER ||
		    uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_ABOVEUI) {
			/*
			 * Workqueue manager threads or threads above UI can't change QoS.
			 */
			qos_rv = EINVAL;
			goto voucher;
		} else {
			/*
			 * For workqueue threads, possibly adjust buckets and redrive thread
			 * requests.
			 *
			 * Transitions allowed:
			 *
			 * overcommit --> non-overcommit
			 * overcommit --> overcommit
			 * non-overcommit --> non-overcommit
			 * non-overcommit --> overcommit (to be deprecated later)
			 * cooperative --> cooperative
			 *
			 * All other transitions aren't allowed, so reject them.
			 */
			if (workq_thread_is_overcommit(uth) && _pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			} else if (workq_thread_is_cooperative(uth) && !_pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			} else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}

			struct uu_workq_policy old_pri, new_pri;
			bool force_run = false;

			if (qos_override) {
				/*
				 * We're in the case of a thread clarifying that it is, e.g., not
				 * at IN req QoS but rather at UT req QoS with an IN override.
				 * However, this can race with a concurrent override happening to
				 * the thread via workq_thread_add_dispatch_override, so this
				 * needs to be synchronized with the thread mutex.
				 */
				thread_mtx_lock(th);
			}

			workq_lock_spin(wq);

			old_pri = new_pri = uth->uu_workq_pri;
			new_pri.qos_req = (thread_qos_t)new_policy.qos_tier;

			if (old_pri.qos_override < qos_override) {
				/*
				 * Since this can race with a concurrent override via
				 * workq_thread_add_dispatch_override, only adjust the override
				 * value if we are higher - this is a saturating function.
				 *
				 * We should not be changing the final override values; we should
				 * simply be redistributing the current value with a different
				 * breakdown of req vs override QoS - assert to that effect.
				 * Therefore, buckets should not change.
				 */
				new_pri.qos_override = qos_override;
				assert(workq_pri_override(new_pri) == workq_pri_override(old_pri));
				assert(workq_pri_bucket(new_pri) == workq_pri_bucket(old_pri));
			}

			/* Adjust scheduled counts for the various types of transitions */

			/* overcommit -> non-overcommit */
			if (workq_thread_is_overcommit(uth) && _pthread_priority_is_nonovercommit(priority)) {
				workq_thread_set_type(uth, 0);
				wq->wq_constrained_threads_scheduled++;

			/* non-overcommit -> overcommit */
			} else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_overcommit(priority)) {
				workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
				force_run = (wq->wq_constrained_threads_scheduled-- == wq_max_constrained_threads);

			/* cooperative -> cooperative */
			} else if (workq_thread_is_cooperative(uth)) {
				_wq_cooperative_queue_scheduled_count_dec(wq, old_pri.qos_req);
				_wq_cooperative_queue_scheduled_count_inc(wq, new_pri.qos_req);

				/* We're changing scheduled counts within the cooperative pool;
				 * we need to run the best cooperative QoS logic again. */
				force_run = _wq_cooperative_queue_refresh_best_req_qos(wq);
			}

			/*
			 * This will set up an override on the thread if any, and will also
			 * call schedule_creator if needed.
			 */
			workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run);
			workq_unlock(wq);

			if (qos_override) {
				thread_mtx_unlock(th);
			}

			if (workq_thread_is_overcommit(uth)) {
				thread_disarm_workqueue_quantum(th);
			} else {
				/* If the thread changed QoS buckets, the quantum duration
				 * may have changed too. */
				thread_arm_workqueue_quantum(th);
			}
		}

		kr = thread_policy_set_internal(th, THREAD_QOS_POLICY,
		    (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			qos_rv = EINVAL;
		}
	}

voucher:
	if (flags & WORKQ_SET_SELF_VOUCHER_FLAG) {
		kr = thread_set_voucher_name(voucher);
		if (kr != KERN_SUCCESS) {
			voucher_rv = ENOENT;
			goto fixedpri;
		}
	}

fixedpri:
	if (qos_rv) {
		goto done;
	}
	if (flags & WORKQ_SET_SELF_FIXEDPRIORITY_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 0};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	} else if (flags & WORKQ_SET_SELF_TIMESHARE_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 1};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	}

done:
	if (qos_rv && voucher_rv) {
		/* Both failed, give that a unique error. */
		return EBADMSG;
	}

	if (unbind_rv) {
		return unbind_rv;
	}

	if (qos_rv) {
		return qos_rv;
	}

	if (voucher_rv) {
		return voucher_rv;
	}

	if (fixedpri_rv) {
		return fixedpri_rv;
	}

	return 0;
}
2611cc9a6355SApple OSS Distributions
2612cc9a6355SApple OSS Distributions static int
2613cc9a6355SApple OSS Distributions bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport,
2614cc9a6355SApple OSS Distributions pthread_priority_t pp, user_addr_t resource)
2615cc9a6355SApple OSS Distributions {
2616cc9a6355SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pp);
2617cc9a6355SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
2618cc9a6355SApple OSS Distributions return EINVAL;
2619cc9a6355SApple OSS Distributions }
2620cc9a6355SApple OSS Distributions
2621a5e72196SApple OSS Distributions thread_t th = port_name_to_thread(kport,
2622e6231be0SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2623cc9a6355SApple OSS Distributions if (th == THREAD_NULL) {
2624cc9a6355SApple OSS Distributions return ESRCH;
2625cc9a6355SApple OSS Distributions }
2626cc9a6355SApple OSS Distributions
26275c2921b0SApple OSS Distributions int rv = proc_thread_qos_add_override(proc_task(p), th, 0, qos, TRUE,
2628cc9a6355SApple OSS Distributions resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2629cc9a6355SApple OSS Distributions
2630cc9a6355SApple OSS Distributions thread_deallocate(th);
2631cc9a6355SApple OSS Distributions return rv;
2632cc9a6355SApple OSS Distributions }
2633cc9a6355SApple OSS Distributions
2634cc9a6355SApple OSS Distributions static int
2635cc9a6355SApple OSS Distributions bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport,
2636cc9a6355SApple OSS Distributions user_addr_t resource)
2637cc9a6355SApple OSS Distributions {
2638a5e72196SApple OSS Distributions thread_t th = port_name_to_thread(kport,
2639e6231be0SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2640cc9a6355SApple OSS Distributions if (th == THREAD_NULL) {
2641cc9a6355SApple OSS Distributions return ESRCH;
2642cc9a6355SApple OSS Distributions }
2643cc9a6355SApple OSS Distributions
26445c2921b0SApple OSS Distributions int rv = proc_thread_qos_remove_override(proc_task(p), th, 0, resource,
2645cc9a6355SApple OSS Distributions THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2646cc9a6355SApple OSS Distributions
2647cc9a6355SApple OSS Distributions thread_deallocate(th);
2648cc9a6355SApple OSS Distributions return rv;
2649cc9a6355SApple OSS Distributions }
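/*
 * Illustrative sketch (not part of this file): userspace reaches the
 * add/remove pair above through the bsdthread_ctl() demultiplexer further
 * below. The __bsdthread_ctl() wrapper named here is an assumption of this
 * sketch; only the BSDTHREAD_CTL_* commands and argument order come from
 * this file.
 *
 *	// Boost the thread owning `kport` to the QoS encoded in `pp` while
 *	// waiting on `resource`, then drop the boost after acquiring it.
 *	int err = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START,
 *	    kport, pp, (uintptr_t)resource);
 *	// ... wait for and acquire the resource ...
 *	err = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END,
 *	    kport, (uintptr_t)resource, 0);
 */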
2650cc9a6355SApple OSS Distributions
2651cc9a6355SApple OSS Distributions static int
2652cc9a6355SApple OSS Distributions workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport,
2653cc9a6355SApple OSS Distributions pthread_priority_t pp, user_addr_t ulock_addr)
2654cc9a6355SApple OSS Distributions {
2655cc9a6355SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
2656cc9a6355SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2657cc9a6355SApple OSS Distributions
2658cc9a6355SApple OSS Distributions thread_qos_t qos_override = _pthread_priority_thread_qos(pp);
2659cc9a6355SApple OSS Distributions if (qos_override == THREAD_QOS_UNSPECIFIED) {
2660cc9a6355SApple OSS Distributions return EINVAL;
2661cc9a6355SApple OSS Distributions }
2662cc9a6355SApple OSS Distributions
2663a5e72196SApple OSS Distributions thread_t thread = port_name_to_thread(kport,
2664e6231be0SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2665cc9a6355SApple OSS Distributions if (thread == THREAD_NULL) {
2666cc9a6355SApple OSS Distributions return ESRCH;
2667cc9a6355SApple OSS Distributions }
2668cc9a6355SApple OSS Distributions
2669cc9a6355SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
2670cc9a6355SApple OSS Distributions if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
2671cc9a6355SApple OSS Distributions thread_deallocate(thread);
2672cc9a6355SApple OSS Distributions return EPERM;
2673cc9a6355SApple OSS Distributions }
2674cc9a6355SApple OSS Distributions
2675cc9a6355SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE,
2676e6231be0SApple OSS Distributions wq, thread_tid(thread), 1, pp);
2677cc9a6355SApple OSS Distributions
2678cc9a6355SApple OSS Distributions thread_mtx_lock(thread);
2679cc9a6355SApple OSS Distributions
2680cc9a6355SApple OSS Distributions if (ulock_addr) {
2681a5e72196SApple OSS Distributions uint32_t val;
2682cc9a6355SApple OSS Distributions int rc;
2683cc9a6355SApple OSS Distributions /*
2684cc9a6355SApple OSS Distributions * Work around the lack of explicit support for 'no-fault copyin'
2685cc9a6355SApple OSS Distributions * <rdar://problem/24999882>; disabling preemption prevents paging in
2686cc9a6355SApple OSS Distributions */
2687cc9a6355SApple OSS Distributions disable_preemption();
2688a5e72196SApple OSS Distributions rc = copyin_atomic32(ulock_addr, &val);
2689cc9a6355SApple OSS Distributions enable_preemption();
2690a5e72196SApple OSS Distributions if (rc == 0 && ulock_owner_value_to_port_name(val) != kport) {
2691cc9a6355SApple OSS Distributions goto out;
2692cc9a6355SApple OSS Distributions }
2693cc9a6355SApple OSS Distributions }
2694cc9a6355SApple OSS Distributions
2695cc9a6355SApple OSS Distributions workq_lock_spin(wq);
2696cc9a6355SApple OSS Distributions
2697cc9a6355SApple OSS Distributions old_pri = uth->uu_workq_pri;
2698cc9a6355SApple OSS Distributions if (old_pri.qos_override >= qos_override) {
2699cc9a6355SApple OSS Distributions /* Nothing to do */
2700cc9a6355SApple OSS Distributions } else if (thread == current_thread()) {
2701cc9a6355SApple OSS Distributions new_pri = old_pri;
2702cc9a6355SApple OSS Distributions new_pri.qos_override = qos_override;
2703cc9a6355SApple OSS Distributions workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
2704cc9a6355SApple OSS Distributions } else {
2705cc9a6355SApple OSS Distributions uth->uu_workq_pri.qos_override = qos_override;
2706cc9a6355SApple OSS Distributions if (qos_override > workq_pri_override(old_pri)) {
2707cc9a6355SApple OSS Distributions thread_set_workq_override(thread, qos_override);
2708cc9a6355SApple OSS Distributions }
2709cc9a6355SApple OSS Distributions }
2710cc9a6355SApple OSS Distributions
2711cc9a6355SApple OSS Distributions workq_unlock(wq);
2712cc9a6355SApple OSS Distributions
2713cc9a6355SApple OSS Distributions out:
2714cc9a6355SApple OSS Distributions thread_mtx_unlock(thread);
2715cc9a6355SApple OSS Distributions thread_deallocate(thread);
2716cc9a6355SApple OSS Distributions return 0;
2717cc9a6355SApple OSS Distributions }
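/*
 * Decision-table summary of the no-fault copyin check above (a sketch of
 * the existing behavior, not new policy):
 *
 *	rc == 0 && owner == kport    -> apply the override
 *	rc == 0 && owner != kport    -> skip; the owner already moved on,
 *	                                there is nobody left worth boosting
 *	rc != 0 (page not resident)  -> apply the override anyway, since we
 *	                                cannot prove it is stale without
 *	                                paging in from a preemption-disabled
 *	                                context
 */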
2718cc9a6355SApple OSS Distributions
2719cc9a6355SApple OSS Distributions static int
2720cc9a6355SApple OSS Distributions workq_thread_reset_dispatch_override(proc_t p, thread_t thread)
2721cc9a6355SApple OSS Distributions {
2722cc9a6355SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
2723cc9a6355SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2724cc9a6355SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
2725cc9a6355SApple OSS Distributions
2726cc9a6355SApple OSS Distributions if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
2727cc9a6355SApple OSS Distributions return EPERM;
2728cc9a6355SApple OSS Distributions }
2729cc9a6355SApple OSS Distributions
2730e6231be0SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_NONE, wq, 0, 0, 0);
2731cc9a6355SApple OSS Distributions
27325c2921b0SApple OSS Distributions /*
27335c2921b0SApple OSS Distributions * workq_thread_add_dispatch_override takes the thread mutex before doing the
27345c2921b0SApple OSS Distributions * copyin to validate the drainer and apply the override. We need to do the
27355c2921b0SApple OSS Distributions * same here. See rdar://84472518
27365c2921b0SApple OSS Distributions */
27375c2921b0SApple OSS Distributions thread_mtx_lock(thread);
27385c2921b0SApple OSS Distributions
2739cc9a6355SApple OSS Distributions workq_lock_spin(wq);
2740cc9a6355SApple OSS Distributions old_pri = new_pri = uth->uu_workq_pri;
2741cc9a6355SApple OSS Distributions new_pri.qos_override = THREAD_QOS_UNSPECIFIED;
2742cc9a6355SApple OSS Distributions workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
2743cc9a6355SApple OSS Distributions workq_unlock(wq);
27445c2921b0SApple OSS Distributions
27455c2921b0SApple OSS Distributions thread_mtx_unlock(thread);
2746cc9a6355SApple OSS Distributions return 0;
2747cc9a6355SApple OSS Distributions }
2748cc9a6355SApple OSS Distributions
2749cc9a6355SApple OSS Distributions static int
2750a5e72196SApple OSS Distributions workq_thread_allow_kill(__unused proc_t p, thread_t thread, bool enable)
2751a5e72196SApple OSS Distributions {
2752a5e72196SApple OSS Distributions if (!(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE)) {
2753a5e72196SApple OSS Distributions // If the thread isn't a workqueue thread, don't set the
2754a5e72196SApple OSS Distributions // kill_allowed bit; however, we still need to return 0
2755a5e72196SApple OSS Distributions // instead of an error code since this code is executed
2756a5e72196SApple OSS Distributions // on the abort path, which must not depend on the
2757a5e72196SApple OSS Distributions // pthread_t (returning an error depends on the pthread_t
2758a5e72196SApple OSS Distributions // via cerror_nocancel)
2759a5e72196SApple OSS Distributions return 0;
2760a5e72196SApple OSS Distributions }
2761a5e72196SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
2762a5e72196SApple OSS Distributions uth->uu_workq_pthread_kill_allowed = enable;
2763a5e72196SApple OSS Distributions return 0;
2764a5e72196SApple OSS Distributions }
2765a5e72196SApple OSS Distributions
2766a5e72196SApple OSS Distributions static int
276794d3b452SApple OSS Distributions workq_allow_sigmask(proc_t p, sigset_t mask)
276894d3b452SApple OSS Distributions {
276994d3b452SApple OSS Distributions if (mask & workq_threadmask) {
277094d3b452SApple OSS Distributions return EINVAL;
277194d3b452SApple OSS Distributions }
277294d3b452SApple OSS Distributions
277394d3b452SApple OSS Distributions proc_lock(p);
277494d3b452SApple OSS Distributions p->p_workq_allow_sigmask |= mask;
277594d3b452SApple OSS Distributions proc_unlock(p);
277694d3b452SApple OSS Distributions
277794d3b452SApple OSS Distributions return 0;
277894d3b452SApple OSS Distributions }
277994d3b452SApple OSS Distributions
278094d3b452SApple OSS Distributions static int
2781cc9a6355SApple OSS Distributions bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags,
2782cc9a6355SApple OSS Distributions int *retval)
2783cc9a6355SApple OSS Distributions {
2784cc9a6355SApple OSS Distributions static_assert(QOS_PARALLELISM_COUNT_LOGICAL ==
2785cc9a6355SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
2786cc9a6355SApple OSS Distributions static_assert(QOS_PARALLELISM_REALTIME ==
2787cc9a6355SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
2788e6231be0SApple OSS Distributions static_assert(QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE ==
2789e6231be0SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC, "cluster shared resource");
2790cc9a6355SApple OSS Distributions
2791e6231be0SApple OSS Distributions if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL | QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE)) {
2792cc9a6355SApple OSS Distributions return EINVAL;
2793cc9a6355SApple OSS Distributions }
2794cc9a6355SApple OSS Distributions
2795e6231be0SApple OSS Distributions /* No cluster shared resource units are present */
2796e6231be0SApple OSS Distributions if (flags & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) {
2797e6231be0SApple OSS Distributions return ENOTSUP;
2798e6231be0SApple OSS Distributions }
2799e6231be0SApple OSS Distributions
2800cc9a6355SApple OSS Distributions if (flags & QOS_PARALLELISM_REALTIME) {
2801cc9a6355SApple OSS Distributions if (qos) {
2802cc9a6355SApple OSS Distributions return EINVAL;
2803cc9a6355SApple OSS Distributions }
2804cc9a6355SApple OSS Distributions } else if (qos == THREAD_QOS_UNSPECIFIED || qos >= THREAD_QOS_LAST) {
2805cc9a6355SApple OSS Distributions return EINVAL;
2806cc9a6355SApple OSS Distributions }
2807cc9a6355SApple OSS Distributions
2808cc9a6355SApple OSS Distributions *retval = qos_max_parallelism(qos, flags);
2809cc9a6355SApple OSS Distributions return 0;
2810cc9a6355SApple OSS Distributions }
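/*
 * Illustrative sketch (not part of this file): userspace reaches this
 * through BSDTHREAD_CTL_QOS_MAX_PARALLELISM, surfaced by libpthread as
 * pthread_qos_max_parallelism(); the wrapper name is an assumption of the
 * sketch. A caller sizing a pool of USER_INITIATED workers might do:
 *
 *	int n = pthread_qos_max_parallelism(QOS_CLASS_USER_INITIATED, 0);
 *	if (n <= 0) {
 *		n = 1;	// be conservative if the query fails
 *	}
 */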
2811cc9a6355SApple OSS Distributions
2812e6231be0SApple OSS Distributions static int
2813e6231be0SApple OSS Distributions bsdthread_dispatch_apply_attr(__unused struct proc *p, thread_t thread,
2814e6231be0SApple OSS Distributions unsigned long flags, uint64_t value1, __unused uint64_t value2)
2815e6231be0SApple OSS Distributions {
2816e6231be0SApple OSS Distributions uint32_t apply_worker_index;
2817e6231be0SApple OSS Distributions kern_return_t kr;
2818e6231be0SApple OSS Distributions
2819e6231be0SApple OSS Distributions switch (flags) {
2820e6231be0SApple OSS Distributions case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET:
2821e6231be0SApple OSS Distributions apply_worker_index = (uint32_t)value1;
2822e6231be0SApple OSS Distributions kr = thread_shared_rsrc_policy_set(thread, apply_worker_index, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2823e6231be0SApple OSS Distributions /*
2824e6231be0SApple OSS Distributions * KERN_INVALID_POLICY indicates that the thread was trying to bind to a
2825e6231be0SApple OSS Distributions * cluster which it was not eligible to execute on.
2826e6231be0SApple OSS Distributions */
2827e6231be0SApple OSS Distributions return (kr == KERN_SUCCESS) ? 0 : ((kr == KERN_INVALID_POLICY) ? ENOTSUP : EINVAL);
2828e6231be0SApple OSS Distributions case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR:
2829e6231be0SApple OSS Distributions kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2830e6231be0SApple OSS Distributions return (kr == KERN_SUCCESS) ? 0 : EINVAL;
2831e6231be0SApple OSS Distributions default:
2832e6231be0SApple OSS Distributions return EINVAL;
2833e6231be0SApple OSS Distributions }
2834e6231be0SApple OSS Distributions }
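/*
 * Illustrative sketch (not part of this file): a dispatch_apply() worker
 * would bracket its slice of the work with set/clear, roughly as below.
 * The __bsdthread_ctl() wrapper and `worker_index` variable are
 * assumptions of the sketch; the flag names come from this file.
 *
 *	__bsdthread_ctl(BSDTHREAD_CTL_DISPATCH_APPLY_ATTR,
 *	    _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET,
 *	    worker_index, 0);
 *	// ... execute this worker's iterations ...
 *	__bsdthread_ctl(BSDTHREAD_CTL_DISPATCH_APPLY_ATTR,
 *	    _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR, 0, 0);
 */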
2835e6231be0SApple OSS Distributions
2836cc9a6355SApple OSS Distributions #define ENSURE_UNUSED(arg) \
2837cc9a6355SApple OSS Distributions ({ if ((arg) != 0) { return EINVAL; } })
2838cc9a6355SApple OSS Distributions
2839cc9a6355SApple OSS Distributions int
2840cc9a6355SApple OSS Distributions bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
2841cc9a6355SApple OSS Distributions {
2842cc9a6355SApple OSS Distributions switch (uap->cmd) {
2843cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_START:
2844cc9a6355SApple OSS Distributions return bsdthread_add_explicit_override(p, (mach_port_name_t)uap->arg1,
2845cc9a6355SApple OSS Distributions (pthread_priority_t)uap->arg2, uap->arg3);
2846cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_END:
2847cc9a6355SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2848cc9a6355SApple OSS Distributions return bsdthread_remove_explicit_override(p, (mach_port_name_t)uap->arg1,
2849cc9a6355SApple OSS Distributions (user_addr_t)uap->arg2);
2850cc9a6355SApple OSS Distributions
2851cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
2852cc9a6355SApple OSS Distributions return workq_thread_add_dispatch_override(p, (mach_port_name_t)uap->arg1,
2853cc9a6355SApple OSS Distributions (pthread_priority_t)uap->arg2, uap->arg3);
2854cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
2855cc9a6355SApple OSS Distributions return workq_thread_reset_dispatch_override(p, current_thread());
2856cc9a6355SApple OSS Distributions
2857cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_SET_SELF:
2858cc9a6355SApple OSS Distributions return bsdthread_set_self(p, current_thread(),
2859cc9a6355SApple OSS Distributions (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2,
2860cc9a6355SApple OSS Distributions (enum workq_set_self_flags)uap->arg3);
2861cc9a6355SApple OSS Distributions
2862cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_QOS_MAX_PARALLELISM:
2863cc9a6355SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2864cc9a6355SApple OSS Distributions return bsdthread_get_max_parallelism((thread_qos_t)uap->arg1,
2865cc9a6355SApple OSS Distributions (unsigned long)uap->arg2, retval);
2866a5e72196SApple OSS Distributions case BSDTHREAD_CTL_WORKQ_ALLOW_KILL:
2867a5e72196SApple OSS Distributions ENSURE_UNUSED(uap->arg2);
2868a5e72196SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2869a5e72196SApple OSS Distributions return workq_thread_allow_kill(p, current_thread(), (bool)uap->arg1);
2870e6231be0SApple OSS Distributions case BSDTHREAD_CTL_DISPATCH_APPLY_ATTR:
2871e6231be0SApple OSS Distributions return bsdthread_dispatch_apply_attr(p, current_thread(),
2872e6231be0SApple OSS Distributions (unsigned long)uap->arg1, (uint64_t)uap->arg2,
2873e6231be0SApple OSS Distributions (uint64_t)uap->arg3);
287494d3b452SApple OSS Distributions case BSDTHREAD_CTL_WORKQ_ALLOW_SIGMASK:
287594d3b452SApple OSS Distributions return workq_allow_sigmask(p, (int)uap->arg1);
2876cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_SET_QOS:
2877cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
2878cc9a6355SApple OSS Distributions case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
2879cc9a6355SApple OSS Distributions /* no longer supported */
2880cc9a6355SApple OSS Distributions return ENOTSUP;
2881cc9a6355SApple OSS Distributions
2882cc9a6355SApple OSS Distributions default:
2883cc9a6355SApple OSS Distributions return EINVAL;
2884cc9a6355SApple OSS Distributions }
2885cc9a6355SApple OSS Distributions }
2886cc9a6355SApple OSS Distributions
2887cc9a6355SApple OSS Distributions #pragma mark workqueue thread manipulation
2888cc9a6355SApple OSS Distributions
2889cc9a6355SApple OSS Distributions static void __dead2
2890a5e72196SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2891a5e72196SApple OSS Distributions struct uthread *uth, uint32_t setup_flags);
2892a5e72196SApple OSS Distributions
2893a5e72196SApple OSS Distributions static void __dead2
2894cc9a6355SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2895a5e72196SApple OSS Distributions struct uthread *uth, uint32_t setup_flags);
2896cc9a6355SApple OSS Distributions
2897cc9a6355SApple OSS Distributions static void workq_setup_and_run(proc_t p, struct uthread *uth, int flags) __dead2;
2898cc9a6355SApple OSS Distributions
2899cc9a6355SApple OSS Distributions #if KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD
2900cc9a6355SApple OSS Distributions static inline uint64_t
2901cc9a6355SApple OSS Distributions workq_trace_req_id(workq_threadreq_t req)
2902cc9a6355SApple OSS Distributions {
2903cc9a6355SApple OSS Distributions struct kqworkloop *kqwl;
2904a5e72196SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
2905a5e72196SApple OSS Distributions kqwl = __container_of(req, struct kqworkloop, kqwl_request);
2906cc9a6355SApple OSS Distributions return kqwl->kqwl_dynamicid;
2907cc9a6355SApple OSS Distributions }
2908cc9a6355SApple OSS Distributions
2909cc9a6355SApple OSS Distributions return VM_KERNEL_ADDRHIDE(req);
2910cc9a6355SApple OSS Distributions }
2911cc9a6355SApple OSS Distributions #endif
2912cc9a6355SApple OSS Distributions
2913cc9a6355SApple OSS Distributions /**
2914cc9a6355SApple OSS Distributions * Entry point for libdispatch to ask for threads
2915cc9a6355SApple OSS Distributions */
2916cc9a6355SApple OSS Distributions static int
2917e6231be0SApple OSS Distributions workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp, bool cooperative)
2918cc9a6355SApple OSS Distributions {
2919cc9a6355SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pp);
2920cc9a6355SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2921cc9a6355SApple OSS Distributions uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI;
2922e6231be0SApple OSS Distributions int ret = 0;
2923cc9a6355SApple OSS Distributions
2924cc9a6355SApple OSS Distributions if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX ||
2925cc9a6355SApple OSS Distributions qos == THREAD_QOS_UNSPECIFIED) {
2926e6231be0SApple OSS Distributions ret = EINVAL;
2927e6231be0SApple OSS Distributions goto exit;
2928cc9a6355SApple OSS Distributions }
2929cc9a6355SApple OSS Distributions
2930cc9a6355SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE,
2931e6231be0SApple OSS Distributions wq, reqcount, pp, cooperative);
2932cc9a6355SApple OSS Distributions
2933cc9a6355SApple OSS Distributions workq_threadreq_t req = zalloc(workq_zone_threadreq);
2934cc9a6355SApple OSS Distributions priority_queue_entry_init(&req->tr_entry);
2935a5e72196SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_NEW;
2936cc9a6355SApple OSS Distributions req->tr_qos = qos;
2937e6231be0SApple OSS Distributions workq_tr_flags_t tr_flags = 0;
2938cc9a6355SApple OSS Distributions
2939cc9a6355SApple OSS Distributions if (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
2940e6231be0SApple OSS Distributions tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
2941cc9a6355SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
2942cc9a6355SApple OSS Distributions }
2943cc9a6355SApple OSS Distributions
2944e6231be0SApple OSS Distributions if (cooperative) {
2945e6231be0SApple OSS Distributions tr_flags |= WORKQ_TR_FLAG_COOPERATIVE;
2946e6231be0SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
2947e6231be0SApple OSS Distributions
2948e6231be0SApple OSS Distributions if (reqcount > 1) {
2949e6231be0SApple OSS Distributions ret = ENOTSUP;
2950e6231be0SApple OSS Distributions goto free_and_exit;
2951e6231be0SApple OSS Distributions }
2952e6231be0SApple OSS Distributions }
2953e6231be0SApple OSS Distributions
2954e6231be0SApple OSS Distributions /* A thread request cannot be both overcommit and cooperative */
2955e6231be0SApple OSS Distributions if (workq_tr_is_cooperative(tr_flags) &&
2956e6231be0SApple OSS Distributions workq_tr_is_overcommit(tr_flags)) {
2957e6231be0SApple OSS Distributions ret = EINVAL;
2958e6231be0SApple OSS Distributions goto free_and_exit;
2959e6231be0SApple OSS Distributions }
2960e6231be0SApple OSS Distributions req->tr_flags = tr_flags;
2961e6231be0SApple OSS Distributions
2962cc9a6355SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE,
2963e6231be0SApple OSS Distributions wq, workq_trace_req_id(req), req->tr_qos, reqcount);
2964cc9a6355SApple OSS Distributions
2965cc9a6355SApple OSS Distributions workq_lock_spin(wq);
2966cc9a6355SApple OSS Distributions do {
2967cc9a6355SApple OSS Distributions if (_wq_exiting(wq)) {
2968e6231be0SApple OSS Distributions goto unlock_and_exit;
2969cc9a6355SApple OSS Distributions }
2970cc9a6355SApple OSS Distributions
2971cc9a6355SApple OSS Distributions /*
2972cc9a6355SApple OSS Distributions * When userspace is asking for parallelism, wake up to (reqcount - 1)
2973cc9a6355SApple OSS Distributions * threads without pacing, to inform the scheduler of that workload.
2974cc9a6355SApple OSS Distributions *
2975cc9a6355SApple OSS Distributions * The last requests, or the ones that failed the admission checks, are
2976cc9a6355SApple OSS Distributions * enqueued and go through the regular creator codepath.
2977cc9a6355SApple OSS Distributions *
2978cc9a6355SApple OSS Distributions * If there aren't enough threads, add one, but re-evaluate everything
2979cc9a6355SApple OSS Distributions * as conditions may now have changed.
2980cc9a6355SApple OSS Distributions */
2981e6231be0SApple OSS Distributions unpaced = reqcount - 1;
2982e6231be0SApple OSS Distributions
2983e6231be0SApple OSS Distributions if (reqcount > 1) {
2984e6231be0SApple OSS Distributions /* We don't handle asking for parallelism on the cooperative
2985e6231be0SApple OSS Distributions * workqueue just yet */
2986e6231be0SApple OSS Distributions assert(!workq_threadreq_is_cooperative(req));
2987e6231be0SApple OSS Distributions
2988e6231be0SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
2989*8d741a5dSApple OSS Distributions unpaced = workq_constrained_allowance(wq, qos, NULL, false, true);
2990cc9a6355SApple OSS Distributions if (unpaced >= reqcount - 1) {
2991cc9a6355SApple OSS Distributions unpaced = reqcount - 1;
2992cc9a6355SApple OSS Distributions }
2993e6231be0SApple OSS Distributions }
2994cc9a6355SApple OSS Distributions }
2995cc9a6355SApple OSS Distributions
2996cc9a6355SApple OSS Distributions /*
2997cc9a6355SApple OSS Distributions * This path does not currently handle custom workloop parameters
2998cc9a6355SApple OSS Distributions * when creating threads for parallelism.
2999cc9a6355SApple OSS Distributions */
3000a5e72196SApple OSS Distributions assert(!(req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS));
3001cc9a6355SApple OSS Distributions
3002cc9a6355SApple OSS Distributions /*
3003cc9a6355SApple OSS Distributions * This is a trimmed down version of workq_threadreq_bind_and_unlock()
3004cc9a6355SApple OSS Distributions */
3005cc9a6355SApple OSS Distributions while (unpaced > 0 && wq->wq_thidlecount) {
3006a5e72196SApple OSS Distributions struct uthread *uth;
3007a5e72196SApple OSS Distributions bool needs_wakeup;
3008a5e72196SApple OSS Distributions uint8_t uu_flags = UT_WORKQ_EARLY_BOUND;
3009a5e72196SApple OSS Distributions
3010e6231be0SApple OSS Distributions if (workq_tr_is_overcommit(req->tr_flags)) {
3011a5e72196SApple OSS Distributions uu_flags |= UT_WORKQ_OVERCOMMIT;
3012a5e72196SApple OSS Distributions }
3013a5e72196SApple OSS Distributions
3014a5e72196SApple OSS Distributions uth = workq_pop_idle_thread(wq, uu_flags, &needs_wakeup);
3015cc9a6355SApple OSS Distributions
3016cc9a6355SApple OSS Distributions _wq_thactive_inc(wq, qos);
3017cc9a6355SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(qos)]++;
3018a5e72196SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
3019cc9a6355SApple OSS Distributions wq->wq_fulfilled++;
3020cc9a6355SApple OSS Distributions
3021cc9a6355SApple OSS Distributions uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
3022cc9a6355SApple OSS Distributions uth->uu_save.uus_workq_park_data.thread_request = req;
3023a5e72196SApple OSS Distributions if (needs_wakeup) {
3024cc9a6355SApple OSS Distributions workq_thread_wakeup(uth);
3025a5e72196SApple OSS Distributions }
3026cc9a6355SApple OSS Distributions unpaced--;
3027cc9a6355SApple OSS Distributions reqcount--;
3028cc9a6355SApple OSS Distributions }
3029cc9a6355SApple OSS Distributions } while (unpaced && wq->wq_nthreads < wq_max_threads &&
3030*8d741a5dSApple OSS Distributions (workq_add_new_idle_thread(p, wq, workq_unpark_continue,
3031*8d741a5dSApple OSS Distributions false, NULL) == KERN_SUCCESS));
3032cc9a6355SApple OSS Distributions
3033cc9a6355SApple OSS Distributions if (_wq_exiting(wq)) {
3034e6231be0SApple OSS Distributions goto unlock_and_exit;
3035cc9a6355SApple OSS Distributions }
3036cc9a6355SApple OSS Distributions
3037bb611c8fSApple OSS Distributions req->tr_count = (uint16_t)reqcount;
3038cc9a6355SApple OSS Distributions if (workq_threadreq_enqueue(wq, req)) {
3039cc9a6355SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
3040cc9a6355SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
3041cc9a6355SApple OSS Distributions }
3042cc9a6355SApple OSS Distributions workq_unlock(wq);
3043cc9a6355SApple OSS Distributions return 0;
3044cc9a6355SApple OSS Distributions
3045e6231be0SApple OSS Distributions unlock_and_exit:
3046cc9a6355SApple OSS Distributions workq_unlock(wq);
3047e6231be0SApple OSS Distributions free_and_exit:
3048cc9a6355SApple OSS Distributions zfree(workq_zone_threadreq, req);
3049e6231be0SApple OSS Distributions exit:
3050e6231be0SApple OSS Distributions return ret;
3051cc9a6355SApple OSS Distributions }
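/*
 * Worked example of the pacing above (a sketch of the existing logic, not
 * new policy): for reqcount == 4 on a non-cooperative, overcommit request
 * with two idle threads available, unpaced starts at 3; the inner while
 * loop binds the two idle threads (unpaced and reqcount both drop by 2),
 * the do/while may add at most one new idle thread and then re-evaluates,
 * and whatever remains lands in req->tr_count for the creator codepath to
 * fulfill.
 */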
3052cc9a6355SApple OSS Distributions
3053cc9a6355SApple OSS Distributions bool
3054a5e72196SApple OSS Distributions workq_kern_threadreq_initiate(struct proc *p, workq_threadreq_t req,
3055a5e72196SApple OSS Distributions struct turnstile *workloop_ts, thread_qos_t qos,
3056a5e72196SApple OSS Distributions workq_kern_threadreq_flags_t flags)
3057cc9a6355SApple OSS Distributions {
3058cc9a6355SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3059cc9a6355SApple OSS Distributions struct uthread *uth = NULL;
3060cc9a6355SApple OSS Distributions
3061a5e72196SApple OSS Distributions assert(req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT));
3062cc9a6355SApple OSS Distributions
3063*8d741a5dSApple OSS Distributions /*
3064*8d741a5dSApple OSS Distributions * For any new initialization changes done to the workqueue thread request
3065*8d741a5dSApple OSS Distributions * below, please also consider whether they are relevant to permanently
3066*8d741a5dSApple OSS Distributions * bound thread requests. See workq_kern_threadreq_permanent_bind.
3067*8d741a5dSApple OSS Distributions */
3068a5e72196SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
3069cc9a6355SApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
3070cc9a6355SApple OSS Distributions qos = thread_workq_qos_for_pri(trp.trp_pri);
3071cc9a6355SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3072cc9a6355SApple OSS Distributions qos = WORKQ_THREAD_QOS_ABOVEUI;
3073cc9a6355SApple OSS Distributions }
3074cc9a6355SApple OSS Distributions }
3075cc9a6355SApple OSS Distributions
3076a5e72196SApple OSS Distributions assert(req->tr_state == WORKQ_TR_STATE_IDLE);
3077cc9a6355SApple OSS Distributions priority_queue_entry_init(&req->tr_entry);
3078cc9a6355SApple OSS Distributions req->tr_count = 1;
3079a5e72196SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_NEW;
3080cc9a6355SApple OSS Distributions req->tr_qos = qos;
3081cc9a6355SApple OSS Distributions
3082cc9a6355SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, wq,
3083e6231be0SApple OSS Distributions workq_trace_req_id(req), qos, 1);
3084cc9a6355SApple OSS Distributions
3085cc9a6355SApple OSS Distributions if (flags & WORKQ_THREADREQ_ATTEMPT_REBIND) {
3086cc9a6355SApple OSS Distributions /*
3087cc9a6355SApple OSS Distributions * We're called back synchronously from the context of
3088cc9a6355SApple OSS Distributions * kqueue_threadreq_unbind, from within workq_thread_return(),
3089cc9a6355SApple OSS Distributions * so we can try to match up this thread with this request!
3090cc9a6355SApple OSS Distributions */
3091cc9a6355SApple OSS Distributions uth = current_uthread();
3092cc9a6355SApple OSS Distributions assert(uth->uu_kqr_bound == NULL);
3093cc9a6355SApple OSS Distributions }
3094cc9a6355SApple OSS Distributions
3095cc9a6355SApple OSS Distributions workq_lock_spin(wq);
3096cc9a6355SApple OSS Distributions if (_wq_exiting(wq)) {
3097a5e72196SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_IDLE;
3098cc9a6355SApple OSS Distributions workq_unlock(wq);
3099cc9a6355SApple OSS Distributions return false;
3100cc9a6355SApple OSS Distributions }
3101cc9a6355SApple OSS Distributions
3102cc9a6355SApple OSS Distributions if (uth && workq_threadreq_admissible(wq, uth, req)) {
3103e6231be0SApple OSS Distributions /* This is the rebind case: we were about to park and unbind
3104e6231be0SApple OSS Distributions * when more events arrived, so keep the binding.
3105e6231be0SApple OSS Distributions */
3106cc9a6355SApple OSS Distributions assert(uth != wq->wq_creator);
3107e6231be0SApple OSS Distributions
3108a5e72196SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
3109a5e72196SApple OSS Distributions _wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
3110a5e72196SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ false);
3111a5e72196SApple OSS Distributions }
3112a5e72196SApple OSS Distributions /*
3113a5e72196SApple OSS Distributions * We're called from workq_kern_threadreq_initiate()
3114a5e72196SApple OSS Distributions * due to an unbind, with the kq req held.
3115a5e72196SApple OSS Distributions */
3116a5e72196SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
3117e6231be0SApple OSS Distributions workq_trace_req_id(req), req->tr_flags, 0);
3118a5e72196SApple OSS Distributions wq->wq_fulfilled++;
3119e6231be0SApple OSS Distributions
3120e7776783SApple OSS Distributions kqueue_threadreq_bind(p, req, get_machthread(uth), 0);
3121cc9a6355SApple OSS Distributions } else {
3122cc9a6355SApple OSS Distributions if (workloop_ts) {
3123cc9a6355SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
3124cc9a6355SApple OSS Distributions turnstile_update_inheritor(workloop_ts, wq->wq_turnstile,
3125cc9a6355SApple OSS Distributions TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
3126cc9a6355SApple OSS Distributions turnstile_update_inheritor_complete(workloop_ts,
3127cc9a6355SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
3128cc9a6355SApple OSS Distributions });
3129cc9a6355SApple OSS Distributions }
3130e6231be0SApple OSS Distributions
3131e6231be0SApple OSS Distributions bool reevaluate_creator_thread_group = false;
3132e6231be0SApple OSS Distributions #if CONFIG_PREADOPT_TG
3133e6231be0SApple OSS Distributions reevaluate_creator_thread_group = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
3134e6231be0SApple OSS Distributions #endif
3135e6231be0SApple OSS Distributions /* We enqueued the highest priority item, or we may need to re-evaluate
3136e6231be0SApple OSS Distributions * whether the creator needs a thread group pre-adoption */
3137e6231be0SApple OSS Distributions if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_thread_group) {
3138cc9a6355SApple OSS Distributions workq_schedule_creator(p, wq, flags);
3139cc9a6355SApple OSS Distributions }
3140cc9a6355SApple OSS Distributions }
3141cc9a6355SApple OSS Distributions
3142a5e72196SApple OSS Distributions workq_unlock(wq);
3143a5e72196SApple OSS Distributions
3144cc9a6355SApple OSS Distributions return true;
3145cc9a6355SApple OSS Distributions }
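/*
 * Outcome summary for workq_kern_threadreq_initiate (a sketch of the
 * branches above, not new behavior):
 *
 *	if (_wq_exiting(wq))           -> tr_state back to IDLE, return false
 *	else if (rebind is admissible) -> bind the current thread directly:
 *	                                  wq_fulfilled++, kqueue_threadreq_bind()
 *	else                           -> enqueue req; kick the creator when it
 *	                                  became the top request or the preadopt
 *	                                  thread group needs re-evaluation
 */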
3146cc9a6355SApple OSS Distributions
3147cc9a6355SApple OSS Distributions void
3148a5e72196SApple OSS Distributions workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req,
3149a5e72196SApple OSS Distributions thread_qos_t qos, workq_kern_threadreq_flags_t flags)
3150cc9a6355SApple OSS Distributions {
3151cc9a6355SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3152a5e72196SApple OSS Distributions bool make_overcommit = false;
3153cc9a6355SApple OSS Distributions
3154a5e72196SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
3155cc9a6355SApple OSS Distributions /* Requests outside-of-QoS shouldn't accept modify operations */
3156cc9a6355SApple OSS Distributions return;
3157cc9a6355SApple OSS Distributions }
3158cc9a6355SApple OSS Distributions
3159cc9a6355SApple OSS Distributions workq_lock_spin(wq);
3160cc9a6355SApple OSS Distributions
3161cc9a6355SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3162a5e72196SApple OSS Distributions assert(req->tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP));
3163cc9a6355SApple OSS Distributions
3164a5e72196SApple OSS Distributions if (req->tr_state == WORKQ_TR_STATE_BINDING) {
3165a5e72196SApple OSS Distributions kqueue_threadreq_bind(p, req, req->tr_thread, 0);
3166cc9a6355SApple OSS Distributions workq_unlock(wq);
3167cc9a6355SApple OSS Distributions return;
3168cc9a6355SApple OSS Distributions }
3169cc9a6355SApple OSS Distributions
3170a5e72196SApple OSS Distributions if (flags & WORKQ_THREADREQ_MAKE_OVERCOMMIT) {
3171e6231be0SApple OSS Distributions /* TODO (rokhinip): We come into this code path for kqwl thread
3172e6231be0SApple OSS Distributions * requests. kqwl requests cannot be cooperative.
3173e6231be0SApple OSS Distributions */
3174e6231be0SApple OSS Distributions assert(!workq_threadreq_is_cooperative(req));
3175e6231be0SApple OSS Distributions
3176e6231be0SApple OSS Distributions make_overcommit = workq_threadreq_is_nonovercommit(req);
3177a5e72196SApple OSS Distributions }
3178cc9a6355SApple OSS Distributions
3179a5e72196SApple OSS Distributions if (_wq_exiting(wq) || (req->tr_qos == qos && !make_overcommit)) {
3180cc9a6355SApple OSS Distributions workq_unlock(wq);
3181cc9a6355SApple OSS Distributions return;
3182cc9a6355SApple OSS Distributions }
3183cc9a6355SApple OSS Distributions
3184cc9a6355SApple OSS Distributions assert(req->tr_count == 1);
3185a5e72196SApple OSS Distributions if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
3186cc9a6355SApple OSS Distributions panic("Invalid thread request (%p) state %d", req, req->tr_state);
3187cc9a6355SApple OSS Distributions }
3188cc9a6355SApple OSS Distributions
3189cc9a6355SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq,
3190e6231be0SApple OSS Distributions workq_trace_req_id(req), qos, 0);
3191cc9a6355SApple OSS Distributions
3192bb611c8fSApple OSS Distributions struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req);
3193cc9a6355SApple OSS Distributions workq_threadreq_t req_max;
3194cc9a6355SApple OSS Distributions
3195cc9a6355SApple OSS Distributions /*
3196cc9a6355SApple OSS Distributions * Stage 1: Dequeue the request from its priority queue.
3197cc9a6355SApple OSS Distributions *
3198cc9a6355SApple OSS Distributions * If we dequeue the root item of the constrained priority queue,
3199cc9a6355SApple OSS Distributions * maintain the best constrained request qos invariant.
3200cc9a6355SApple OSS Distributions */
3201bb611c8fSApple OSS Distributions if (priority_queue_remove(pq, &req->tr_entry)) {
3202e6231be0SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
3203cc9a6355SApple OSS Distributions _wq_thactive_refresh_best_constrained_req_qos(wq);
3204cc9a6355SApple OSS Distributions }
3205cc9a6355SApple OSS Distributions }
3206cc9a6355SApple OSS Distributions
3207cc9a6355SApple OSS Distributions /*
3208cc9a6355SApple OSS Distributions * Stage 2: Apply changes to the thread request
3209cc9a6355SApple OSS Distributions *
3210cc9a6355SApple OSS Distributions * If the item will not become the root of the priority queue it belongs to,
3211cc9a6355SApple OSS Distributions * then we need to wait in line, just enqueue and return quickly.
3212cc9a6355SApple OSS Distributions */
3213a5e72196SApple OSS Distributions if (__improbable(make_overcommit)) {
3214a5e72196SApple OSS Distributions req->tr_flags ^= WORKQ_TR_FLAG_OVERCOMMIT;
3215cc9a6355SApple OSS Distributions pq = workq_priority_queue_for_req(wq, req);
3216cc9a6355SApple OSS Distributions }
3217cc9a6355SApple OSS Distributions req->tr_qos = qos;
3218cc9a6355SApple OSS Distributions
3219cc9a6355SApple OSS Distributions req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry);
3220cc9a6355SApple OSS Distributions if (req_max && req_max->tr_qos >= qos) {
3221bb611c8fSApple OSS Distributions priority_queue_entry_set_sched_pri(pq, &req->tr_entry,
3222bb611c8fSApple OSS Distributions workq_priority_for_req(req), false);
3223bb611c8fSApple OSS Distributions priority_queue_insert(pq, &req->tr_entry);
3224cc9a6355SApple OSS Distributions workq_unlock(wq);
3225cc9a6355SApple OSS Distributions return;
3226cc9a6355SApple OSS Distributions }
3227cc9a6355SApple OSS Distributions
3228cc9a6355SApple OSS Distributions /*
3229cc9a6355SApple OSS Distributions * Stage 3: Reevaluate whether we should run the thread request.
3230cc9a6355SApple OSS Distributions *
3231cc9a6355SApple OSS Distributions * Pretend the thread request is new again:
3232cc9a6355SApple OSS Distributions * - adjust wq_reqcount to not count it anymore.
3233a5e72196SApple OSS Distributions * - make its state WORKQ_TR_STATE_NEW (so that workq_threadreq_bind_and_unlock
3234cc9a6355SApple OSS Distributions * properly attempts a synchronous bind)
3235cc9a6355SApple OSS Distributions */
3236cc9a6355SApple OSS Distributions wq->wq_reqcount--;
3237a5e72196SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_NEW;
3238e6231be0SApple OSS Distributions
3239e6231be0SApple OSS Distributions /* We enqueued the highest priority item, or we may need to re-evaluate
3240e6231be0SApple OSS Distributions * whether the creator needs a thread group pre-adoption (if the request got a new TG) */
3241e6231be0SApple OSS Distributions bool reevaluate_creator_tg = false;
3242e6231be0SApple OSS Distributions
3243e6231be0SApple OSS Distributions #if CONFIG_PREADOPT_TG
3244e6231be0SApple OSS Distributions reevaluate_creator_tg = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
3245e6231be0SApple OSS Distributions #endif
3246e6231be0SApple OSS Distributions
3247e6231be0SApple OSS Distributions if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_tg) {
3248cc9a6355SApple OSS Distributions workq_schedule_creator(p, wq, flags);
3249cc9a6355SApple OSS Distributions }
3250cc9a6355SApple OSS Distributions workq_unlock(wq);
3251cc9a6355SApple OSS Distributions }
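/*
 * The three stages above are the usual "remove, mutate key, reinsert"
 * pattern for a priority queue that lacks a decrease/increase-key
 * operation. As a minimal sketch (field names from this file, control
 * flow simplified):
 *
 *	priority_queue_remove(pq, &req->tr_entry);       // stage 1
 *	req->tr_qos = qos;                               // stage 2: new key
 *	if (req_max && req_max->tr_qos >= qos) {
 *		priority_queue_insert(pq, &req->tr_entry);   // wait in line
 *	} else {
 *		req->tr_state = WORKQ_TR_STATE_NEW;          // stage 3: re-admit
 *		workq_threadreq_enqueue(wq, req);
 *	}
 */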
3252cc9a6355SApple OSS Distributions
3253cc9a6355SApple OSS Distributions void
3254*8d741a5dSApple OSS Distributions workq_kern_bound_thread_reset_pri(workq_threadreq_t req, struct uthread *uth)
3255*8d741a5dSApple OSS Distributions {
3256*8d741a5dSApple OSS Distributions assert(workq_thread_is_permanently_bound(uth));
3257*8d741a5dSApple OSS Distributions
3258*8d741a5dSApple OSS Distributions if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS)) {
3259*8d741a5dSApple OSS Distributions /*
3260*8d741a5dSApple OSS Distributions * For requests outside-of-QoS, we set the scheduling policy and
3261*8d741a5dSApple OSS Distributions * absolute priority for the bound thread right at the initialization
3262*8d741a5dSApple OSS Distributions * time. See workq_kern_threadreq_permanent_bind.
3263*8d741a5dSApple OSS Distributions */
3264*8d741a5dSApple OSS Distributions return;
3265*8d741a5dSApple OSS Distributions }
3266*8d741a5dSApple OSS Distributions
3267*8d741a5dSApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(current_proc());
3268*8d741a5dSApple OSS Distributions if (req) {
3269*8d741a5dSApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3270*8d741a5dSApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
3271*8d741a5dSApple OSS Distributions } else {
3272*8d741a5dSApple OSS Distributions thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
3273*8d741a5dSApple OSS Distributions if (qos > WORKQ_THREAD_QOS_CLEANUP) {
3274*8d741a5dSApple OSS Distributions workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
3275*8d741a5dSApple OSS Distributions } else {
3276*8d741a5dSApple OSS Distributions uth->uu_save.uus_workq_park_data.qos = qos;
3277*8d741a5dSApple OSS Distributions }
3278*8d741a5dSApple OSS Distributions }
3279*8d741a5dSApple OSS Distributions }
3280*8d741a5dSApple OSS Distributions
3281*8d741a5dSApple OSS Distributions void
3282cc9a6355SApple OSS Distributions workq_kern_threadreq_lock(struct proc *p)
3283cc9a6355SApple OSS Distributions {
3284cc9a6355SApple OSS Distributions workq_lock_spin(proc_get_wqptr_fast(p));
3285cc9a6355SApple OSS Distributions }
3286cc9a6355SApple OSS Distributions
3287cc9a6355SApple OSS Distributions void
3288cc9a6355SApple OSS Distributions workq_kern_threadreq_unlock(struct proc *p)
3289cc9a6355SApple OSS Distributions {
3290cc9a6355SApple OSS Distributions workq_unlock(proc_get_wqptr_fast(p));
3291cc9a6355SApple OSS Distributions }
3292cc9a6355SApple OSS Distributions
3293cc9a6355SApple OSS Distributions void
3294a5e72196SApple OSS Distributions workq_kern_threadreq_update_inheritor(struct proc *p, workq_threadreq_t req,
3295cc9a6355SApple OSS Distributions thread_t owner, struct turnstile *wl_ts,
3296cc9a6355SApple OSS Distributions turnstile_update_flags_t flags)
3297cc9a6355SApple OSS Distributions {
3298cc9a6355SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3299cc9a6355SApple OSS Distributions turnstile_inheritor_t inheritor;
3300cc9a6355SApple OSS Distributions
3301cc9a6355SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3302a5e72196SApple OSS Distributions assert(req->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
3303cc9a6355SApple OSS Distributions workq_lock_held(wq);
3304cc9a6355SApple OSS Distributions
3305a5e72196SApple OSS Distributions if (req->tr_state == WORKQ_TR_STATE_BINDING) {
3306a5e72196SApple OSS Distributions kqueue_threadreq_bind(p, req, req->tr_thread,
3307*8d741a5dSApple OSS Distributions KQUEUE_THREADREQ_BIND_NO_INHERITOR_UPDATE);
3308cc9a6355SApple OSS Distributions return;
3309cc9a6355SApple OSS Distributions }
3310cc9a6355SApple OSS Distributions
3311cc9a6355SApple OSS Distributions if (_wq_exiting(wq)) {
3312cc9a6355SApple OSS Distributions inheritor = TURNSTILE_INHERITOR_NULL;
3313cc9a6355SApple OSS Distributions } else {
3314a5e72196SApple OSS Distributions if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
3315cc9a6355SApple OSS Distributions panic("Invalid thread request (%p) state %d", req, req->tr_state);
3316cc9a6355SApple OSS Distributions }
3317cc9a6355SApple OSS Distributions
3318cc9a6355SApple OSS Distributions if (owner) {
3319cc9a6355SApple OSS Distributions inheritor = owner;
3320cc9a6355SApple OSS Distributions flags |= TURNSTILE_INHERITOR_THREAD;
3321cc9a6355SApple OSS Distributions } else {
3322cc9a6355SApple OSS Distributions inheritor = wq->wq_turnstile;
3323cc9a6355SApple OSS Distributions flags |= TURNSTILE_INHERITOR_TURNSTILE;
3324cc9a6355SApple OSS Distributions }
3325cc9a6355SApple OSS Distributions }
3326cc9a6355SApple OSS Distributions
3327cc9a6355SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
3328cc9a6355SApple OSS Distributions turnstile_update_inheritor(wl_ts, inheritor, flags);
3329cc9a6355SApple OSS Distributions });
3330cc9a6355SApple OSS Distributions }
3331cc9a6355SApple OSS Distributions
3332*8d741a5dSApple OSS Distributions /*
3333*8d741a5dSApple OSS Distributions * An entry point for kevent to request a newly created workqueue thread
3334*8d741a5dSApple OSS Distributions * and bind it permanently to the given workqueue thread request.
3335*8d741a5dSApple OSS Distributions *
3336*8d741a5dSApple OSS Distributions * It currently only supports fixed scheduler priority thread requests.
3337*8d741a5dSApple OSS Distributions *
3338*8d741a5dSApple OSS Distributions * The newly created thread counts towards wq_nthreads. This function returns
3339*8d741a5dSApple OSS Distributions * an error if we are above that limit. There is no concept of delayed thread
3340*8d741a5dSApple OSS Distributions * creation for such specially configured kqworkloops.
3341*8d741a5dSApple OSS Distributions *
3342*8d741a5dSApple OSS Distributions * If successful, the newly created thread will be parked in
3343*8d741a5dSApple OSS Distributions * workq_bound_thread_initialize_and_unpark_continue waiting for
3344*8d741a5dSApple OSS Distributions * new incoming events.
3345*8d741a5dSApple OSS Distributions */
3346*8d741a5dSApple OSS Distributions kern_return_t
3347*8d741a5dSApple OSS Distributions workq_kern_threadreq_permanent_bind(struct proc *p, struct workq_threadreq_s *kqr)
3348*8d741a5dSApple OSS Distributions {
3349*8d741a5dSApple OSS Distributions kern_return_t ret = 0;
3350*8d741a5dSApple OSS Distributions thread_t new_thread = NULL;
3351*8d741a5dSApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3352*8d741a5dSApple OSS Distributions
3353*8d741a5dSApple OSS Distributions workq_lock_spin(wq);
3354*8d741a5dSApple OSS Distributions
3355*8d741a5dSApple OSS Distributions if (wq->wq_nthreads >= wq_max_threads) {
3356*8d741a5dSApple OSS Distributions ret = EDOM;
3357*8d741a5dSApple OSS Distributions } else {
3358*8d741a5dSApple OSS Distributions if (kqr->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
3359*8d741a5dSApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(kqr);
3360*8d741a5dSApple OSS Distributions /*
3361*8d741a5dSApple OSS Distributions * For requests outside-of-QoS, we fully initialize the thread
3362*8d741a5dSApple OSS Distributions * request here followed by preadopting the scheduling properties
3363*8d741a5dSApple OSS Distributions * on the newly created bound thread.
3364*8d741a5dSApple OSS Distributions */
3365*8d741a5dSApple OSS Distributions thread_qos_t qos = thread_workq_qos_for_pri(trp.trp_pri);
3366*8d741a5dSApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3367*8d741a5dSApple OSS Distributions qos = WORKQ_THREAD_QOS_ABOVEUI;
3368*8d741a5dSApple OSS Distributions }
3369*8d741a5dSApple OSS Distributions kqr->tr_qos = qos;
3370*8d741a5dSApple OSS Distributions }
3371*8d741a5dSApple OSS Distributions kqr->tr_count = 1;
3372*8d741a5dSApple OSS Distributions
3373*8d741a5dSApple OSS Distributions /* workq_lock dropped and retaken around thread creation below. */
3374*8d741a5dSApple OSS Distributions ret = workq_add_new_idle_thread(p, wq,
3375*8d741a5dSApple OSS Distributions workq_bound_thread_initialize_and_unpark_continue,
3376*8d741a5dSApple OSS Distributions true, &new_thread);
3377*8d741a5dSApple OSS Distributions if (ret == KERN_SUCCESS) {
3378*8d741a5dSApple OSS Distributions struct uthread *uth = get_bsdthread_info(new_thread);
3379*8d741a5dSApple OSS Distributions if (kqr->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
3380*8d741a5dSApple OSS Distributions workq_thread_reset_pri(wq, uth, kqr, /*unpark*/ true);
3381*8d741a5dSApple OSS Distributions }
3382*8d741a5dSApple OSS Distributions /*
3383*8d741a5dSApple OSS Distributions * The newly created thread goes through a full bind to the kqwl
3384*8d741a5dSApple OSS Distributions * right upon creation.
3385*8d741a5dSApple OSS Distributions * It then falls back to soft bind/unbind upon wakeup/park.
3386*8d741a5dSApple OSS Distributions */
3387*8d741a5dSApple OSS Distributions kqueue_threadreq_bind_prepost(p, kqr, uth);
3388*8d741a5dSApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_PERMANENT_BIND;
3389*8d741a5dSApple OSS Distributions }
3390*8d741a5dSApple OSS Distributions }
3391*8d741a5dSApple OSS Distributions
3392*8d741a5dSApple OSS Distributions workq_unlock(wq);
3393*8d741a5dSApple OSS Distributions
3394*8d741a5dSApple OSS Distributions if (ret == KERN_SUCCESS) {
3395*8d741a5dSApple OSS Distributions kqueue_threadreq_bind_commit(p, new_thread);
3396*8d741a5dSApple OSS Distributions }
3397*8d741a5dSApple OSS Distributions return ret;
3398*8d741a5dSApple OSS Distributions }
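/*
 * Illustrative sketch (not part of this file): a kqworkloop configured
 * for a permanently bound thread would call this once at setup time; the
 * error handling shown is an assumption of the sketch.
 *
 *	kern_return_t kr = workq_kern_threadreq_permanent_bind(p, kqr);
 *	if (kr != KERN_SUCCESS) {
 *		// either wq_nthreads hit wq_max_threads (EDOM above) or
 *		// thread creation failed; report the failure to the caller
 *	}
 */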
3399*8d741a5dSApple OSS Distributions
3400*8d741a5dSApple OSS Distributions /*
3401*8d741a5dSApple OSS Distributions * Called with kqlock held. It does not need to take the process-wide
3402*8d741a5dSApple OSS Distributions * global workq lock, which makes it faster.
3403*8d741a5dSApple OSS Distributions */
3404*8d741a5dSApple OSS Distributions void
3405*8d741a5dSApple OSS Distributions workq_kern_bound_thread_wakeup(struct workq_threadreq_s *kqr)
3406*8d741a5dSApple OSS Distributions {
3407*8d741a5dSApple OSS Distributions struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
3408*8d741a5dSApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(kqr);
3409*8d741a5dSApple OSS Distributions
3410*8d741a5dSApple OSS Distributions /*
3411*8d741a5dSApple OSS Distributions * See "Locking model for accessing uu_workq_flags" for more information
3412*8d741a5dSApple OSS Distributions * on how access to uu_workq_flags for the bound thread is synchronized.
3413*8d741a5dSApple OSS Distributions */
3414*8d741a5dSApple OSS Distributions assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING)) == 0);
3415*8d741a5dSApple OSS Distributions
3416*8d741a5dSApple OSS Distributions if (trp.trp_flags & TRP_RELEASED) {
3417*8d741a5dSApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_DYING;
3418*8d741a5dSApple OSS Distributions } else {
3419*8d741a5dSApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_RUNNING;
3420*8d741a5dSApple OSS Distributions }
3421*8d741a5dSApple OSS Distributions
3422*8d741a5dSApple OSS Distributions workq_thread_wakeup(uth);
3423*8d741a5dSApple OSS Distributions }
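/*
 * Illustrative sketch (not kernel code): a minimal user-space model of the
 * wakeup-side handshake above. Flag and trp values are stand-ins; in the
 * kernel the kqlock provides the serialization this model assumes.
 *
 *   #include <assert.h>
 *   #include <stdint.h>
 *
 *   #define UT_RUNNING   0x1u      // stand-in for UT_WORKQ_RUNNING
 *   #define UT_DYING     0x2u      // stand-in for UT_WORKQ_DYING
 *   #define TRP_RELEASED 0x4u      // stand-in for the TRP_RELEASED bit
 *
 *   // Caller holds whatever lock serializes access to *flags.
 *   static void
 *   bound_thread_wakeup(uint16_t *flags, uint32_t trp_flags)
 *   {
 *       assert((*flags & (UT_RUNNING | UT_DYING)) == 0);
 *       if (trp_flags & TRP_RELEASED) {
 *           *flags |= UT_DYING;    // workloop was released: wake to die
 *       } else {
 *           *flags |= UT_RUNNING;  // normal wakeup: wake to process work
 *       }
 *   }
 */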
3424*8d741a5dSApple OSS Distributions
3425*8d741a5dSApple OSS Distributions /*
3426*8d741a5dSApple OSS Distributions  * Called with the kqlock held; the lock is dropped before parking.
3427*8d741a5dSApple OSS Distributions  * It does not need to take the process-wide global workqueue
3428*8d741a5dSApple OSS Distributions  * lock, which makes it faster.
3429*8d741a5dSApple OSS Distributions */
3430*8d741a5dSApple OSS Distributions __attribute__((noreturn, noinline))
3431*8d741a5dSApple OSS Distributions void
3432*8d741a5dSApple OSS Distributions workq_kern_bound_thread_park(struct workq_threadreq_s *kqr)
3433*8d741a5dSApple OSS Distributions {
3434*8d741a5dSApple OSS Distributions struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
3435*8d741a5dSApple OSS Distributions assert(uth == current_uthread());
3436*8d741a5dSApple OSS Distributions
3437*8d741a5dSApple OSS Distributions /*
3438*8d741a5dSApple OSS Distributions * See "Locking model for accessing uu_workq_flags" for more information
3439*8d741a5dSApple OSS Distributions * on how access to uu_workq_flags for the bound thread is synchronized.
3440*8d741a5dSApple OSS Distributions */
3441*8d741a5dSApple OSS Distributions uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING);
3442*8d741a5dSApple OSS Distributions
3443*8d741a5dSApple OSS Distributions thread_disarm_workqueue_quantum(get_machthread(uth));
3444*8d741a5dSApple OSS Distributions
3445*8d741a5dSApple OSS Distributions /*
3446*8d741a5dSApple OSS Distributions * TODO (pavhad) We could do the reusable userspace stack performance
3447*8d741a5dSApple OSS Distributions * optimization here.
3448*8d741a5dSApple OSS Distributions */
3449*8d741a5dSApple OSS Distributions
3450*8d741a5dSApple OSS Distributions kqworkloop_bound_thread_park_prepost(kqr);
3451*8d741a5dSApple OSS Distributions /* KQ_SLEEP bit is set and kqlock is dropped. */
3452*8d741a5dSApple OSS Distributions
3453*8d741a5dSApple OSS Distributions __assert_only kern_return_t kr;
3454*8d741a5dSApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
3455*8d741a5dSApple OSS Distributions assert(kr == KERN_SUCCESS);
3456*8d741a5dSApple OSS Distributions
3457*8d741a5dSApple OSS Distributions kqworkloop_bound_thread_park_commit(kqr,
3458*8d741a5dSApple OSS Distributions workq_parked_wait_event(uth), workq_bound_thread_unpark_continue);
3459*8d741a5dSApple OSS Distributions
3460*8d741a5dSApple OSS Distributions __builtin_unreachable();
3461*8d741a5dSApple OSS Distributions }
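/*
 * Illustrative sketch (hypothetical names, not kernel code): the two-phase
 * park used above. "Prepost" publishes the intent to sleep while the lock
 * is held, the lock is dropped for cleanup work such as dropping the
 * voucher, and "commit" only blocks if no wakeup slipped in between.
 *
 *   #include <pthread.h>
 *   #include <stdbool.h>
 *
 *   struct parker {
 *       pthread_mutex_t lock;
 *       pthread_cond_t  cv;
 *       bool            sleeping;        // models the KQ_SLEEP bit
 *   };
 *
 *   static void
 *   park_prepost(struct parker *p)       // entered with p->lock held
 *   {
 *       p->sleeping = true;
 *       pthread_mutex_unlock(&p->lock);  // wakeups re-check under the lock
 *   }
 *
 *   static void
 *   park_commit(struct parker *p)
 *   {
 *       pthread_mutex_lock(&p->lock);
 *       while (p->sleeping) {            // false if a wakeup already landed
 *           pthread_cond_wait(&p->cv, &p->lock);
 *       }
 *       pthread_mutex_unlock(&p->lock);
 *   }
 *
 *   static void
 *   unpark(struct parker *p)
 *   {
 *       pthread_mutex_lock(&p->lock);
 *       p->sleeping = false;
 *       pthread_cond_signal(&p->cv);
 *       pthread_mutex_unlock(&p->lock);
 *   }
 */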
3462*8d741a5dSApple OSS Distributions
3463*8d741a5dSApple OSS Distributions /*
3464*8d741a5dSApple OSS Distributions  * Terminates the permanently bound workqueue thread. The thread unbinds
3465*8d741a5dSApple OSS Distributions  * itself from the kqwl during uthread_cleanup -> kqueue_threadreq_unbind,
3466*8d741a5dSApple OSS Distributions  * which is also when it releases its reference on the kqwl.
3467*8d741a5dSApple OSS Distributions */
3468*8d741a5dSApple OSS Distributions __attribute__((noreturn, noinline))
3469*8d741a5dSApple OSS Distributions void
3470*8d741a5dSApple OSS Distributions workq_kern_bound_thread_terminate(struct workq_threadreq_s *kqr)
3471*8d741a5dSApple OSS Distributions {
3472*8d741a5dSApple OSS Distributions proc_t p = current_proc();
3473*8d741a5dSApple OSS Distributions struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
3474*8d741a5dSApple OSS Distributions uint16_t uu_workq_flags_orig;
3475*8d741a5dSApple OSS Distributions
3476*8d741a5dSApple OSS Distributions assert(uth == current_uthread());
3477*8d741a5dSApple OSS Distributions
3478*8d741a5dSApple OSS Distributions /*
3479*8d741a5dSApple OSS Distributions * See "Locking model for accessing uu_workq_flags" for more information
3480*8d741a5dSApple OSS Distributions * on how access to uu_workq_flags for the bound thread is synchronized.
3481*8d741a5dSApple OSS Distributions */
3482*8d741a5dSApple OSS Distributions kqworkloop_bound_thread_terminate(kqr, &uu_workq_flags_orig);
3483*8d741a5dSApple OSS Distributions
3484*8d741a5dSApple OSS Distributions if (uu_workq_flags_orig & UT_WORKQ_WORK_INTERVAL_JOINED) {
3485*8d741a5dSApple OSS Distributions __assert_only kern_return_t kr;
3486*8d741a5dSApple OSS Distributions kr = kern_work_interval_join(get_machthread(uth), MACH_PORT_NULL);
3487*8d741a5dSApple OSS Distributions /* The bound thread un-joins the work interval and drops its +1 ref. */
3488*8d741a5dSApple OSS Distributions assert(kr == KERN_SUCCESS);
3489*8d741a5dSApple OSS Distributions }
3490*8d741a5dSApple OSS Distributions
3491*8d741a5dSApple OSS Distributions /*
3492*8d741a5dSApple OSS Distributions * Drop the voucher now that we are on our way to termination.
3493*8d741a5dSApple OSS Distributions */
3494*8d741a5dSApple OSS Distributions __assert_only kern_return_t kr;
3495*8d741a5dSApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
3496*8d741a5dSApple OSS Distributions assert(kr == KERN_SUCCESS);
3497*8d741a5dSApple OSS Distributions
3498*8d741a5dSApple OSS Distributions uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
3499*8d741a5dSApple OSS Distributions upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
3500*8d741a5dSApple OSS Distributions WQ_FLAG_THREAD_PRIO_QOS;
3501*8d741a5dSApple OSS Distributions
3502*8d741a5dSApple OSS Distributions thread_t th = get_machthread(uth);
3503*8d741a5dSApple OSS Distributions vm_map_t vmap = get_task_map(proc_task(p));
3504*8d741a5dSApple OSS Distributions
3505*8d741a5dSApple OSS Distributions if ((uu_workq_flags_orig & UT_WORKQ_NEW) == 0) {
3506*8d741a5dSApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_REUSE;
3507*8d741a5dSApple OSS Distributions }
3508*8d741a5dSApple OSS Distributions
3509*8d741a5dSApple OSS Distributions pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
3510*8d741a5dSApple OSS Distributions uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, upcall_flags);
3511*8d741a5dSApple OSS Distributions __builtin_unreachable();
3512*8d741a5dSApple OSS Distributions }
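/*
 * Illustrative sketch: how the termination upcall flags above are composed.
 * NEWSPI is always set, the QoS the thread parked with is tagged as a QoS
 * priority, and REUSE is set unless the thread never ran. The bit values
 * below are made up for the sketch; only the names come from this file.
 *
 *   #include <stdint.h>
 *
 *   #define WQ_FLAG_THREAD_NEWSPI    0x1u   // illustrative values only
 *   #define WQ_FLAG_THREAD_PRIO_QOS  0x2u
 *   #define WQ_FLAG_THREAD_REUSE     0x4u
 *   #define UT_WORKQ_NEW             0x8u
 *
 *   static uint32_t
 *   terminate_upcall_flags(uint32_t parked_qos, uint16_t workq_flags)
 *   {
 *       uint32_t flags = WQ_FLAG_THREAD_NEWSPI;
 *       flags |= parked_qos | WQ_FLAG_THREAD_PRIO_QOS;
 *       if ((workq_flags & UT_WORKQ_NEW) == 0) {
 *           flags |= WQ_FLAG_THREAD_REUSE;  // thread ran before: reuse setup
 *       }
 *       return flags;
 *   }
 */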
3513*8d741a5dSApple OSS Distributions
3514cc9a6355SApple OSS Distributions void
3515a5e72196SApple OSS Distributions workq_kern_threadreq_redrive(struct proc *p, workq_kern_threadreq_flags_t flags)
3516cc9a6355SApple OSS Distributions {
3517cc9a6355SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3518cc9a6355SApple OSS Distributions
3519cc9a6355SApple OSS Distributions workq_lock_spin(wq);
3520cc9a6355SApple OSS Distributions workq_schedule_creator(p, wq, flags);
3521cc9a6355SApple OSS Distributions workq_unlock(wq);
3522cc9a6355SApple OSS Distributions }
3523cc9a6355SApple OSS Distributions
3524e6231be0SApple OSS Distributions /*
3525e6231be0SApple OSS Distributions * Always called at AST by the thread on itself
3526e6231be0SApple OSS Distributions *
3527e6231be0SApple OSS Distributions * Upon quantum expiry, the workqueue subsystem evaluates its state and decides
3528e6231be0SApple OSS Distributions  * what the thread should do next. The TSD value is always set by the thread
3529e6231be0SApple OSS Distributions * on itself in the kernel and cleared either by userspace when it acks the TSD
3530e6231be0SApple OSS Distributions * value and takes action, or by the thread in the kernel when the quantum
3531e6231be0SApple OSS Distributions * expires again.
3532e6231be0SApple OSS Distributions */
3533e6231be0SApple OSS Distributions void
3534e6231be0SApple OSS Distributions workq_kern_quantum_expiry_reevaluate(proc_t proc, thread_t thread)
3535e6231be0SApple OSS Distributions {
3536e6231be0SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
3537e6231be0SApple OSS Distributions
3538e6231be0SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
3539e6231be0SApple OSS Distributions return;
3540e6231be0SApple OSS Distributions }
3541e6231be0SApple OSS Distributions
3542e6231be0SApple OSS Distributions if (!thread_supports_cooperative_workqueue(thread)) {
3543e6231be0SApple OSS Distributions panic("Quantum expired for thread that doesn't support cooperative workqueue");
3544e6231be0SApple OSS Distributions }
3545e6231be0SApple OSS Distributions
3546e6231be0SApple OSS Distributions thread_qos_t qos = uth->uu_workq_pri.qos_bucket;
3547e6231be0SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3548e6231be0SApple OSS Distributions panic("Thread should not have workq bucket of QoS UN");
3549e6231be0SApple OSS Distributions }
3550e6231be0SApple OSS Distributions
3551e6231be0SApple OSS Distributions assert(thread_has_expired_workqueue_quantum(thread, false));
3552e6231be0SApple OSS Distributions
3553e6231be0SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(proc);
3554e6231be0SApple OSS Distributions assert(wq != NULL);
3555e6231be0SApple OSS Distributions
3556e6231be0SApple OSS Distributions /*
3557e6231be0SApple OSS Distributions * For starters, we're just going to evaluate and see if we need to narrow
3558e6231be0SApple OSS Distributions * the pool and tell this thread to park if needed. In the future, we'll
3559e6231be0SApple OSS Distributions * evaluate and convey other workqueue state information like needing to
3560e6231be0SApple OSS Distributions * pump kevents, etc.
3561e6231be0SApple OSS Distributions */
3562e6231be0SApple OSS Distributions uint64_t flags = 0;
3563e6231be0SApple OSS Distributions
3564e6231be0SApple OSS Distributions workq_lock_spin(wq);
3565e6231be0SApple OSS Distributions
3566e6231be0SApple OSS Distributions if (workq_thread_is_cooperative(uth)) {
3567e6231be0SApple OSS Distributions if (!workq_cooperative_allowance(wq, qos, uth, false)) {
3568e6231be0SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3569e6231be0SApple OSS Distributions } else {
3570e6231be0SApple OSS Distributions /* In the future, when we have kevent hookups for the cooperative
3571e6231be0SApple OSS Distributions * pool, we need fancier logic for what userspace should do. But
3572e6231be0SApple OSS Distributions * right now, only userspace thread requests exist - so we'll just
3573e6231be0SApple OSS Distributions * tell userspace to shuffle work items */
3574e6231be0SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_SHUFFLE;
3575e6231be0SApple OSS Distributions }
3576e6231be0SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth)) {
3577*8d741a5dSApple OSS Distributions if (!workq_constrained_allowance(wq, qos, uth, false, false)) {
3578e6231be0SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3579e6231be0SApple OSS Distributions }
3580e6231be0SApple OSS Distributions }
3581e6231be0SApple OSS Distributions workq_unlock(wq);
3582e6231be0SApple OSS Distributions
3583e6231be0SApple OSS Distributions WQ_TRACE(TRACE_wq_quantum_expiry_reevaluate, flags, 0, 0, 0);
3584e6231be0SApple OSS Distributions
3585e6231be0SApple OSS Distributions kevent_set_workq_quantum_expiry_user_tsd(proc, thread, flags);
3586e6231be0SApple OSS Distributions
3587e6231be0SApple OSS Distributions /* We have conveyed to userspace about what it needs to do upon quantum
3588e6231be0SApple OSS Distributions * expiry, now rearm the workqueue quantum again */
3589e7776783SApple OSS Distributions thread_arm_workqueue_quantum(get_machthread(uth));
3590e6231be0SApple OSS Distributions }
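/*
 * Illustrative sketch (speculative user-space side): how a runtime might
 * consume the quantum-expiry TSD value set above. The accessors and flag
 * values are stand-ins, not the real pthread SPI: read the flags, act on
 * them, then clear the slot so the kernel can post again.
 *
 *   #include <stdint.h>
 *
 *   #define QUANTUM_EXPIRY_NARROW   0x1ull   // stand-in flag values
 *   #define QUANTUM_EXPIRY_SHUFFLE  0x2ull
 *
 *   extern uint64_t read_quantum_tsd(void);   // stand-in TSD accessors
 *   extern void     clear_quantum_tsd(void);
 *
 *   static void
 *   check_quantum_expiry(void)
 *   {
 *       uint64_t flags = read_quantum_tsd();
 *       if (flags == 0) {
 *           return;                  // quantum has not expired
 *       }
 *       if (flags & QUANTUM_EXPIRY_NARROW) {
 *           // pool is over its allowance: park this worker
 *       } else if (flags & QUANTUM_EXPIRY_SHUFFLE) {
 *           // yield to other queued work items in the pool
 *       }
 *       clear_quantum_tsd();         // ack; the kernel rearms the quantum
 *   }
 */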
3591e6231be0SApple OSS Distributions
3592cc9a6355SApple OSS Distributions void
3593cc9a6355SApple OSS Distributions workq_schedule_creator_turnstile_redrive(struct workqueue *wq, bool locked)
3594cc9a6355SApple OSS Distributions {
3595a5e72196SApple OSS Distributions if (locked) {
3596a5e72196SApple OSS Distributions workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_NONE);
3597a5e72196SApple OSS Distributions } else {
3598a5e72196SApple OSS Distributions workq_schedule_immediate_thread_creation(wq);
3599a5e72196SApple OSS Distributions }
3600cc9a6355SApple OSS Distributions }
3601cc9a6355SApple OSS Distributions
3602cc9a6355SApple OSS Distributions static int
3603cc9a6355SApple OSS Distributions workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap,
3604cc9a6355SApple OSS Distributions struct workqueue *wq)
3605cc9a6355SApple OSS Distributions {
3606cc9a6355SApple OSS Distributions thread_t th = current_thread();
3607cc9a6355SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
3608a5e72196SApple OSS Distributions workq_threadreq_t kqr = uth->uu_kqr_bound;
3609cc9a6355SApple OSS Distributions workq_threadreq_param_t trp = { };
3610cc9a6355SApple OSS Distributions int nevents = uap->affinity, error;
3611cc9a6355SApple OSS Distributions user_addr_t eventlist = uap->item;
3612cc9a6355SApple OSS Distributions
3613cc9a6355SApple OSS Distributions if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
3614cc9a6355SApple OSS Distributions (uth->uu_workq_flags & UT_WORKQ_DYING)) {
3615cc9a6355SApple OSS Distributions return EINVAL;
3616cc9a6355SApple OSS Distributions }
3617cc9a6355SApple OSS Distributions
3618cc9a6355SApple OSS Distributions if (eventlist && nevents && kqr == NULL) {
3619cc9a6355SApple OSS Distributions return EINVAL;
3620cc9a6355SApple OSS Distributions }
3621cc9a6355SApple OSS Distributions
362294d3b452SApple OSS Distributions /*
362394d3b452SApple OSS Distributions * Reset signal mask on the workqueue thread to default state,
362494d3b452SApple OSS Distributions * but do not touch any signals that are marked for preservation.
362594d3b452SApple OSS Distributions */
362694d3b452SApple OSS Distributions sigset_t resettable = uth->uu_sigmask & ~p->p_workq_allow_sigmask;
362794d3b452SApple OSS Distributions if (resettable != (sigset_t)~workq_threadmask) {
3628cc9a6355SApple OSS Distributions proc_lock(p);
362994d3b452SApple OSS Distributions uth->uu_sigmask |= ~workq_threadmask & ~p->p_workq_allow_sigmask;
3630cc9a6355SApple OSS Distributions proc_unlock(p);
3631cc9a6355SApple OSS Distributions }
3632cc9a6355SApple OSS Distributions
3633a5e72196SApple OSS Distributions if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
3634cc9a6355SApple OSS Distributions /*
3635cc9a6355SApple OSS Distributions * Ensure we store the threadreq param before unbinding
3636cc9a6355SApple OSS Distributions * the kqr from this thread.
3637cc9a6355SApple OSS Distributions */
3638a5e72196SApple OSS Distributions trp = kqueue_threadreq_workloop_param(kqr);
3639cc9a6355SApple OSS Distributions }
3640cc9a6355SApple OSS Distributions
3641*8d741a5dSApple OSS Distributions if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_PERMANENT_BIND) {
3642*8d741a5dSApple OSS Distributions goto handle_stack_events;
3643*8d741a5dSApple OSS Distributions }
3644*8d741a5dSApple OSS Distributions
3645a5e72196SApple OSS Distributions /*
3646e6231be0SApple OSS Distributions * Freeze the base pri while we decide the fate of this thread.
3647a5e72196SApple OSS Distributions *
3648a5e72196SApple OSS Distributions * Either:
3649a5e72196SApple OSS Distributions * - we return to user and kevent_cleanup will have unfrozen the base pri,
3650a5e72196SApple OSS Distributions * - or we proceed to workq_select_threadreq_or_park_and_unlock() who will.
3651a5e72196SApple OSS Distributions */
3652a5e72196SApple OSS Distributions thread_freeze_base_pri(th);
3653a5e72196SApple OSS Distributions
3654*8d741a5dSApple OSS Distributions handle_stack_events:
3655*8d741a5dSApple OSS Distributions
3656cc9a6355SApple OSS Distributions if (kqr) {
3657cc9a6355SApple OSS Distributions uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI | WQ_FLAG_THREAD_REUSE;
3658a5e72196SApple OSS Distributions if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
3659cc9a6355SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
3660cc9a6355SApple OSS Distributions } else {
3661cc9a6355SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_KEVENT;
3662cc9a6355SApple OSS Distributions }
3663cc9a6355SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
3664cc9a6355SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
3665cc9a6355SApple OSS Distributions } else {
3666e6231be0SApple OSS Distributions if (workq_thread_is_overcommit(uth)) {
3667cc9a6355SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
3668cc9a6355SApple OSS Distributions }
3669cc9a6355SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
3670cc9a6355SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
3671cc9a6355SApple OSS Distributions } else {
3672cc9a6355SApple OSS Distributions upcall_flags |= uth->uu_workq_pri.qos_req |
3673cc9a6355SApple OSS Distributions WQ_FLAG_THREAD_PRIO_QOS;
3674cc9a6355SApple OSS Distributions }
3675cc9a6355SApple OSS Distributions }
3676cc9a6355SApple OSS Distributions error = pthread_functions->workq_handle_stack_events(p, th,
36775c2921b0SApple OSS Distributions get_task_map(proc_task(p)), uth->uu_workq_stackaddr,
3678cc9a6355SApple OSS Distributions uth->uu_workq_thport, eventlist, nevents, upcall_flags);
3679a5e72196SApple OSS Distributions if (error) {
3680a5e72196SApple OSS Distributions assert(uth->uu_kqr_bound == kqr);
3681a5e72196SApple OSS Distributions return error;
3682a5e72196SApple OSS Distributions }
3683cc9a6355SApple OSS Distributions
3684cc9a6355SApple OSS Distributions // pthread is supposed to pass KEVENT_FLAG_PARKING here
3685cc9a6355SApple OSS Distributions // which should cause the above call to either:
3686cc9a6355SApple OSS Distributions // - not return
3687cc9a6355SApple OSS Distributions // - return an error
3688cc9a6355SApple OSS Distributions // - return 0 and have unbound properly
3689cc9a6355SApple OSS Distributions assert(uth->uu_kqr_bound == NULL);
3690cc9a6355SApple OSS Distributions }
3691cc9a6355SApple OSS Distributions
3692e6231be0SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_END, wq, uap->options, 0, 0);
3693cc9a6355SApple OSS Distributions
3694cc9a6355SApple OSS Distributions thread_sched_call(th, NULL);
3695cc9a6355SApple OSS Distributions thread_will_park_or_terminate(th);
3696cc9a6355SApple OSS Distributions #if CONFIG_WORKLOOP_DEBUG
3697cc9a6355SApple OSS Distributions UU_KEVENT_HISTORY_WRITE_ENTRY(uth, { .uu_error = -1, });
3698cc9a6355SApple OSS Distributions #endif
3699cc9a6355SApple OSS Distributions
3700cc9a6355SApple OSS Distributions workq_lock_spin(wq);
3701e6231be0SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
3702cc9a6355SApple OSS Distributions uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
3703a5e72196SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(p, wq, uth,
3704a5e72196SApple OSS Distributions WQ_SETUP_CLEAR_VOUCHER);
3705cc9a6355SApple OSS Distributions __builtin_unreachable();
3706cc9a6355SApple OSS Distributions }
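/*
 * Illustrative sketch: the signal-mask reset performed at the top of
 * workq_thread_return() above, reduced to a pure function with stand-in
 * types. Signals outside the workq mask are re-blocked on every return,
 * except those the process marked for preservation.
 *
 *   typedef unsigned int sigmask_t;   // stand-in for sigset_t
 *
 *   static sigmask_t
 *   reset_workq_sigmask(sigmask_t cur, sigmask_t workq_mask, sigmask_t allow)
 *   {
 *       sigmask_t resettable = cur & ~allow;
 *       if (resettable != (sigmask_t)~workq_mask) {
 *           cur |= ~workq_mask & ~allow;   // re-block non-workq signals
 *       }
 *       return cur;
 *   }
 */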
3707cc9a6355SApple OSS Distributions
3708cc9a6355SApple OSS Distributions /**
3709cc9a6355SApple OSS Distributions * Multiplexed call to interact with the workqueue mechanism
3710cc9a6355SApple OSS Distributions */
3711cc9a6355SApple OSS Distributions int
3712cc9a6355SApple OSS Distributions workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
3713cc9a6355SApple OSS Distributions {
3714cc9a6355SApple OSS Distributions int options = uap->options;
3715cc9a6355SApple OSS Distributions int arg2 = uap->affinity;
3716cc9a6355SApple OSS Distributions int arg3 = uap->prio;
3717cc9a6355SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
3718cc9a6355SApple OSS Distributions int error = 0;
3719cc9a6355SApple OSS Distributions
3720cc9a6355SApple OSS Distributions if ((p->p_lflag & P_LREGISTER) == 0) {
3721cc9a6355SApple OSS Distributions return EINVAL;
3722cc9a6355SApple OSS Distributions }
3723cc9a6355SApple OSS Distributions
3724cc9a6355SApple OSS Distributions switch (options) {
3725cc9a6355SApple OSS Distributions case WQOPS_QUEUE_NEWSPISUPP: {
3726cc9a6355SApple OSS Distributions /*
3727cc9a6355SApple OSS Distributions * arg2 = offset of serialno into dispatch queue
3728cc9a6355SApple OSS Distributions * arg3 = kevent support
3729cc9a6355SApple OSS Distributions */
3730cc9a6355SApple OSS Distributions int offset = arg2;
3731cc9a6355SApple OSS Distributions if (arg3 & 0x01) {
3732cc9a6355SApple OSS Distributions // If we get here, then userspace has indicated support for kevent delivery.
3733cc9a6355SApple OSS Distributions }
3734cc9a6355SApple OSS Distributions
3735cc9a6355SApple OSS Distributions p->p_dispatchqueue_serialno_offset = (uint64_t)offset;
3736cc9a6355SApple OSS Distributions break;
3737cc9a6355SApple OSS Distributions }
3738cc9a6355SApple OSS Distributions case WQOPS_QUEUE_REQTHREADS: {
3739cc9a6355SApple OSS Distributions /*
3740cc9a6355SApple OSS Distributions * arg2 = number of threads to start
3741cc9a6355SApple OSS Distributions * arg3 = priority
3742cc9a6355SApple OSS Distributions */
3743e6231be0SApple OSS Distributions error = workq_reqthreads(p, arg2, arg3, false);
3744e6231be0SApple OSS Distributions break;
3745e6231be0SApple OSS Distributions }
3746e6231be0SApple OSS Distributions /* For requesting threads for the cooperative pool */
3747e6231be0SApple OSS Distributions case WQOPS_QUEUE_REQTHREADS2: {
3748e6231be0SApple OSS Distributions /*
3749e6231be0SApple OSS Distributions * arg2 = number of threads to start
3750e6231be0SApple OSS Distributions * arg3 = priority
3751e6231be0SApple OSS Distributions */
3752e6231be0SApple OSS Distributions error = workq_reqthreads(p, arg2, arg3, true);
3753cc9a6355SApple OSS Distributions break;
3754cc9a6355SApple OSS Distributions }
3755cc9a6355SApple OSS Distributions case WQOPS_SET_EVENT_MANAGER_PRIORITY: {
3756cc9a6355SApple OSS Distributions /*
3757cc9a6355SApple OSS Distributions * arg2 = priority for the manager thread
3758cc9a6355SApple OSS Distributions *
3759cc9a6355SApple OSS Distributions * if _PTHREAD_PRIORITY_SCHED_PRI_FLAG is set,
3760cc9a6355SApple OSS Distributions  * the low bits of the value contain a scheduling priority
3761cc9a6355SApple OSS Distributions * instead of a QOS value
3762cc9a6355SApple OSS Distributions */
3763cc9a6355SApple OSS Distributions pthread_priority_t pri = arg2;
3764cc9a6355SApple OSS Distributions
3765cc9a6355SApple OSS Distributions if (wq == NULL) {
3766cc9a6355SApple OSS Distributions error = EINVAL;
3767cc9a6355SApple OSS Distributions break;
3768cc9a6355SApple OSS Distributions }
3769cc9a6355SApple OSS Distributions
3770cc9a6355SApple OSS Distributions /*
3771cc9a6355SApple OSS Distributions * Normalize the incoming priority so that it is ordered numerically.
3772cc9a6355SApple OSS Distributions */
37735c2921b0SApple OSS Distributions if (_pthread_priority_has_sched_pri(pri)) {
3774cc9a6355SApple OSS Distributions pri &= (_PTHREAD_PRIORITY_SCHED_PRI_MASK |
3775cc9a6355SApple OSS Distributions _PTHREAD_PRIORITY_SCHED_PRI_FLAG);
3776cc9a6355SApple OSS Distributions } else {
3777cc9a6355SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pri);
3778cc9a6355SApple OSS Distributions int relpri = _pthread_priority_relpri(pri);
3779cc9a6355SApple OSS Distributions if (relpri > 0 || relpri < THREAD_QOS_MIN_TIER_IMPORTANCE ||
3780cc9a6355SApple OSS Distributions qos == THREAD_QOS_UNSPECIFIED) {
3781cc9a6355SApple OSS Distributions error = EINVAL;
3782cc9a6355SApple OSS Distributions break;
3783cc9a6355SApple OSS Distributions }
3784cc9a6355SApple OSS Distributions pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
3785cc9a6355SApple OSS Distributions }
3786cc9a6355SApple OSS Distributions
3787cc9a6355SApple OSS Distributions /*
3788cc9a6355SApple OSS Distributions * If userspace passes a scheduling priority, that wins over any QoS.
3789cc9a6355SApple OSS Distributions  * Userspace should take care not to lower the priority this way.
3790cc9a6355SApple OSS Distributions */
3791cc9a6355SApple OSS Distributions workq_lock_spin(wq);
3792cc9a6355SApple OSS Distributions if (wq->wq_event_manager_priority < (uint32_t)pri) {
3793cc9a6355SApple OSS Distributions wq->wq_event_manager_priority = (uint32_t)pri;
3794cc9a6355SApple OSS Distributions }
3795cc9a6355SApple OSS Distributions workq_unlock(wq);
3796cc9a6355SApple OSS Distributions break;
3797cc9a6355SApple OSS Distributions }
3798cc9a6355SApple OSS Distributions case WQOPS_THREAD_KEVENT_RETURN:
3799cc9a6355SApple OSS Distributions case WQOPS_THREAD_WORKLOOP_RETURN:
3800cc9a6355SApple OSS Distributions case WQOPS_THREAD_RETURN: {
3801cc9a6355SApple OSS Distributions error = workq_thread_return(p, uap, wq);
3802cc9a6355SApple OSS Distributions break;
3803cc9a6355SApple OSS Distributions }
3804cc9a6355SApple OSS Distributions
3805cc9a6355SApple OSS Distributions case WQOPS_SHOULD_NARROW: {
3806cc9a6355SApple OSS Distributions /*
3807cc9a6355SApple OSS Distributions * arg2 = priority to test
3808cc9a6355SApple OSS Distributions * arg3 = unused
3809cc9a6355SApple OSS Distributions */
3810cc9a6355SApple OSS Distributions thread_t th = current_thread();
3811cc9a6355SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
3812cc9a6355SApple OSS Distributions if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
3813cc9a6355SApple OSS Distributions (uth->uu_workq_flags & (UT_WORKQ_DYING | UT_WORKQ_OVERCOMMIT))) {
3814cc9a6355SApple OSS Distributions error = EINVAL;
3815cc9a6355SApple OSS Distributions break;
3816cc9a6355SApple OSS Distributions }
3817cc9a6355SApple OSS Distributions
3818cc9a6355SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(arg2);
3819cc9a6355SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3820cc9a6355SApple OSS Distributions error = EINVAL;
3821cc9a6355SApple OSS Distributions break;
3822cc9a6355SApple OSS Distributions }
3823cc9a6355SApple OSS Distributions workq_lock_spin(wq);
3824*8d741a5dSApple OSS Distributions bool should_narrow = !workq_constrained_allowance(wq, qos, uth, false, false);
3825cc9a6355SApple OSS Distributions workq_unlock(wq);
3826cc9a6355SApple OSS Distributions
3827cc9a6355SApple OSS Distributions *retval = should_narrow;
3828cc9a6355SApple OSS Distributions break;
3829cc9a6355SApple OSS Distributions }
3830a5e72196SApple OSS Distributions case WQOPS_SETUP_DISPATCH: {
3831a5e72196SApple OSS Distributions /*
3832a5e72196SApple OSS Distributions * item = pointer to workq_dispatch_config structure
3833a5e72196SApple OSS Distributions * arg2 = sizeof(item)
3834a5e72196SApple OSS Distributions */
3835a5e72196SApple OSS Distributions struct workq_dispatch_config cfg;
3836a5e72196SApple OSS Distributions bzero(&cfg, sizeof(cfg));
3837a5e72196SApple OSS Distributions
3838a5e72196SApple OSS Distributions error = copyin(uap->item, &cfg, MIN(sizeof(cfg), (unsigned long) arg2));
3839a5e72196SApple OSS Distributions if (error) {
3840a5e72196SApple OSS Distributions break;
3841a5e72196SApple OSS Distributions }
3842a5e72196SApple OSS Distributions
3843a5e72196SApple OSS Distributions if (cfg.wdc_flags & ~WORKQ_DISPATCH_SUPPORTED_FLAGS ||
3844a5e72196SApple OSS Distributions cfg.wdc_version < WORKQ_DISPATCH_MIN_SUPPORTED_VERSION) {
3845a5e72196SApple OSS Distributions error = ENOTSUP;
3846a5e72196SApple OSS Distributions break;
3847a5e72196SApple OSS Distributions }
3848a5e72196SApple OSS Distributions
3849a5e72196SApple OSS Distributions /* Load fields from version 1 */
3850a5e72196SApple OSS Distributions p->p_dispatchqueue_serialno_offset = cfg.wdc_queue_serialno_offs;
3851a5e72196SApple OSS Distributions
3852a5e72196SApple OSS Distributions /* Load fields from version 2 */
3853a5e72196SApple OSS Distributions if (cfg.wdc_version >= 2) {
3854a5e72196SApple OSS Distributions p->p_dispatchqueue_label_offset = cfg.wdc_queue_label_offs;
3855a5e72196SApple OSS Distributions }
3856a5e72196SApple OSS Distributions
3857a5e72196SApple OSS Distributions break;
3858a5e72196SApple OSS Distributions }
3859cc9a6355SApple OSS Distributions default:
3860cc9a6355SApple OSS Distributions error = EINVAL;
3861cc9a6355SApple OSS Distributions break;
3862cc9a6355SApple OSS Distributions }
3863cc9a6355SApple OSS Distributions
3864a5e72196SApple OSS Distributions return error;
3865cc9a6355SApple OSS Distributions }
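/*
 * Illustrative sketch: the versioned-struct pattern WQOPS_SETUP_DISPATCH
 * uses above. At most sizeof(cfg) bytes are copied in, the rest stays
 * zeroed, and fields are consumed gated on wdc_version. The struct layout
 * here is a stand-in, not the real workq_dispatch_config.
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   struct dispatch_cfg {
 *       uint32_t wdc_version;
 *       uint32_t wdc_flags;
 *       uint64_t wdc_queue_serialno_offs;   // version 1 field
 *       uint64_t wdc_queue_label_offs;      // version 2 field
 *   };
 *
 *   static int
 *   setup_dispatch(const void *user_buf, size_t user_len,
 *       uint64_t *serialno_offs, uint64_t *label_offs)
 *   {
 *       struct dispatch_cfg cfg;
 *       memset(&cfg, 0, sizeof(cfg));       // absent fields read as zero
 *       size_t n = user_len < sizeof(cfg) ? user_len : sizeof(cfg);
 *       memcpy(&cfg, user_buf, n);          // models copyin()
 *       if (cfg.wdc_version < 1) {
 *           return -1;                      // models the ENOTSUP path
 *       }
 *       *serialno_offs = cfg.wdc_queue_serialno_offs;
 *       if (cfg.wdc_version >= 2) {
 *           *label_offs = cfg.wdc_queue_label_offs;
 *       }
 *       return 0;
 *   }
 */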
3866cc9a6355SApple OSS Distributions
3867cc9a6355SApple OSS Distributions /*
3868cc9a6355SApple OSS Distributions  * We have no work to do; park ourselves on the idle list.
3869cc9a6355SApple OSS Distributions *
3870cc9a6355SApple OSS Distributions * Consumes the workqueue lock and does not return.
3871cc9a6355SApple OSS Distributions */
3872cc9a6355SApple OSS Distributions __attribute__((noreturn, noinline))
3873cc9a6355SApple OSS Distributions static void
3874a5e72196SApple OSS Distributions workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth,
3875a5e72196SApple OSS Distributions uint32_t setup_flags)
3876cc9a6355SApple OSS Distributions {
3877cc9a6355SApple OSS Distributions assert(uth == current_uthread());
3878cc9a6355SApple OSS Distributions assert(uth->uu_kqr_bound == NULL);
3879a5e72196SApple OSS Distributions workq_push_idle_thread(p, wq, uth, setup_flags); // may not return
3880cc9a6355SApple OSS Distributions
3881cc9a6355SApple OSS Distributions workq_thread_reset_cpupercent(NULL, uth);
3882cc9a6355SApple OSS Distributions
3883e6231be0SApple OSS Distributions #if CONFIG_PREADOPT_TG
3884e6231be0SApple OSS Distributions /* Clear the preadoption thread group on the thread.
3885e6231be0SApple OSS Distributions *
3886e6231be0SApple OSS Distributions * Case 1:
3887e6231be0SApple OSS Distributions  * A creator thread that never picked up a thread request. We set a
3888e6231be0SApple OSS Distributions  * preadoption thread group on creator threads, but if the thread never
3889e6231be0SApple OSS Distributions  * picked up a thread request and didn't go to userspace, it will park
3890e6231be0SApple OSS Distributions  * with a preadoption thread group but no explicitly adopted voucher or
3891e6231be0SApple OSS Distributions  * work interval.
3892e6231be0SApple OSS Distributions *
3893e6231be0SApple OSS Distributions * We drop the preadoption thread group here before proceeding to park.
3894e6231be0SApple OSS Distributions * Note - we may get preempted when we drop the workq lock below.
3895e6231be0SApple OSS Distributions *
3896e6231be0SApple OSS Distributions * Case 2:
3897e6231be0SApple OSS Distributions  * The thread picked up a thread request, bound to it, returned from
3898e6231be0SApple OSS Distributions  * userspace, and is parking. At this point, the preadoption thread
3899e6231be0SApple OSS Distributions * group should be NULL since the thread has unbound from the thread
3900e6231be0SApple OSS Distributions * request. So this operation should be a no-op.
3901e6231be0SApple OSS Distributions */
3902e7776783SApple OSS Distributions thread_set_preadopt_thread_group(get_machthread(uth), NULL);
3903e6231be0SApple OSS Distributions #endif
3904e6231be0SApple OSS Distributions
3905a5e72196SApple OSS Distributions if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) &&
3906a5e72196SApple OSS Distributions !(uth->uu_workq_flags & UT_WORKQ_DYING)) {
3907cc9a6355SApple OSS Distributions workq_unlock(wq);
3908cc9a6355SApple OSS Distributions
3909cc9a6355SApple OSS Distributions /*
3910cc9a6355SApple OSS Distributions * workq_push_idle_thread() will unset `has_stack`
3911cc9a6355SApple OSS Distributions * if it wants us to free the stack before parking.
3912cc9a6355SApple OSS Distributions */
3913cc9a6355SApple OSS Distributions if (!uth->uu_save.uus_workq_park_data.has_stack) {
3914e7776783SApple OSS Distributions pthread_functions->workq_markfree_threadstack(p,
39155c2921b0SApple OSS Distributions get_machthread(uth), get_task_map(proc_task(p)),
3916e7776783SApple OSS Distributions uth->uu_workq_stackaddr);
3917cc9a6355SApple OSS Distributions }
3918cc9a6355SApple OSS Distributions
3919cc9a6355SApple OSS Distributions /*
3920cc9a6355SApple OSS Distributions * When we remove the voucher from the thread, we may lose our importance
3921cc9a6355SApple OSS Distributions * causing us to get preempted, so we do this after putting the thread on
3922cc9a6355SApple OSS Distributions * the idle list. Then, when we get our importance back we'll be able to
3923cc9a6355SApple OSS Distributions * use this thread from e.g. the kevent call out to deliver a boosting
3924cc9a6355SApple OSS Distributions * message.
3925e6231be0SApple OSS Distributions *
3926e6231be0SApple OSS Distributions * Note that setting the voucher to NULL will not clear the preadoption
3927e6231be0SApple OSS Distributions  * thread group since this thread could have become the creator again and
3928e6231be0SApple OSS Distributions * perhaps acquired a preadoption thread group.
3929cc9a6355SApple OSS Distributions */
3930cc9a6355SApple OSS Distributions __assert_only kern_return_t kr;
3931cc9a6355SApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
3932cc9a6355SApple OSS Distributions assert(kr == KERN_SUCCESS);
3933cc9a6355SApple OSS Distributions
3934cc9a6355SApple OSS Distributions workq_lock_spin(wq);
3935cc9a6355SApple OSS Distributions uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
3936a5e72196SApple OSS Distributions setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER;
3937cc9a6355SApple OSS Distributions }
3938cc9a6355SApple OSS Distributions
3939e6231be0SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
3940bb611c8fSApple OSS Distributions
3941cc9a6355SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
3942cc9a6355SApple OSS Distributions /*
3943cc9a6355SApple OSS Distributions * While we'd dropped the lock to unset our voucher, someone came
3944cc9a6355SApple OSS Distributions * around and made us runnable. But because we weren't waiting on the
3945cc9a6355SApple OSS Distributions * event their thread_wakeup() was ineffectual. To correct for that,
3946cc9a6355SApple OSS Distributions * we just run the continuation ourselves.
3947cc9a6355SApple OSS Distributions */
3948a5e72196SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
3949cc9a6355SApple OSS Distributions __builtin_unreachable();
3950cc9a6355SApple OSS Distributions }
3951cc9a6355SApple OSS Distributions
3952cc9a6355SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
3953cc9a6355SApple OSS Distributions workq_unpark_for_death_and_unlock(p, wq, uth,
3954a5e72196SApple OSS Distributions WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, setup_flags);
3955cc9a6355SApple OSS Distributions __builtin_unreachable();
3956cc9a6355SApple OSS Distributions }
3957cc9a6355SApple OSS Distributions
3958e6231be0SApple OSS Distributions /* Disarm the workqueue quantum since the thread is now idle */
3959e7776783SApple OSS Distributions thread_disarm_workqueue_quantum(get_machthread(uth));
3960e6231be0SApple OSS Distributions
3961e7776783SApple OSS Distributions thread_set_pending_block_hint(get_machthread(uth), kThreadWaitParkedWorkQueue);
3962cc9a6355SApple OSS Distributions assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
3963cc9a6355SApple OSS Distributions workq_unlock(wq);
3964cc9a6355SApple OSS Distributions thread_block(workq_unpark_continue);
3965cc9a6355SApple OSS Distributions __builtin_unreachable();
3966cc9a6355SApple OSS Distributions }
3967cc9a6355SApple OSS Distributions
3968cc9a6355SApple OSS Distributions static inline bool
3969cc9a6355SApple OSS Distributions workq_may_start_event_mgr_thread(struct workqueue *wq, struct uthread *uth)
3970cc9a6355SApple OSS Distributions {
3971cc9a6355SApple OSS Distributions /*
3972cc9a6355SApple OSS Distributions * There's an event manager request and either:
3973cc9a6355SApple OSS Distributions * - no event manager currently running
3974cc9a6355SApple OSS Distributions * - we are re-using the event manager
3975cc9a6355SApple OSS Distributions */
3976cc9a6355SApple OSS Distributions return wq->wq_thscheduled_count[_wq_bucket(WORKQ_THREAD_QOS_MANAGER)] == 0 ||
3977cc9a6355SApple OSS Distributions (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER);
3978cc9a6355SApple OSS Distributions }
3979cc9a6355SApple OSS Distributions
3980*8d741a5dSApple OSS Distributions /* Called with workq lock held. */
3981cc9a6355SApple OSS Distributions static uint32_t
3982cc9a6355SApple OSS Distributions workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos,
3983*8d741a5dSApple OSS Distributions struct uthread *uth, bool may_start_timer, bool record_failed_allowance)
3984cc9a6355SApple OSS Distributions {
3985cc9a6355SApple OSS Distributions assert(at_qos != WORKQ_THREAD_QOS_MANAGER);
3986*8d741a5dSApple OSS Distributions uint32_t allowance_passed = 0;
3987cc9a6355SApple OSS Distributions uint32_t count = 0;
3988cc9a6355SApple OSS Distributions
3989cc9a6355SApple OSS Distributions uint32_t max_count = wq->wq_constrained_threads_scheduled;
3990e6231be0SApple OSS Distributions if (uth && workq_thread_is_nonovercommit(uth)) {
3991cc9a6355SApple OSS Distributions /*
3992cc9a6355SApple OSS Distributions * don't count the current thread as scheduled
3993cc9a6355SApple OSS Distributions */
3994cc9a6355SApple OSS Distributions assert(max_count > 0);
3995cc9a6355SApple OSS Distributions max_count--;
3996cc9a6355SApple OSS Distributions }
3997cc9a6355SApple OSS Distributions if (max_count >= wq_max_constrained_threads) {
3998cc9a6355SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
3999cc9a6355SApple OSS Distributions wq->wq_constrained_threads_scheduled,
4000e6231be0SApple OSS Distributions wq_max_constrained_threads);
4001cc9a6355SApple OSS Distributions /*
4002cc9a6355SApple OSS Distributions * we need 1 or more constrained threads to return to the kernel before
4003cc9a6355SApple OSS Distributions * we can dispatch additional work
4004cc9a6355SApple OSS Distributions */
4005*8d741a5dSApple OSS Distributions allowance_passed = 0;
4006*8d741a5dSApple OSS Distributions goto out;
4007cc9a6355SApple OSS Distributions }
4008cc9a6355SApple OSS Distributions max_count -= wq_max_constrained_threads;
4009cc9a6355SApple OSS Distributions
4010cc9a6355SApple OSS Distributions /*
4011cc9a6355SApple OSS Distributions  * Compute a metric for how many threads are active. We find the
4012e6231be0SApple OSS Distributions * highest priority request outstanding and then add up the number of active
4013e6231be0SApple OSS Distributions * threads in that and all higher-priority buckets. We'll also add any
4014e6231be0SApple OSS Distributions * "busy" threads which are not currently active but blocked recently enough
4015e6231be0SApple OSS Distributions * that we can't be sure that they won't be unblocked soon and start
4016e6231be0SApple OSS Distributions * being active again.
4017e6231be0SApple OSS Distributions *
4018e6231be0SApple OSS Distributions * We'll then compare this metric to our max concurrency to decide whether
4019e6231be0SApple OSS Distributions * to add a new thread.
4020cc9a6355SApple OSS Distributions */
4021cc9a6355SApple OSS Distributions
4022cc9a6355SApple OSS Distributions uint32_t busycount, thactive_count;
4023cc9a6355SApple OSS Distributions
4024cc9a6355SApple OSS Distributions thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
4025cc9a6355SApple OSS Distributions at_qos, &busycount, NULL);
4026cc9a6355SApple OSS Distributions
4027cc9a6355SApple OSS Distributions if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER &&
4028cc9a6355SApple OSS Distributions at_qos <= uth->uu_workq_pri.qos_bucket) {
4029cc9a6355SApple OSS Distributions /*
4030cc9a6355SApple OSS Distributions * Don't count this thread as currently active, but only if it's not
4031cc9a6355SApple OSS Distributions * a manager thread, as _wq_thactive_aggregate_downto_qos ignores active
4032cc9a6355SApple OSS Distributions * managers.
4033cc9a6355SApple OSS Distributions */
4034cc9a6355SApple OSS Distributions assert(thactive_count > 0);
4035cc9a6355SApple OSS Distributions thactive_count--;
4036cc9a6355SApple OSS Distributions }
4037cc9a6355SApple OSS Distributions
4038cc9a6355SApple OSS Distributions count = wq_max_parallelism[_wq_bucket(at_qos)];
4039cc9a6355SApple OSS Distributions if (count > thactive_count + busycount) {
4040cc9a6355SApple OSS Distributions count -= thactive_count + busycount;
4041cc9a6355SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
4042e6231be0SApple OSS Distributions thactive_count, busycount);
4043*8d741a5dSApple OSS Distributions allowance_passed = MIN(count, max_count);
4044*8d741a5dSApple OSS Distributions goto out;
4045cc9a6355SApple OSS Distributions } else {
4046cc9a6355SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
4047e6231be0SApple OSS Distributions thactive_count, busycount);
4048*8d741a5dSApple OSS Distributions allowance_passed = 0;
4049cc9a6355SApple OSS Distributions }
4050cc9a6355SApple OSS Distributions
4051e6231be0SApple OSS Distributions if (may_start_timer) {
4052cc9a6355SApple OSS Distributions /*
4053cc9a6355SApple OSS Distributions * If this is called from the add timer, we won't have another timer
4054cc9a6355SApple OSS Distributions * fire when the thread exits the "busy" state, so rearm the timer.
4055cc9a6355SApple OSS Distributions */
4056cc9a6355SApple OSS Distributions workq_schedule_delayed_thread_creation(wq, 0);
4057cc9a6355SApple OSS Distributions }
4058cc9a6355SApple OSS Distributions
4059*8d741a5dSApple OSS Distributions out:
4060*8d741a5dSApple OSS Distributions if (record_failed_allowance) {
4061*8d741a5dSApple OSS Distributions wq->wq_exceeded_active_constrained_thread_limit = !allowance_passed;
4062*8d741a5dSApple OSS Distributions }
4063*8d741a5dSApple OSS Distributions return allowance_passed;
4064cc9a6355SApple OSS Distributions }
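/*
 * Illustrative sketch: the admission check above reduced to the predicate
 * its callers use. Names are stand-ins; the kernel additionally excludes
 * the calling thread from both counts and may rearm the thread-creation
 * timer when admission fails.
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   static bool
 *   constrained_admissible(uint32_t scheduled, uint32_t cap,
 *       uint32_t max_parallelism, uint32_t active, uint32_t busy)
 *   {
 *       if (scheduled >= cap) {
 *           return false;   // need constrained threads to return first
 *       }
 *       // admit only if the cores are not already covered by active
 *       // threads plus recently blocked ("busy") threads at >= this QoS
 *       return max_parallelism > active + busy;
 *   }
 */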
4065cc9a6355SApple OSS Distributions
4066cc9a6355SApple OSS Distributions static bool
4067cc9a6355SApple OSS Distributions workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
4068cc9a6355SApple OSS Distributions workq_threadreq_t req)
4069cc9a6355SApple OSS Distributions {
4070cc9a6355SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
4071cc9a6355SApple OSS Distributions return workq_may_start_event_mgr_thread(wq, uth);
4072cc9a6355SApple OSS Distributions }
4073e6231be0SApple OSS Distributions if (workq_threadreq_is_cooperative(req)) {
4074e6231be0SApple OSS Distributions return workq_cooperative_allowance(wq, req->tr_qos, uth, true);
4075e6231be0SApple OSS Distributions }
4076e6231be0SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
4077*8d741a5dSApple OSS Distributions return workq_constrained_allowance(wq, req->tr_qos, uth, true, true);
4078cc9a6355SApple OSS Distributions }
4079e6231be0SApple OSS Distributions
4080cc9a6355SApple OSS Distributions return true;
4081cc9a6355SApple OSS Distributions }
4082cc9a6355SApple OSS Distributions
4083e6231be0SApple OSS Distributions /*
4084e6231be0SApple OSS Distributions * Called from the context of selecting thread requests for threads returning
4085e6231be0SApple OSS Distributions * from userspace or creator thread
4086e6231be0SApple OSS Distributions */
4087e6231be0SApple OSS Distributions static workq_threadreq_t
4088e6231be0SApple OSS Distributions workq_cooperative_queue_best_req(struct workqueue *wq, struct uthread *uth)
4089e6231be0SApple OSS Distributions {
4090e6231be0SApple OSS Distributions workq_lock_held(wq);
4091e6231be0SApple OSS Distributions
4092e6231be0SApple OSS Distributions /*
4093e6231be0SApple OSS Distributions * If the current thread is cooperative, we need to exclude it as part of
4094e6231be0SApple OSS Distributions * cooperative schedule count since this thread is looking for a new
4095e6231be0SApple OSS Distributions  * request. A change in the schedule count for the cooperative pool
4096e6231be0SApple OSS Distributions  * therefore requires us to reevaluate the next best request for it.
4097e6231be0SApple OSS Distributions */
4098e6231be0SApple OSS Distributions if (uth && workq_thread_is_cooperative(uth)) {
40995c2921b0SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);
4100e6231be0SApple OSS Distributions
4101e6231be0SApple OSS Distributions (void) _wq_cooperative_queue_refresh_best_req_qos(wq);
4102e6231be0SApple OSS Distributions
41035c2921b0SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
4104e6231be0SApple OSS Distributions } else {
4105e6231be0SApple OSS Distributions /*
4106e6231be0SApple OSS Distributions * The old value that was already precomputed should be safe to use -
4107e6231be0SApple OSS Distributions  * assert that the best req QoS doesn't change in
4108e6231be0SApple OSS Distributions * this case
4109e6231be0SApple OSS Distributions */
4110e6231be0SApple OSS Distributions assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
4111e6231be0SApple OSS Distributions }
4112e6231be0SApple OSS Distributions
4113e6231be0SApple OSS Distributions thread_qos_t qos = wq->wq_cooperative_queue_best_req_qos;
4114e6231be0SApple OSS Distributions
4115e6231be0SApple OSS Distributions /* There are no eligible requests in the cooperative pool */
4116e6231be0SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
4117e6231be0SApple OSS Distributions return NULL;
4118e6231be0SApple OSS Distributions }
4119e6231be0SApple OSS Distributions assert(qos != WORKQ_THREAD_QOS_ABOVEUI);
4120e6231be0SApple OSS Distributions assert(qos != WORKQ_THREAD_QOS_MANAGER);
4121e6231be0SApple OSS Distributions
4122e7776783SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
4123e6231be0SApple OSS Distributions assert(!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket]));
4124e6231be0SApple OSS Distributions
4125e6231be0SApple OSS Distributions return STAILQ_FIRST(&wq->wq_cooperative_queue[bucket]);
4126e6231be0SApple OSS Distributions }
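/*
 * Illustrative sketch: the exclude-self pattern above. The calling thread
 * is removed from its scheduled bucket, the cached best request QoS is
 * refreshed, and the count is restored. Types, fields, and the eligibility
 * rule are stand-ins; the workq lock makes the steps atomic in the kernel.
 *
 *   typedef int qos_t;                  // 0 = unspecified, higher = better
 *   #define NBUCKETS 8
 *
 *   struct coop_pool {
 *       unsigned pending[NBUCKETS];     // queued requests per QoS bucket
 *       unsigned scheduled[NBUCKETS];   // scheduled threads per QoS bucket
 *       qos_t    best_req_qos;          // cached result of the refresh
 *   };
 *
 *   static void
 *   refresh_best_req_qos(struct coop_pool *p)
 *   {
 *       p->best_req_qos = 0;
 *       for (qos_t q = NBUCKETS - 1; q > 0; q--) {
 *           if (p->pending[q]) {        // simplified eligibility rule
 *               p->best_req_qos = q;
 *               break;
 *           }
 *       }
 *   }
 *
 *   static qos_t
 *   best_req_excluding_self(struct coop_pool *p, qos_t self_qos)
 *   {
 *       p->scheduled[self_qos]--;       // self is about to pick new work
 *       refresh_best_req_qos(p);
 *       p->scheduled[self_qos]++;       // self still counts as scheduled
 *       return p->best_req_qos;
 *   }
 */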
4127e6231be0SApple OSS Distributions
4128cc9a6355SApple OSS Distributions static workq_threadreq_t
4129cc9a6355SApple OSS Distributions workq_threadreq_select_for_creator(struct workqueue *wq)
4130cc9a6355SApple OSS Distributions {
4131bb611c8fSApple OSS Distributions workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
4132cc9a6355SApple OSS Distributions thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
4133cc9a6355SApple OSS Distributions uint8_t pri = 0;
4134cc9a6355SApple OSS Distributions
4135cc9a6355SApple OSS Distributions /*
4136cc9a6355SApple OSS Distributions * Compute the best priority request, and ignore the turnstile for now
4137cc9a6355SApple OSS Distributions */
4138cc9a6355SApple OSS Distributions
4139cc9a6355SApple OSS Distributions req_pri = priority_queue_max(&wq->wq_special_queue,
4140cc9a6355SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4141cc9a6355SApple OSS Distributions if (req_pri) {
4142bb611c8fSApple OSS Distributions pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
4143bb611c8fSApple OSS Distributions &req_pri->tr_entry);
4144bb611c8fSApple OSS Distributions }
4145bb611c8fSApple OSS Distributions
4146bb611c8fSApple OSS Distributions /*
4147bb611c8fSApple OSS Distributions * Handle the manager thread request. The special queue might yield
4148bb611c8fSApple OSS Distributions * a higher priority, but the manager always beats the QoS world.
4149bb611c8fSApple OSS Distributions */
4150bb611c8fSApple OSS Distributions
4151bb611c8fSApple OSS Distributions req_mgr = wq->wq_event_manager_threadreq;
4152bb611c8fSApple OSS Distributions if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) {
4153bb611c8fSApple OSS Distributions uint32_t mgr_pri = wq->wq_event_manager_priority;
4154bb611c8fSApple OSS Distributions
4155bb611c8fSApple OSS Distributions if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
4156bb611c8fSApple OSS Distributions mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
4157bb611c8fSApple OSS Distributions } else {
4158bb611c8fSApple OSS Distributions mgr_pri = thread_workq_pri_for_qos(
4159bb611c8fSApple OSS Distributions _pthread_priority_thread_qos(mgr_pri));
4160bb611c8fSApple OSS Distributions }
4161bb611c8fSApple OSS Distributions
4162bb611c8fSApple OSS Distributions return mgr_pri >= pri ? req_mgr : req_pri;
4163cc9a6355SApple OSS Distributions }
4164cc9a6355SApple OSS Distributions
4165cc9a6355SApple OSS Distributions /*
4166cc9a6355SApple OSS Distributions * Compute the best QoS Request, and check whether it beats the "pri" one
4167e6231be0SApple OSS Distributions *
4168e6231be0SApple OSS Distributions * Start by comparing the overcommit and the cooperative pool
4169cc9a6355SApple OSS Distributions */
4170cc9a6355SApple OSS Distributions req_qos = priority_queue_max(&wq->wq_overcommit_queue,
4171cc9a6355SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4172cc9a6355SApple OSS Distributions if (req_qos) {
4173cc9a6355SApple OSS Distributions qos = req_qos->tr_qos;
4174cc9a6355SApple OSS Distributions }
4175cc9a6355SApple OSS Distributions
4176e6231be0SApple OSS Distributions req_tmp = workq_cooperative_queue_best_req(wq, NULL);
4177e6231be0SApple OSS Distributions if (req_tmp && qos <= req_tmp->tr_qos) {
4178e6231be0SApple OSS Distributions /*
4179e6231be0SApple OSS Distributions * Cooperative TR is better between overcommit and cooperative. Note
4180e6231be0SApple OSS Distributions  * that if the qos is the same between overcommit and cooperative, we choose
4181e6231be0SApple OSS Distributions * cooperative.
4182e6231be0SApple OSS Distributions *
4183e6231be0SApple OSS Distributions * Pick cooperative pool if it passes the admissions check
4184e6231be0SApple OSS Distributions */
4185e6231be0SApple OSS Distributions if (workq_cooperative_allowance(wq, req_tmp->tr_qos, NULL, true)) {
4186e6231be0SApple OSS Distributions req_qos = req_tmp;
4187e6231be0SApple OSS Distributions qos = req_qos->tr_qos;
4188e6231be0SApple OSS Distributions }
4189e6231be0SApple OSS Distributions }
4190e6231be0SApple OSS Distributions
4191e6231be0SApple OSS Distributions /*
4192e6231be0SApple OSS Distributions * Compare the best QoS so far - either from overcommit or from cooperative
4193e6231be0SApple OSS Distributions * pool - and compare it with the constrained pool
4194e6231be0SApple OSS Distributions */
4195cc9a6355SApple OSS Distributions req_tmp = priority_queue_max(&wq->wq_constrained_queue,
4196cc9a6355SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4197cc9a6355SApple OSS Distributions
4198cc9a6355SApple OSS Distributions if (req_tmp && qos < req_tmp->tr_qos) {
4199e6231be0SApple OSS Distributions /*
4200e6231be0SApple OSS Distributions * Constrained pool is best in QoS between overcommit, cooperative
4201e6231be0SApple OSS Distributions  * and constrained. Now check how it fares against the priority case
4202e6231be0SApple OSS Distributions */
4203cc9a6355SApple OSS Distributions if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
4204cc9a6355SApple OSS Distributions return req_pri;
4205cc9a6355SApple OSS Distributions }
4206cc9a6355SApple OSS Distributions
4207*8d741a5dSApple OSS Distributions if (workq_constrained_allowance(wq, req_tmp->tr_qos, NULL, true, true)) {
4208cc9a6355SApple OSS Distributions /*
4209cc9a6355SApple OSS Distributions * If the constrained thread request is the best one and passes
4210cc9a6355SApple OSS Distributions * the admission check, pick it.
4211cc9a6355SApple OSS Distributions */
4212cc9a6355SApple OSS Distributions return req_tmp;
4213cc9a6355SApple OSS Distributions }
4214cc9a6355SApple OSS Distributions }
4215cc9a6355SApple OSS Distributions
4216e6231be0SApple OSS Distributions /*
4217e6231be0SApple OSS Distributions * Compare the best of the QoS world with the priority
4218e6231be0SApple OSS Distributions */
4219cc9a6355SApple OSS Distributions if (pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
4220cc9a6355SApple OSS Distributions return req_pri;
4221cc9a6355SApple OSS Distributions }
4222cc9a6355SApple OSS Distributions
4223cc9a6355SApple OSS Distributions if (req_qos) {
4224cc9a6355SApple OSS Distributions return req_qos;
4225cc9a6355SApple OSS Distributions }
4226cc9a6355SApple OSS Distributions
4227cc9a6355SApple OSS Distributions /*
4228cc9a6355SApple OSS Distributions * If we had no eligible request but we have a turnstile push,
4229cc9a6355SApple OSS Distributions * it must be a non overcommit thread request that failed
4230cc9a6355SApple OSS Distributions * the admission check.
4231cc9a6355SApple OSS Distributions *
4232cc9a6355SApple OSS Distributions  * Just fake a BG thread request so that if the push stops, the creator
4233cc9a6355SApple OSS Distributions * priority just drops to 4.
4234cc9a6355SApple OSS Distributions */
4235cc9a6355SApple OSS Distributions if (turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, NULL)) {
4236cc9a6355SApple OSS Distributions static struct workq_threadreq_s workq_sync_push_fake_req = {
4237cc9a6355SApple OSS Distributions .tr_qos = THREAD_QOS_BACKGROUND,
4238cc9a6355SApple OSS Distributions };
4239cc9a6355SApple OSS Distributions
4240cc9a6355SApple OSS Distributions return &workq_sync_push_fake_req;
4241cc9a6355SApple OSS Distributions }
4242cc9a6355SApple OSS Distributions
4243cc9a6355SApple OSS Distributions return NULL;
4244cc9a6355SApple OSS Distributions }
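/*
 * Illustrative sketch: the manager-priority normalization used above so a
 * pthread priority can be compared numerically with a sched pri from the
 * special queue. Constants and helpers are stand-ins for the
 * _PTHREAD_PRIORITY_* encoding and the real conversion routines.
 *
 *   #include <stdint.h>
 *
 *   #define PRIO_SCHED_PRI_FLAG  0x20000000u   // stand-in encoding
 *   #define PRIO_SCHED_PRI_MASK  0x0000ffffu
 *
 *   extern uint32_t workq_pri_for_qos(uint32_t qos);     // stand-ins for
 *   extern uint32_t qos_from_pthread_pri(uint32_t pri);  // real helpers
 *
 *   static uint32_t
 *   normalize_mgr_pri(uint32_t mgr_pri)
 *   {
 *       if (mgr_pri & PRIO_SCHED_PRI_FLAG) {
 *           return mgr_pri & PRIO_SCHED_PRI_MASK;  // already a sched pri
 *       }
 *       return workq_pri_for_qos(qos_from_pthread_pri(mgr_pri));
 *   }
 */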
4245cc9a6355SApple OSS Distributions
4246e6231be0SApple OSS Distributions /*
4247e6231be0SApple OSS Distributions * Returns true if this caused a change in the schedule counts of the
4248e6231be0SApple OSS Distributions * cooperative pool
4249e6231be0SApple OSS Distributions */
4250e6231be0SApple OSS Distributions static bool
4251e6231be0SApple OSS Distributions workq_adjust_cooperative_constrained_schedule_counts(struct workqueue *wq,
4252e6231be0SApple OSS Distributions struct uthread *uth, thread_qos_t old_thread_qos, workq_tr_flags_t tr_flags)
4253e6231be0SApple OSS Distributions {
4254e6231be0SApple OSS Distributions workq_lock_held(wq);
4255e6231be0SApple OSS Distributions
4256e6231be0SApple OSS Distributions /*
4257e6231be0SApple OSS Distributions * Row: thread type
4258e6231be0SApple OSS Distributions * Column: Request type
4259e6231be0SApple OSS Distributions *
4260e6231be0SApple OSS Distributions * overcommit non-overcommit cooperative
4261e6231be0SApple OSS Distributions * overcommit X case 1 case 2
4262e6231be0SApple OSS Distributions * cooperative case 3 case 4 case 5
4263e6231be0SApple OSS Distributions * non-overcommit case 6 X case 7
4264e6231be0SApple OSS Distributions *
4265e6231be0SApple OSS Distributions * Move the thread to the right bucket depending on what state it currently
4266e6231be0SApple OSS Distributions  * has and what state the thread req it picks is going to have.
4267e6231be0SApple OSS Distributions *
4268e6231be0SApple OSS Distributions * Note that the creator thread is an overcommit thread.
4269e6231be0SApple OSS Distributions */
	thread_qos_t new_thread_qos = uth->uu_workq_pri.qos_req;

	/*
	 * Anytime a cooperative bucket's schedule count changes, we need to
	 * potentially refresh the next best QoS for that pool when we determine
	 * the next request for the creator.
	 */
	bool cooperative_pool_sched_count_changed = false;

	if (workq_thread_is_overcommit(uth)) {
		if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 1: thread is overcommit, req is non-overcommit
			wq->wq_constrained_threads_scheduled++;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 2: thread is overcommit, req is cooperative
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
	} else if (workq_thread_is_cooperative(uth)) {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 3: thread is cooperative, req is overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
		} else if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 4: thread is cooperative, req is non-overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			wq->wq_constrained_threads_scheduled++;
		} else {
			// Case 5: thread is cooperative, req is also cooperative
			assert(workq_tr_is_cooperative(tr_flags));
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
		}
		cooperative_pool_sched_count_changed = true;
	} else {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 6: thread is non-overcommit, req is overcommit
			wq->wq_constrained_threads_scheduled--;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 7: thread is non-overcommit, req is cooperative
			wq->wq_constrained_threads_scheduled--;
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
	}

	return cooperative_pool_sched_count_changed;
}

static workq_threadreq_t
workq_threadreq_select(struct workqueue *wq, struct uthread *uth)
{
	workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
	uintptr_t proprietor;
	thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
	uint8_t pri = 0;

	if (uth == wq->wq_creator) {
		uth = NULL;
	}

	/*
	 * Compute the best priority request (special or turnstile)
	 */

	pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
	    &proprietor);
	if (pri) {
		struct kqworkloop *kqwl = (struct kqworkloop *)proprietor;
		req_pri = &kqwl->kqwl_request;
		if (req_pri->tr_state != WORKQ_TR_STATE_QUEUED) {
			panic("Invalid thread request (%p) state %d",
			    req_pri, req_pri->tr_state);
		}
	} else {
		req_pri = NULL;
	}

	req_tmp = priority_queue_max(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue,
	    &req_tmp->tr_entry)) {
		req_pri = req_tmp;
		pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
		    &req_tmp->tr_entry);
	}

	/*
	 * Handle the manager thread request. The special queue might yield
	 * a higher priority, but the manager always beats the QoS world.
	 */

	req_mgr = wq->wq_event_manager_threadreq;
	if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;

		if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		} else {
			mgr_pri = thread_workq_pri_for_qos(
				_pthread_priority_thread_qos(mgr_pri));
		}

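		/*
		 * Both branches above leave mgr_pri as a bare scheduler
		 * priority, so it is directly comparable with the turnstile or
		 * special-queue priority computed in `pri`.
		 */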
		return mgr_pri >= pri ? req_mgr : req_pri;
	}

	/*
	 * Compute the best QoS request, and check whether it beats the "pri" one
	 */

	req_qos = priority_queue_max(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_qos) {
		qos = req_qos->tr_qos;
	}

	req_tmp = workq_cooperative_queue_best_req(wq, uth);
	if (req_tmp && qos <= req_tmp->tr_qos) {
		/*
		 * The cooperative request is the better of the overcommit and
		 * cooperative ones. Note that if the QoS is the same between
		 * overcommit and cooperative, we choose cooperative.
		 *
		 * Pick the cooperative pool if it passes the admission check.
		 */
		if (workq_cooperative_allowance(wq, req_tmp->tr_qos, uth, true)) {
			req_qos = req_tmp;
			qos = req_qos->tr_qos;
		}
	}

	/*
	 * Compare the best QoS so far (from either the overcommit or the
	 * cooperative pool) against the constrained pool.
	 */
	req_tmp = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);

	if (req_tmp && qos < req_tmp->tr_qos) {
		/*
		 * The constrained request has the best QoS among overcommit,
		 * cooperative and constrained. Now check how it fares against
		 * the priority case.
		 */
		if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
			return req_pri;
		}

		if (workq_constrained_allowance(wq, req_tmp->tr_qos, uth, true, true)) {
			/*
			 * If the constrained thread request is the best one and passes
			 * the admission check, pick it.
			 */
			return req_tmp;
		}
	}

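	/*
	 * Finally, arbitrate between the best priority request and the best
	 * QoS request; on a tie in scheduler priority, the priority request
	 * wins.
	 */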
	if (req_pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
		return req_pri;
	}

	return req_qos;
}

/*
 * The creator is an anonymous thread that is counted as scheduled,
 * but is otherwise neither tracked as active nor given a scheduler
 * callback; it is used to make other threads.
 *
 * When more requests are added or an existing one is hurried along,
 * a creator is elected and set up, or the existing one overridden accordingly.
 *
 * While this creator is in flight, because no request has been dequeued,
 * already running threads have a chance at stealing thread requests, avoiding
 * useless context switches, and the creator once scheduled may not find any
 * work to do and will then just park again.
 *
 * The creator serves the dual purpose of informing the scheduler of work that
 * hasn't been materialized as threads yet, and also acts as a natural pacing
 * mechanism for thread creation.
 *
 * Being anonymous (and not bound to anything) means that thread requests
 * can be stolen from this creator by threads already on core, yielding more
 * efficient scheduling and reduced context switches.
 */
static void
workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags)
{
	workq_threadreq_t req;
	struct uthread *uth;
	bool needs_wakeup;

	workq_lock_held(wq);
	assert(p || (flags & WORKQ_THREADREQ_CAN_CREATE_THREADS) == 0);

again:
	uth = wq->wq_creator;

	if (!wq->wq_reqcount) {
		/*
		 * There is no thread request left.
		 *
		 * If there is a creator, leave everything in place, so that it cleans
		 * up itself in workq_push_idle_thread().
		 *
		 * Else, make sure the turnstile state is reset to no inheritor.
		 */
		if (uth == NULL) {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
		return;
	}

	req = workq_threadreq_select_for_creator(wq);
	if (req == NULL) {
		/*
		 * There isn't a thread request that passes the admission check.
		 *
		 * If there is a creator, do not touch anything, the creator will sort
		 * it out when it runs.
		 *
		 * Else, set the inheritor to "WORKQ" so that the turnstile propagation
		 * code calls us if anything changes.
		 */
		if (uth == NULL) {
			workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
		}
		return;
	}

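	/*
	 * Three ways to materialize the creator follow: override the one we
	 * already have, unpark an idle thread to become it, or create a new
	 * thread altogether.
	 */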
	if (uth) {
		/*
		 * We need to maybe override the creator we already have
		 */
		if (workq_thread_needs_priority_change(req, uth)) {
			WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
			    wq, 1, uthread_tid(uth), req->tr_qos);
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
		}
		assert(wq->wq_inheritor == get_machthread(uth));
	} else if (wq->wq_thidlecount) {
		/*
		 * We need to unpark a creator thread
		 */
		wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
		    &needs_wakeup);
		/* Always reset the priorities on the newly chosen creator */
		workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
		workq_turnstile_update_inheritor(wq, get_machthread(uth),
		    TURNSTILE_INHERITOR_THREAD);
		WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
		    wq, 2, uthread_tid(uth), req->tr_qos);
		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
		uth->uu_save.uus_workq_park_data.yields = 0;
		if (needs_wakeup) {
			workq_thread_wakeup(uth);
		}
	} else {
		/*
		 * We need to allocate a thread...
		 */
		if (__improbable(wq->wq_nthreads >= wq_max_threads)) {
			/* out of threads, just go away */
			flags = WORKQ_THREADREQ_NONE;
		} else if (flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) {
			act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
		} else if (!(flags & WORKQ_THREADREQ_CAN_CREATE_THREADS)) {
			/* This can drop the workqueue lock, and take it again */
			workq_schedule_immediate_thread_creation(wq);
		} else if ((workq_add_new_idle_thread(p, wq,
		    workq_unpark_continue, false, NULL) == KERN_SUCCESS)) {
			goto again;
		} else {
			workq_schedule_delayed_thread_creation(wq, 0);
		}

		/*
		 * If the current thread is the inheritor:
		 *
		 * If we set the AST, then the thread will stay the inheritor until
		 * either the AST calls workq_kern_threadreq_redrive(), or it parks
		 * and calls workq_push_idle_thread().
		 *
		 * Else, the responsibility of the thread creation is with a thread-call
		 * and we need to clear the inheritor.
		 */
		if ((flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) == 0 &&
		    wq->wq_inheritor == current_thread()) {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
	}
}

/**
 * Same as workq_unpark_select_threadreq_or_park_and_unlock,
 * but do not allow early binds.
 *
 * Called with the base pri frozen, will unfreeze it.
 */
__attribute__((noreturn, noinline))
static void
workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t setup_flags)
{
	workq_threadreq_t req = NULL;
	bool is_creator = (wq->wq_creator == uth);
	bool schedule_creator = false;

	if (__improbable(_wq_exiting(wq))) {
		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0);
		goto park;
	}

	if (wq->wq_reqcount == 0) {
		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0);
		goto park;
	}

	req = workq_threadreq_select(wq, uth);
	if (__improbable(req == NULL)) {
		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0);
		goto park;
	}

	struct uu_workq_policy old_pri = uth->uu_workq_pri;
	uint8_t tr_flags = req->tr_flags;
	struct turnstile *req_ts = kqueue_threadreq_get_turnstile(req);

	/*
	 * Attempt to set ourselves up as the new thing to run, moving all
	 * priority pushes to ourselves.
	 *
	 * If the current thread is the creator, then the fact that we are
	 * presently running is proof that we'll do something useful, so keep
	 * going.
	 *
	 * For other cases, peek at the AST to know whether the scheduler wants
	 * to preempt us; if so, park instead, and move the thread request
	 * turnstile back to the workqueue.
	 */
	if (req_ts) {
		workq_perform_turnstile_operation_locked(wq, ^{
			turnstile_update_inheritor(req_ts, get_machthread(uth),
			    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
			turnstile_update_inheritor_complete(req_ts,
			    TURNSTILE_INTERLOCK_HELD);
		});
	}

	/*
	 * Accounting changes of the aggregate thscheduled_count and thactive;
	 * this has to be paired with the workq_thread_reset_pri below so that
	 * uth->uu_workq_pri matches thactive.
	 *
	 * This is undone when the thread parks.
	 */
	if (is_creator) {
		WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
		    uth->uu_save.uus_workq_park_data.yields);
		wq->wq_creator = NULL;
		_wq_thactive_inc(wq, req->tr_qos);
		wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
	} else if (old_pri.qos_bucket != req->tr_qos) {
		_wq_thactive_move(wq, old_pri.qos_bucket, req->tr_qos);
	}
	workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);

	/*
	 * Make the relevant accounting changes for the pool-specific counts.
	 *
	 * The schedule counts changing can affect what the next best request
	 * for the cooperative thread pool is if this request is dequeued.
	 */
	bool cooperative_sched_count_changed =
	    workq_adjust_cooperative_constrained_schedule_counts(wq, uth,
	    old_pri.qos_req, tr_flags);

	if (workq_tr_is_overcommit(tr_flags)) {
		workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
	} else if (workq_tr_is_cooperative(tr_flags)) {
		workq_thread_set_type(uth, UT_WORKQ_COOPERATIVE);
	} else {
		workq_thread_set_type(uth, 0);
	}
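	/*
	 * Note that the thread type is set only after
	 * workq_adjust_cooperative_constrained_schedule_counts() above, since
	 * that function keys its transition matrix off the thread's old type.
	 */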

	if (__improbable(thread_unfreeze_base_pri(get_machthread(uth)) && !is_creator)) {
		if (req_ts) {
			workq_perform_turnstile_operation_locked(wq, ^{
				turnstile_update_inheritor(req_ts, wq->wq_turnstile,
				    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
				turnstile_update_inheritor_complete(req_ts,
				    TURNSTILE_INTERLOCK_HELD);
			});
		}
		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 3, 0, 0);

		/*
		 * If a cooperative thread was the one which picked up the manager
		 * thread request, we need to reevaluate the cooperative pool before
		 * it goes and parks.
		 *
		 * For every other kind of thread request it picks up, the logic in
		 * workq_threadreq_select should have done this refresh.
		 * See workq_push_idle_thread.
		 */
		if (cooperative_sched_count_changed) {
			if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
				_wq_cooperative_queue_refresh_best_req_qos(wq);
			}
		}
		goto park_thawed;
	}

	/*
	 * We passed all checks, dequeue the request, bind to it, and set it up
	 * to return to user.
	 */
	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
	    workq_trace_req_id(req), tr_flags, 0);
	wq->wq_fulfilled++;
	schedule_creator = workq_threadreq_dequeue(wq, req,
	    cooperative_sched_count_changed);

	workq_thread_reset_cpupercent(req, uth);

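	/*
	 * Hand off ownership of `req`: kevent/workloop requests are preposted
	 * to the kqueue subsystem, and a request with tr_count > 0 stays
	 * queued. In both cases `req` is NULLed so it is not freed below.
	 */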
	if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
		kqueue_threadreq_bind_prepost(p, req, uth);
		req = NULL;
	} else if (req->tr_count > 0) {
		req = NULL;
	}

	if (uth->uu_workq_flags & UT_WORKQ_NEW) {
		uth->uu_workq_flags ^= UT_WORKQ_NEW;
		setup_flags |= WQ_SETUP_FIRST_USE;
	}

	/*
	 * If one of the following is true, call workq_schedule_creator (which
	 * also adjusts the priority of an existing creator):
	 *
	 * - We are the creator currently, so the wq may need a new creator
	 * - The request we're binding to is the highest priority one; the
	 *   existing creator's priority might need to be adjusted to reflect
	 *   the next highest TR
	 */
	if (is_creator || schedule_creator) {
		/* This can drop the workqueue lock, and take it again */
		workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
	}

	workq_unlock(wq);

	if (req) {
		zfree(workq_zone_threadreq, req);
	}

	/*
	 * Run Thread, Run!
	 */
	uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
	if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
		upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
	} else if (workq_tr_is_overcommit(tr_flags)) {
		upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
	} else if (workq_tr_is_cooperative(tr_flags)) {
		upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
	}
	if (tr_flags & WORKQ_TR_FLAG_KEVENT) {
		upcall_flags |= WQ_FLAG_THREAD_KEVENT;
		assert((upcall_flags & WQ_FLAG_THREAD_COOPERATIVE) == 0);
	}

	if (tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
		upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
	}
	uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;

	if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
		kqueue_threadreq_bind_commit(p, get_machthread(uth));
	} else {
#if CONFIG_PREADOPT_TG
		/*
		 * The thread may have a preadopt thread group on it already because it
		 * got tagged with it as a creator thread. So we need to make sure to
		 * clear that since we don't have preadoption for anonymous thread
		 * requests.
		 */
		thread_set_preadopt_thread_group(get_machthread(uth), NULL);
#endif
	}

	workq_setup_and_run(p, uth, setup_flags);
	__builtin_unreachable();

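	/*
	 * park: reached with the base pri still frozen, so thaw it first.
	 * park_thawed: reached from the path above that already unfroze it.
	 */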
park:
	thread_unfreeze_base_pri(get_machthread(uth));
park_thawed:
	workq_park_and_unlock(p, wq, uth, setup_flags);
}

/**
 * Runs a thread request on a thread
 *
 * - if thread is THREAD_NULL, will find a thread and run the request there.
 *   Otherwise, the thread must be the current thread.
 *
 * - if req is NULL, will find the highest priority request and run that. If
 *   it is not NULL, it must be a threadreq object in state NEW. If it can not
 *   be run immediately, it will be enqueued and moved to state QUEUED.
 *
 * Either way, the thread request object serviced will be moved to state
 * BINDING and attached to the uthread.
 *
 * Should be called with the workqueue lock held. Will drop it.
 * Should be called with the base pri not frozen.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t setup_flags)
{
	if (uth->uu_workq_flags & UT_WORKQ_EARLY_BOUND) {
		if (uth->uu_workq_flags & UT_WORKQ_NEW) {
			setup_flags |= WQ_SETUP_FIRST_USE;
		}
		uth->uu_workq_flags &= ~(UT_WORKQ_NEW | UT_WORKQ_EARLY_BOUND);
		/*
		 * This pointer is possibly freed and only used for tracing purposes.
		 */
		workq_threadreq_t req = uth->uu_save.uus_workq_park_data.thread_request;
		workq_unlock(wq);
		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
		    VM_KERNEL_ADDRHIDE(req), 0, 0);
		(void)req;

		workq_setup_and_run(p, uth, setup_flags);
		__builtin_unreachable();
	}

	thread_freeze_base_pri(get_machthread(uth));
	workq_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
}

static bool
workq_creator_should_yield(struct workqueue *wq, struct uthread *uth)
{
	thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);

	if (qos >= THREAD_QOS_USER_INTERACTIVE) {
		return false;
	}

	uint32_t snapshot = uth->uu_save.uus_workq_park_data.fulfilled_snapshot;
	if (wq->wq_fulfilled == snapshot) {
		return false;
	}

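	/*
	 * Worked example (numbers assumed): with conc == 8 for this QoS
	 * bucket, a creator that has seen wq_fulfilled advance by 9 or more
	 * since it was dispatched yields; failing that, it still yields if
	 * the scheduled-thread counts summed from its bucket onward already
	 * reach 8.
	 */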
	uint32_t cnt = 0, conc = wq_max_parallelism[_wq_bucket(qos)];
	if (wq->wq_fulfilled - snapshot > conc) {
		/* we fulfilled more than NCPU requests since being dispatched */
		WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 1,
		    wq->wq_fulfilled, snapshot);
		return true;
	}

	for (uint8_t i = _wq_bucket(qos); i < WORKQ_NUM_QOS_BUCKETS; i++) {
		cnt += wq->wq_thscheduled_count[i];
	}
	if (conc <= cnt) {
		/* We fulfilled requests and have more than NCPU scheduled threads */
		WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 2,
		    wq->wq_fulfilled, snapshot);
		return true;
	}

	return false;
}

/**
 * A parked idle thread wakes up.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_continue(void *parameter __unused, wait_result_t wr __unused)
{
	thread_t th = current_thread();
	struct uthread *uth = get_bsdthread_info(th);
	proc_t p = current_proc();
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);

	if (wq->wq_creator == uth && workq_creator_should_yield(wq, uth)) {
		/*
		 * If the number of threads we have out are able to keep up with the
		 * demand, then we should avoid sending this creator thread to
		 * userspace.
		 */
		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
		uth->uu_save.uus_workq_park_data.yields++;
		workq_unlock(wq);
		thread_yield_with_continuation(workq_unpark_continue, NULL);
		__builtin_unreachable();
	}

	if (__probable(uth->uu_workq_flags & UT_WORKQ_RUNNING)) {
		workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, WQ_SETUP_NONE);
		__builtin_unreachable();
	}

	if (__probable(wr == THREAD_AWAKENED)) {
		/*
		 * We were set running, but for the purposes of dying.
		 */
		assert(uth->uu_workq_flags & UT_WORKQ_DYING);
		assert((uth->uu_workq_flags & UT_WORKQ_NEW) == 0);
	} else {
		/*
		 * Workaround for <rdar://problem/38647347>: in case we do hit
		 * userspace, make sure calling workq_thread_terminate() does the
		 * right thing here, and if we never call it, workq_exit() will
		 * too, because it sees this thread on the runlist.
		 */
		assert(wr == THREAD_INTERRUPTED);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
	}

	workq_unpark_for_death_and_unlock(p, wq, uth,
	    WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, WQ_SETUP_NONE);
	__builtin_unreachable();
}

__attribute__((noreturn, noinline))
static void
workq_setup_and_run(proc_t p, struct uthread *uth, int setup_flags)
{
	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
		/*
		 * For preemption reasons, we want to reset the voucher as late as
		 * possible, so we do it in two places:
		 *   - Just before parking (i.e. in workq_park_and_unlock())
		 *   - Prior to doing the setup for the next workitem (i.e. here)
		 *
		 * Those two places are sufficient to ensure we always reset it before
		 * it goes back out to user space, but be careful to not break that
		 * guarantee.
		 *
		 * Note that setting the voucher to NULL will not clear the preadoption
		 * thread group on this thread.
		 */
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);
	}

	uint32_t upcall_flags = uth->uu_save.uus_workq_park_data.upcall_flags;
	if (!(setup_flags & WQ_SETUP_FIRST_USE)) {
		upcall_flags |= WQ_FLAG_THREAD_REUSE;
	}

	if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
		/*
		 * For threads that have an outside-of-QoS thread priority, indicate
		 * to userspace that setting QoS should only affect the TSD and not
		 * change QoS in the kernel.
		 */
		upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
	} else {
		/*
		 * Put the QoS class value into the lower bits of the reuse_thread
		 * register; this is where the thread priority used to be stored
		 * anyway.
		 */
		upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
		    WQ_FLAG_THREAD_PRIO_QOS;
	}

	if (uth->uu_workq_thport == MACH_PORT_NULL) {
		/* convert_thread_to_port_pinned() consumes a reference */
		thread_reference(th);
		/* Convert to an immovable/pinned thread port; the port is not pinned yet */
		ipc_port_t port = convert_thread_to_port_pinned(th);
		/* Atomically, pin and copy out the port */
		uth->uu_workq_thport = ipc_port_copyout_send_pinned(port, get_task_ipcspace(proc_task(p)));
	}

	/*
	 * The thread has been set up to run; arm its next workqueue quantum,
	 * or disarm it if the thread no longer supports that.
	 */
	if (thread_supports_cooperative_workqueue(th)) {
		thread_arm_workqueue_quantum(th);
	} else {
		thread_disarm_workqueue_quantum(th);
	}

	/*
	 * Call out to pthread, this sets up the thread, pulls in kevent structs
	 * onto the stack, sets up the thread state and then returns to userspace.
	 */
	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START,
	    proc_get_wqptr_fast(p), 0, 0, 0);

	if (workq_thread_is_cooperative(uth) || workq_thread_is_permanently_bound(uth)) {
		thread_sched_call(th, NULL);
	} else {
		thread_sched_call(th, workq_sched_callback);
	}

	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, setup_flags, upcall_flags);

	__builtin_unreachable();
}

/**
 * A wrapper around workq_setup_and_run for a permanently bound thread.
 */
__attribute__((noreturn, noinline))
static void
workq_bound_thread_setup_and_run(struct uthread *uth, int setup_flags)
{
	struct workq_threadreq_s *kqr = uth->uu_kqr_bound;

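	/*
	 * The upcall flags are rebuilt from the bound kqr on every wakeup: a
	 * bound thread always runs a workloop request in kevent mode, hence
	 * the unconditional WORKLOOP and KEVENT flags.
	 */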
4994*8d741a5dSApple OSS Distributions uint32_t upcall_flags = (WQ_FLAG_THREAD_NEWSPI |
4995*8d741a5dSApple OSS Distributions WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT);
4996*8d741a5dSApple OSS Distributions if (workq_tr_is_overcommit(kqr->tr_flags)) {
4997*8d741a5dSApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
4998*8d741a5dSApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
4999*8d741a5dSApple OSS Distributions }
5000*8d741a5dSApple OSS Distributions uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
5001*8d741a5dSApple OSS Distributions workq_setup_and_run(current_proc(), uth, setup_flags);
5002*8d741a5dSApple OSS Distributions __builtin_unreachable();
5003*8d741a5dSApple OSS Distributions }
5004*8d741a5dSApple OSS Distributions
5005*8d741a5dSApple OSS Distributions /**
5006*8d741a5dSApple OSS Distributions * A parked bound thread wakes up for the first time.
5007*8d741a5dSApple OSS Distributions */
5008*8d741a5dSApple OSS Distributions __attribute__((noreturn, noinline))
5009*8d741a5dSApple OSS Distributions static void
workq_bound_thread_initialize_and_unpark_continue(void * parameter __unused,wait_result_t wr)5010*8d741a5dSApple OSS Distributions workq_bound_thread_initialize_and_unpark_continue(void *parameter __unused,
5011*8d741a5dSApple OSS Distributions wait_result_t wr)
5012*8d741a5dSApple OSS Distributions {
5013*8d741a5dSApple OSS Distributions /*
5014*8d741a5dSApple OSS Distributions * Locking model for accessing uu_workq_flags :
5015*8d741a5dSApple OSS Distributions *
5016*8d741a5dSApple OSS Distributions * The concurrent access to uu_workq_flags is synchronized with workq lock
5017*8d741a5dSApple OSS Distributions * until a thread gets permanently bound to a kqwl. Post that, kqlock
5018*8d741a5dSApple OSS Distributions * is used for subsequent synchronizations. This gives us a significant
5019*8d741a5dSApple OSS Distributions * benefit by avoiding having to take a process wide workq lock on every
5020*8d741a5dSApple OSS Distributions * wakeup of the bound thread.
5021*8d741a5dSApple OSS Distributions * This flip in locking model is tracked with UT_WORKQ_PERMANENT_BIND flag.
5022*8d741a5dSApple OSS Distributions *
5023*8d741a5dSApple OSS Distributions * There is one more optimization we can perform for when the thread is
5024*8d741a5dSApple OSS Distributions * awakened for running (i.e THREAD_AWAKENED) until it parks.
5025*8d741a5dSApple OSS Distributions * During this window, we know KQ_SLEEP bit is reset so there should not
5026*8d741a5dSApple OSS Distributions * be any concurrent attempts to modify uu_workq_flags by
5027*8d741a5dSApple OSS Distributions * kqworkloop_bound_thread_wakeup because the thread is already "awake".
5028*8d741a5dSApple OSS Distributions * So we can safely access uu_workq_flags within this window without having
5029*8d741a5dSApple OSS Distributions * to take kqlock. This KQ_SLEEP is later set by the bound thread under
5030*8d741a5dSApple OSS Distributions * kqlock on its way to parking.
5031*8d741a5dSApple OSS Distributions */
5032*8d741a5dSApple OSS Distributions struct uthread *uth = get_bsdthread_info(current_thread());
5033*8d741a5dSApple OSS Distributions
5034*8d741a5dSApple OSS Distributions if (__probable(wr == THREAD_AWAKENED)) {
5035*8d741a5dSApple OSS Distributions /* At most one flag. */
5036*8d741a5dSApple OSS Distributions assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING))
5037*8d741a5dSApple OSS Distributions != (UT_WORKQ_RUNNING | UT_WORKQ_DYING));
5038*8d741a5dSApple OSS Distributions
5039*8d741a5dSApple OSS Distributions assert(workq_thread_is_permanently_bound(uth));
5040*8d741a5dSApple OSS Distributions
5041*8d741a5dSApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
5042*8d741a5dSApple OSS Distributions assert(uth->uu_workq_flags & UT_WORKQ_NEW);
5043*8d741a5dSApple OSS Distributions uth->uu_workq_flags &= ~UT_WORKQ_NEW;
5044*8d741a5dSApple OSS Distributions
5045*8d741a5dSApple OSS Distributions struct workq_threadreq_s * kqr = uth->uu_kqr_bound;
5046*8d741a5dSApple OSS Distributions if (kqr->tr_work_interval) {
5047*8d741a5dSApple OSS Distributions kern_return_t kr;
5048*8d741a5dSApple OSS Distributions kr = kern_work_interval_explicit_join(get_machthread(uth),
5049*8d741a5dSApple OSS Distributions kqr->tr_work_interval);
5050*8d741a5dSApple OSS Distributions /*
5051*8d741a5dSApple OSS Distributions * The work interval functions requires to be called on the
5052*8d741a5dSApple OSS Distributions * current thread. If we fail here, we record the fact and
5053*8d741a5dSApple OSS Distributions * continue.
5054*8d741a5dSApple OSS Distributions * In the future, we can preflight checking that this join will
5055*8d741a5dSApple OSS Distributions * always be successful when the paird kqwl is configured; but,
5056*8d741a5dSApple OSS Distributions * for now, this should be a rare case (e.g. if you have passed
5057*8d741a5dSApple OSS Distributions * invalid arguments to the join).
5058*8d741a5dSApple OSS Distributions */
5059*8d741a5dSApple OSS Distributions if (kr == KERN_SUCCESS) {
5060*8d741a5dSApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_WORK_INTERVAL_JOINED;
5061*8d741a5dSApple OSS Distributions /* Thread and kqwl both have +1 ref on the work interval. */
5062*8d741a5dSApple OSS Distributions } else {
5063*8d741a5dSApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_WORK_INTERVAL_FAILED;
5064*8d741a5dSApple OSS Distributions }
5065*8d741a5dSApple OSS Distributions }
5066*8d741a5dSApple OSS Distributions workq_thread_reset_cpupercent(kqr, uth);
5067*8d741a5dSApple OSS Distributions workq_bound_thread_setup_and_run(uth, WQ_SETUP_FIRST_USE);
5068*8d741a5dSApple OSS Distributions __builtin_unreachable();
5069*8d741a5dSApple OSS Distributions } else {
5070*8d741a5dSApple OSS Distributions /*
5071*8d741a5dSApple OSS Distributions * The permanently bound kqworkloop is getting destroyed so we
5072*8d741a5dSApple OSS Distributions * are woken up to cleanly unbind ourselves from it and terminate.
5073*8d741a5dSApple OSS Distributions * See KQ_WORKLOOP_DESTROY -> workq_kern_bound_thread_wakeup.
5074*8d741a5dSApple OSS Distributions *
5075*8d741a5dSApple OSS Distributions * The actual full unbind happens from
5076*8d741a5dSApple OSS Distributions * uthread_cleanup -> kqueue_threadreq_unbind.
5077*8d741a5dSApple OSS Distributions */
5078*8d741a5dSApple OSS Distributions assert(uth->uu_workq_flags & UT_WORKQ_DYING);
5079*8d741a5dSApple OSS Distributions }
5080*8d741a5dSApple OSS Distributions } else {
5081*8d741a5dSApple OSS Distributions /*
5082*8d741a5dSApple OSS Distributions * The process is getting terminated so we are woken up to die.
5083*8d741a5dSApple OSS Distributions * E.g. SIGKILL'd.
5084*8d741a5dSApple OSS Distributions */
5085*8d741a5dSApple OSS Distributions assert(wr == THREAD_INTERRUPTED);
5086*8d741a5dSApple OSS Distributions /*
5087*8d741a5dSApple OSS Distributions * It is possible we started running as the process is aborted
5088*8d741a5dSApple OSS Distributions * due to termination; but, workq_kern_threadreq_permanent_bind
5089*8d741a5dSApple OSS Distributions * has not had a chance to bind us to the kqwl yet.
5090*8d741a5dSApple OSS Distributions *
5091*8d741a5dSApple OSS Distributions * We synchronize with it using workq lock.
5092*8d741a5dSApple OSS Distributions */
5093*8d741a5dSApple OSS Distributions proc_t p = current_proc();
5094*8d741a5dSApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
5095*8d741a5dSApple OSS Distributions workq_lock_spin(wq);
5096*8d741a5dSApple OSS Distributions assert(workq_thread_is_permanently_bound(uth));
5097*8d741a5dSApple OSS Distributions workq_unlock(wq);

		/*
		 * We do the bind commit ourselves if
		 * workq_kern_threadreq_permanent_bind has not done it for us yet,
		 * so that our state matches what the termination path below
		 * expects.
		 */
		kqueue_threadreq_bind_commit(p, get_machthread(uth));
	}
	workq_kern_bound_thread_terminate(uth->uu_kqr_bound);
	__builtin_unreachable();
}

/**
 * A parked bound thread wakes up; this is not its first wakeup.
 */
__attribute__((noreturn, noinline))
static void
workq_bound_thread_unpark_continue(void *parameter __unused, wait_result_t wr)
{
	struct uthread *uth = get_bsdthread_info(current_thread());
	assert(workq_thread_is_permanently_bound(uth));

	if (__probable(wr == THREAD_AWAKENED)) {
		/* At most one of the two flags may be set. */
		assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING))
		    != (UT_WORKQ_RUNNING | UT_WORKQ_DYING));
		if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
			workq_bound_thread_setup_and_run(uth, WQ_SETUP_NONE);
		} else {
			assert(uth->uu_workq_flags & UT_WORKQ_DYING);
		}
	} else {
		assert(wr == THREAD_INTERRUPTED);
	}
	workq_kern_bound_thread_terminate(uth->uu_kqr_bound);
	__builtin_unreachable();
}
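
/*
 * Illustrative sketch (helper and event names hypothetical): both
 * continuations above are entered via the continuation form of
 * thread_block(). A bound thread parks roughly like this, so a wakeup
 * resumes at the top of the continuation instead of after the call site:
 *
 *	assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
 *	workq_unlock(wq);
 *	thread_block_parameter(workq_bound_thread_unpark_continue, NULL);
 *	__builtin_unreachable();
 *
 * The wait_result_t handed to the continuation then encodes the reason
 * for the wakeup: THREAD_AWAKENED for an explicit wakeup, or
 * THREAD_INTERRUPTED when the thread is aborted for process termination.
 */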

#pragma mark misc

int
fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo)
{
	struct workqueue *wq = proc_get_wqptr(p);
	int error = 0;
	int activecount;

	if (wq == NULL) {
		return EINVAL;
	}

	/*
	 * This is sometimes called from interrupt context by the kperf sampler.
	 * In that case, it's not safe to spin trying to take the lock since we
	 * might already hold it. So, we just try-lock it and error out if it's
	 * already held. Since this is just a debugging aid, and all our callers
	 * are able to handle an error, that's fine.
	 */
	bool locked = workq_lock_try(wq);
	if (!locked) {
		return EBUSY;
	}

	wq_thactive_t act = _wq_thactive(wq);
	activecount = _wq_thactive_aggregate_downto_qos(wq, act,
	    WORKQ_THREAD_QOS_MIN, NULL, NULL);
	if (act & _wq_thactive_offset_for_qos(WORKQ_THREAD_QOS_MANAGER)) {
		activecount++;
	}
	pwqinfo->pwq_nthreads = wq->wq_nthreads;
	pwqinfo->pwq_runthreads = activecount;
	pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
	pwqinfo->pwq_state = 0;

	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
		pwqinfo->pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
	}

	if (wq->wq_nthreads >= wq_max_threads) {
		pwqinfo->pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
	}

	uint64_t total_cooperative_threads;
	total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
	if ((total_cooperative_threads == wq_cooperative_queue_max_size(wq)) &&
	    workq_has_cooperative_thread_requests(wq)) {
		pwqinfo->pwq_state |= WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT;
	}

	if (wq->wq_exceeded_active_constrained_thread_limit) {
		pwqinfo->pwq_state |= WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT;
	}

	workq_unlock(wq);
	return error;
}
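
/*
 * Usage sketch (illustrative only): because fill_procworkqueue() can fail
 * with EBUSY whenever the workq lock is contended, callers must be
 * prepared to proceed without the information:
 *
 *	struct proc_workqueueinfo pwqinfo;
 *	if (fill_procworkqueue(p, &pwqinfo) == 0) {
 *		// pwq_nthreads, pwq_runthreads and pwq_state are valid here
 *	}
 *
 * workqueue_get_pwq_exceeded() below is an in-tree caller that follows
 * this pattern.
 */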

boolean_t
workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
    boolean_t *exceeded_constrained)
{
	proc_t p = v;
	struct proc_workqueueinfo pwqinfo;
	int err;

	assert(p != NULL);
	assert(exceeded_total != NULL);
	assert(exceeded_constrained != NULL);

	err = fill_procworkqueue(p, &pwqinfo);
	if (err) {
		return FALSE;
	}
	if (!(pwqinfo.pwq_state & WQ_FLAGS_AVAILABLE)) {
		return FALSE;
	}

	*exceeded_total = (pwqinfo.pwq_state & WQ_EXCEEDED_TOTAL_THREAD_LIMIT);
	*exceeded_constrained = (pwqinfo.pwq_state & WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT);

	return TRUE;
}

uint64_t
workqueue_get_task_ss_flags_from_pwq_state_kdp(void * v)
{
	static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) ==
	    kTaskWqExceededConstrainedThreadLimit);
	static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) ==
	    kTaskWqExceededTotalThreadLimit);
	static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
	static_assert(((uint64_t)WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT << 34) ==
	    (uint64_t)kTaskWqExceededCooperativeThreadLimit);
	static_assert(((uint64_t)WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT << 34) ==
	    (uint64_t)kTaskWqExceededActiveConstrainedThreadLimit);
	static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT |
	    WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT |
	    WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT |
	    WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT) == 0x1F);
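	/*
	 * The asserts above pin the pwq_state -> task snapshot flag mapping:
	 * the three original bits (0x1, 0x2, 0x4) live 17 bits up in the
	 * snapshot flags, while the two newer bits (0x8, 0x10) live 34 bits
	 * up. Because no single shift covers both groups, the code below sets
	 * the kTaskWq* values individually rather than shifting pwq_state
	 * wholesale.
	 */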

	if (v == NULL) {
		return 0;
	}

	proc_t p = v;
	struct workqueue *wq = proc_get_wqptr(p);

	if (wq == NULL || workq_lock_is_acquired_kdp(wq)) {
		return 0;
	}

	uint64_t ss_flags = kTaskWqFlagsAvailable;

	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
		ss_flags |= kTaskWqExceededConstrainedThreadLimit;
	}

	if (wq->wq_nthreads >= wq_max_threads) {
		ss_flags |= kTaskWqExceededTotalThreadLimit;
	}

	uint64_t total_cooperative_threads;
	total_cooperative_threads = workq_num_cooperative_threads_scheduled_to_qos_internal(wq,
	    WORKQ_THREAD_QOS_MIN);
	if ((total_cooperative_threads == wq_cooperative_queue_max_size(wq)) &&
	    workq_has_cooperative_thread_requests(wq)) {
		ss_flags |= kTaskWqExceededCooperativeThreadLimit;
	}

	if (wq->wq_exceeded_active_constrained_thread_limit) {
		ss_flags |= kTaskWqExceededActiveConstrainedThreadLimit;
	}

	return ss_flags;
}

void
workq_init(void)
{
	clock_interval_to_absolutetime_interval(wq_stalled_window.usecs,
	    NSEC_PER_USEC, &wq_stalled_window.abstime);
	clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs,
	    NSEC_PER_USEC, &wq_reduce_pool_window.abstime);
	clock_interval_to_absolutetime_interval(wq_max_timer_interval.usecs,
	    NSEC_PER_USEC, &wq_max_timer_interval.abstime);

	thread_deallocate_daemon_register_queue(&workq_deallocate_queue,
	    workq_deallocate_queue_invoke);
}
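
/*
 * Illustrative sketch (values hypothetical): each tunable above pairs a
 * microsecond value with a mach absolute time equivalent, converted once
 * at init so hot paths can compare raw mach_absolute_time() deltas:
 *
 *	uint64_t abstime;
 *	clock_interval_to_absolutetime_interval(100, NSEC_PER_USEC, &abstime);
 *	// abstime now holds 100us expressed in mach absolute time units
 */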