xref: /xnu-11215/osfmk/kern/sched_prim.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	sched_prim.c
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	1986
62  *
63  *	Scheduling primitives
64  *
65  */
66 
67 #include <debug.h>
68 
69 #include <mach/mach_types.h>
70 #include <mach/machine.h>
71 #include <mach/policy.h>
72 #include <mach/sync_policy.h>
73 #include <mach/thread_act.h>
74 
75 #include <machine/machine_routines.h>
76 #include <machine/sched_param.h>
77 #include <machine/machine_cpu.h>
78 #include <machine/limits.h>
79 #include <machine/atomic.h>
80 
81 #include <machine/commpage.h>
82 
83 #include <kern/kern_types.h>
84 #include <kern/backtrace.h>
85 #include <kern/clock.h>
86 #include <kern/cpu_number.h>
87 #include <kern/cpu_data.h>
88 #include <kern/smp.h>
89 #include <kern/debug.h>
90 #include <kern/macro_help.h>
91 #include <kern/machine.h>
92 #include <kern/misc_protos.h>
93 #include <kern/monotonic.h>
94 #include <kern/processor.h>
95 #include <kern/queue.h>
96 #include <kern/recount.h>
97 #include <kern/restartable.h>
98 #include <kern/sched.h>
99 #include <kern/sched_prim.h>
100 #include <kern/sfi.h>
101 #include <kern/syscall_subr.h>
102 #include <kern/task.h>
103 #include <kern/thread.h>
104 #include <kern/thread_group.h>
105 #include <kern/ledger.h>
106 #include <kern/timer_queue.h>
107 #include <kern/waitq.h>
108 #include <kern/policy_internal.h>
109 
110 #include <vm/pmap.h>
111 #include <vm/vm_kern.h>
112 #include <vm/vm_map.h>
113 #include <vm/vm_pageout_xnu.h>
114 
115 #include <mach/sdt.h>
116 #include <mach/mach_host.h>
117 #include <mach/host_info.h>
118 
119 #include <sys/kdebug.h>
120 #include <kperf/kperf.h>
121 #include <kern/kpc.h>
122 #include <san/kasan.h>
123 #include <kern/pms.h>
124 #include <kern/host.h>
125 #include <stdatomic.h>
126 #include <os/atomic_private.h>
127 
128 #ifdef KDBG_MACOS_RELEASE
129 #define KTRC KDBG_MACOS_RELEASE
130 #else
131 #define KTRC KDBG_RELEASE
132 #endif
133 
134 struct sched_statistics PERCPU_DATA(sched_stats);
135 bool sched_stats_active;
136 
137 static uint64_t
138 deadline_add(uint64_t d, uint64_t e)
139 {
140 	uint64_t sum;
141 	return os_add_overflow(d, e, &sum) ? UINT64_MAX : sum;
142 }
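/*
 * Worked example of deadline_add(): it saturates rather than wrapping, e.g.
 *
 *	deadline_add(100, 50)             == 150
 *	deadline_add(UINT64_MAX - 10, 50) == UINT64_MAX
 *
 * so an overflowing deadline degrades to the farthest-possible deadline
 * instead of an artificially early one.
 */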
143 
144 int
145 rt_runq_count(processor_set_t pset)
146 {
147 	return os_atomic_load(&SCHED(rt_runq)(pset)->count, relaxed);
148 }
149 
150 uint64_t
151 rt_runq_earliest_deadline(processor_set_t pset)
152 {
153 	return os_atomic_load_wide(&SCHED(rt_runq)(pset)->earliest_deadline, relaxed);
154 }
155 
156 static int
157 rt_runq_priority(processor_set_t pset)
158 {
159 	pset_assert_locked(pset);
160 	rt_queue_t rt_run_queue = SCHED(rt_runq)(pset);
161 
162 	bitmap_t *map = rt_run_queue->bitmap;
163 	int i = bitmap_first(map, NRTQS);
164 	assert(i < NRTQS);
165 
166 	if (i >= 0) {
167 		return i + BASEPRI_RTQUEUES;
168 	}
169 
170 	return i;
171 }
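/*
 * In other words, rt_runq_priority() reports the sched_pri of the most
 * favored non-empty per-priority RT queue (bitmap index + BASEPRI_RTQUEUES),
 * or -1 when no realtime threads are queued on the pset.
 */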
172 
173 static thread_t rt_runq_first(rt_queue_t rt_runq);
174 
175 #if DEBUG
176 static void
177 check_rt_runq_consistency(rt_queue_t rt_run_queue, thread_t thread)
178 {
179 	bitmap_t *map = rt_run_queue->bitmap;
180 
181 	uint64_t earliest_deadline = RT_DEADLINE_NONE;
182 	uint32_t constraint = RT_CONSTRAINT_NONE;
183 	int ed_index = NOPRI;
184 	int count = 0;
185 	bool found_thread = false;
186 
187 	for (int pri = BASEPRI_RTQUEUES; pri <= MAXPRI; pri++) {
188 		int i = pri - BASEPRI_RTQUEUES;
189 		rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
190 		queue_t queue = &rt_runq->pri_queue;
191 		queue_entry_t iter;
192 		int n = 0;
193 		uint64_t previous_deadline = 0;
194 		qe_foreach(iter, queue) {
195 			thread_t iter_thread = qe_element(iter, struct thread, runq_links);
196 			assert_thread_magic(iter_thread);
197 			if (iter_thread == thread) {
198 				found_thread = true;
199 			}
200 			assert(iter_thread->sched_pri == (i + BASEPRI_RTQUEUES));
201 			assert(iter_thread->realtime.deadline < RT_DEADLINE_NONE);
202 			assert(iter_thread->realtime.constraint < RT_CONSTRAINT_NONE);
203 			assert(previous_deadline <= iter_thread->realtime.deadline);
204 			n++;
205 			if (iter == queue_first(queue)) {
206 				assert(rt_runq->pri_earliest_deadline == iter_thread->realtime.deadline);
207 				assert(rt_runq->pri_constraint == iter_thread->realtime.constraint);
208 			}
209 			previous_deadline = iter_thread->realtime.deadline;
210 		}
211 		assert(n == rt_runq->pri_count);
212 		if (n == 0) {
213 			assert(bitmap_test(map, i) == false);
214 			assert(rt_runq->pri_earliest_deadline == RT_DEADLINE_NONE);
215 			assert(rt_runq->pri_constraint == RT_CONSTRAINT_NONE);
216 		} else {
217 			assert(bitmap_test(map, i) == true);
218 		}
219 		if (rt_runq->pri_earliest_deadline < earliest_deadline) {
220 			earliest_deadline = rt_runq->pri_earliest_deadline;
221 			constraint = rt_runq->pri_constraint;
222 			ed_index = i;
223 		}
224 		count += n;
225 	}
226 	assert(os_atomic_load_wide(&rt_run_queue->earliest_deadline, relaxed) == earliest_deadline);
227 	assert(os_atomic_load(&rt_run_queue->count, relaxed) == count);
228 	assert(os_atomic_load(&rt_run_queue->constraint, relaxed) == constraint);
229 	assert(os_atomic_load(&rt_run_queue->ed_index, relaxed) == ed_index);
230 	if (thread) {
231 		assert(found_thread);
232 	}
233 }
234 #define CHECK_RT_RUNQ_CONSISTENCY(q, th)    check_rt_runq_consistency(q, th)
235 #else
236 #define CHECK_RT_RUNQ_CONSISTENCY(q, th)    do {} while (0)
237 #endif
238 
239 uint32_t rt_constraint_threshold;
240 
241 static bool
242 rt_runq_is_low_latency(processor_set_t pset)
243 {
244 	return os_atomic_load(&SCHED(rt_runq)(pset)->constraint, relaxed) <= rt_constraint_threshold;
245 }
246 
247 TUNABLE(bool, cpulimit_affects_quantum, "cpulimit_affects_quantum", true);
248 
249 /* TODO: enable this, to 50us (less than the deferred IPI latency, to beat a spill) */
250 TUNABLE(uint32_t, nonurgent_preemption_timer_us, "nonurgent_preemption_timer", 0); /* microseconds */
251 static uint64_t nonurgent_preemption_timer_abs = 0;
252 
253 #define         DEFAULT_PREEMPTION_RATE         100             /* (1/s) */
254 TUNABLE(int, default_preemption_rate, "preempt", DEFAULT_PREEMPTION_RATE);
255 
256 #define         DEFAULT_BG_PREEMPTION_RATE      400             /* (1/s) */
257 TUNABLE(int, default_bg_preemption_rate, "bg_preempt", DEFAULT_BG_PREEMPTION_RATE);
258 
259 #if XNU_TARGET_OS_XR
260 #define         MAX_UNSAFE_RT_QUANTA               1
261 #define         SAFE_RT_MULTIPLIER                 5
262 #else
263 #define         MAX_UNSAFE_RT_QUANTA               100
264 #define         SAFE_RT_MULTIPLIER                 2
265 #endif /* XNU_TARGET_OS_XR */
266 
267 #define         MAX_UNSAFE_FIXED_QUANTA               100
268 #define         SAFE_FIXED_MULTIPLIER                 2
269 
270 TUNABLE_DEV_WRITEABLE(int, max_unsafe_rt_quanta, "max_unsafe_rt_quanta", MAX_UNSAFE_RT_QUANTA);
271 TUNABLE_DEV_WRITEABLE(int, max_unsafe_fixed_quanta, "max_unsafe_fixed_quanta", MAX_UNSAFE_FIXED_QUANTA);
272 
273 TUNABLE_DEV_WRITEABLE(int, safe_rt_multiplier, "safe_rt_multiplier", SAFE_RT_MULTIPLIER);
274 TUNABLE_DEV_WRITEABLE(int, safe_fixed_multiplier, "safe_fixed_multiplier", SAFE_FIXED_MULTIPLIER);
275 
276 #define         MAX_POLL_QUANTA                 2
277 TUNABLE(int, max_poll_quanta, "poll", MAX_POLL_QUANTA);
278 
279 #define         SCHED_POLL_YIELD_SHIFT          4               /* 1/16 */
280 int             sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
281 
282 uint64_t        max_poll_computation;
283 
284 uint64_t        max_unsafe_rt_computation;
285 uint64_t        max_unsafe_fixed_computation;
286 uint64_t        sched_safe_rt_duration;
287 uint64_t        sched_safe_fixed_duration;
288 
289 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
290 
291 uint32_t        std_quantum;
292 uint32_t        min_std_quantum;
293 uint32_t        bg_quantum;
294 
295 uint32_t        std_quantum_us;
296 uint32_t        bg_quantum_us;
297 
298 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
299 
300 uint32_t        thread_depress_time;
301 uint32_t        default_timeshare_computation;
302 uint32_t        default_timeshare_constraint;
303 
304 uint32_t        max_rt_quantum;
305 uint32_t        min_rt_quantum;
306 
307 uint32_t        rt_deadline_epsilon;
308 
309 uint32_t        rt_constraint_threshold;
310 
311 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
312 
313 unsigned                sched_tick;
314 uint32_t                sched_tick_interval;
315 
316 /* Timeshare load calculation interval (15ms) */
317 uint32_t                sched_load_compute_interval_us = 15000;
318 uint64_t                sched_load_compute_interval_abs;
319 static _Atomic uint64_t sched_load_compute_deadline;
320 
321 uint32_t        sched_pri_shifts[TH_BUCKET_MAX];
322 uint32_t        sched_fixed_shift;
323 
324 uint32_t        sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
325 
326 /* Allow foreground to decay past default to resolve inversions */
327 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
328 int             sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
329 
330 /* Defaults for timer deadline profiling */
331 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
332 	                                               * 2ms */
333 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
334 	                                               *   <= 5ms */
335 
336 uint64_t timer_deadline_tracking_bin_1;
337 uint64_t timer_deadline_tracking_bin_2;
338 
339 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
340 
341 thread_t sched_maintenance_thread;
342 
343 LCK_GRP_DECLARE(cluster_powerdown_grp, "cluster_powerdown");
344 LCK_MTX_DECLARE(cluster_powerdown_lock, &cluster_powerdown_grp);
345 
346 /* interrupts disabled lock to guard core online, recommendation, pcs state */
347 decl_simple_lock_data(, sched_available_cores_lock);
348 
349 /*
350  * Locked by sched_available_cores_lock.
351  * cluster_powerdown_lock is held while making changes to CPU offline state.
352  */
353 static struct global_powered_cores_state {
354 	/*
355 	 * Set when PCS has seen all cores boot up and is ready to manage online
356 	 * state.  CPU recommendation works before this point.
357 	 */
358 	bool    pcs_init_completed;
359 
360 	cpumap_t pcs_managed_cores;         /* all cores managed by the PCS */
361 
362 	/*
363 	 * Inputs for CPU offline state provided by clients
364 	 */
365 	cpumap_t pcs_requested_online_user; /* updated by processor_start/exit from userspace */
366 	cpumap_t pcs_requested_online_clpc_user;
367 	cpumap_t pcs_requested_online_clpc_system;
368 	cpumap_t pcs_required_online_pmgr;  /* e.g. ANE needs these powered for their rail to be happy */
369 	cpumap_t pcs_required_online_system;  /* e.g. smt1 for interrupts, or the boot processor unless a boot-arg overrides it; required cores disable instead of sleeping */
370 
371 	/*
372 	 * When a suspend count is held, all CPUs must be powered up.
373 	 */
374 	int32_t  pcs_powerdown_suspend_count;
375 
376 	/*
377 	 * Disable automatic cluster powerdown in favor of explicit user core online control
378 	 */
379 	bool     pcs_user_online_core_control;
380 	bool     pcs_wants_kernel_sleep;
381 	bool     pcs_in_kernel_sleep;
382 
383 	struct powered_cores_state {
384 		/*
385 		 * The input into the recommendation computation from update powered cores.
386 		 */
387 		cpumap_t pcs_powerdown_recommended_cores;
388 
389 		/*
390 		 * These cores are online and are not powered down.
391 		 *
392 		 * Processors with processor->processor_online bit set.
393 		 */
394 		cpumap_t pcs_online_cores;
395 
396 		/*
397 		 * These cores are disabled or powered down
398 		 * due to temporary reasons and will come back under presented load
399 		 * so the user should still see them as active in the cpu count.
400 		 *
401 		 * Processors with processor->shutdown_temporary bit set.
402 		 */
403 		cpumap_t pcs_tempdown_cores;
404 	} pcs_effective;
405 
406 	/* The 'goal state' PCS has computed and is attempting to apply */
407 	struct powered_cores_state pcs_requested;
408 
409 	/*
410 	 * Inputs into CPU recommended cores provided by clients.
411 	 * Note that these may be changed under the available cores lock and
412 	 * become effective while sched_update_powered_cores_drops_lock is in
413 	 * the middle of making changes to CPU online state.
414 	 */
415 
416 	cpumap_t        pcs_requested_recommended_clpc;
417 	cpumap_t        pcs_requested_recommended_clpc_system;
418 	cpumap_t        pcs_requested_recommended_clpc_user;
419 	bool            pcs_recommended_clpc_failsafe_active;
420 	bool            pcs_sleep_override_recommended;
421 
422 	/*
423 	 * These cores are recommended and can be used for execution
424 	 * of non-bound threads.
425 	 *
426 	 * Processors with processor->is_recommended bit set.
427 	 */
428 	cpumap_t pcs_recommended_cores;
429 
430 	/*
431 	 * These are for the debugger.
432 	 * Use volatile to stop the compiler from optimizing out the stores
433 	 */
434 	volatile processor_reason_t pcs_in_flight_reason;
435 	volatile processor_reason_t pcs_previous_reason;
436 } pcs = {
437 	/*
438 	 * Powerdown is suspended during boot until after all CPUs finish booting,
439 	 * released by sched_cpu_init_completed.
440 	 */
441 	.pcs_powerdown_suspend_count = 1,
442 	.pcs_requested_online_user = ALL_CORES_POWERED,
443 	.pcs_requested_online_clpc_user = ALL_CORES_POWERED,
444 	.pcs_requested_online_clpc_system = ALL_CORES_POWERED,
445 	.pcs_in_flight_reason = REASON_NONE,
446 	.pcs_previous_reason = REASON_NONE,
447 	.pcs_requested.pcs_powerdown_recommended_cores = ALL_CORES_POWERED,
448 	.pcs_requested_recommended_clpc = ALL_CORES_RECOMMENDED,
449 	.pcs_requested_recommended_clpc_system = ALL_CORES_RECOMMENDED,
450 	.pcs_requested_recommended_clpc_user = ALL_CORES_RECOMMENDED,
451 };
452 
453 uint64_t sysctl_sched_recommended_cores = ALL_CORES_RECOMMENDED;
454 
455 static int sched_last_resort_cpu(void);
456 
457 static void sched_update_recommended_cores_locked(processor_reason_t reason, cpumap_t core_going_offline);
458 static void sched_update_powered_cores_drops_lock(processor_reason_t requested_reason, spl_t s);
459 
460 #if __arm64__
461 static void sched_recommended_cores_maintenance(void);
462 uint64_t    perfcontrol_failsafe_starvation_threshold;
463 extern char *proc_name_address(struct proc *p);
464 #endif /* __arm64__ */
465 
466 uint64_t        sched_one_second_interval;
467 boolean_t       allow_direct_handoff = TRUE;
468 
469 /* Forwards */
470 
471 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
472 
473 static void load_shift_init(void);
474 static void preempt_pri_init(void);
475 
476 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
477 
478 thread_t        processor_idle(
479 	thread_t                        thread,
480 	processor_t                     processor);
481 
482 static ast_t
483 csw_check_locked(
484 	thread_t        thread,
485 	processor_t     processor,
486 	processor_set_t pset,
487 	ast_t           check_reason);
488 
489 static void processor_setrun(
490 	processor_t                    processor,
491 	thread_t                       thread,
492 	integer_t                      options);
493 
494 static void
495 sched_realtime_timebase_init(void);
496 
497 static void
498 sched_timer_deadline_tracking_init(void);
499 
500 #if     DEBUG
501 extern int debug_task;
502 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
503 #else
504 #define TLOG(a, fmt, args...) do {} while (0)
505 #endif
506 
507 static processor_t
508 thread_bind_internal(
509 	thread_t                thread,
510 	processor_t             processor);
511 
512 static void
513 sched_vm_group_maintenance(void);
514 
515 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
516 int8_t          sched_load_shifts[NRQS];
517 bitmap_t        sched_preempt_pri[BITMAP_LEN(NRQS_MAX)];
518 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
519 
520 #define cpumap_foreach(cpu_id, cpumap) \
521 	for (int cpu_id = lsb_first(cpumap); \
522 	    (cpu_id) >= 0; \
523 	     cpu_id = lsb_next((cpumap), cpu_id))
524 
525 #define foreach_node(node) \
526 	for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list)
527 
528 #define foreach_pset_id(pset_id, node) \
529 	for (int pset_id = lsb_first((node)->pset_map); \
530 	    pset_id >= 0; \
531 	    pset_id = lsb_next((node)->pset_map, pset_id))
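/*
 * Hypothetical usage sketch for the three iteration helpers above, assuming
 * the usual pset_array[] / pset->cpu_bitmask definitions elsewhere in osfmk:
 *
 *	foreach_node(node) {
 *		foreach_pset_id(pset_id, node) {
 *			processor_set_t pset = pset_array[pset_id];
 *			cpumap_foreach(cpu_id, pset->cpu_bitmask) {
 *				... visit every CPU of every pset of every node ...
 *			}
 *		}
 *	}
 */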
532 
533 /*
534  * Statically allocate a buffer to hold the longest possible
535  * scheduler description string, as currently implemented.
536  * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
537  * to export to userspace via sysctl(3). If either version
538  * changes, update the other.
539  *
540  * Note that in addition to being an upper bound on the strings
541  * in the kernel, it's also an exact parameter to PE_get_default(),
542  * which interrogates the device tree on some platforms. That
543  * API requires the caller know the exact size of the device tree
544  * property, so we need both a legacy size (32) and the current size
545  * (48) to deal with old and new device trees. The device tree property
546  * is similarly padded to a fixed size so that the same kernel image
547  * can run on multiple devices with different schedulers configured
548  * in the device tree.
549  */
550 char sched_string[SCHED_STRING_MAX_LENGTH];
551 
552 uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
553 
554 /* Global flag which indicates whether Background Stepper Context is enabled */
555 static int cpu_throttle_enabled = 1;
556 
557 #if DEVELOPMENT || DEBUG
558 int enable_task_set_cluster_type = 0;
559 bool system_ecore_only = false;
560 #endif /* DEVELOPMENT || DEBUG */
561 
562 void
563 sched_init(void)
564 {
565 	boolean_t direct_handoff = FALSE;
566 	kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
567 
568 	if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
569 		/* No boot-args, check in device tree */
570 		if (!PE_get_default("kern.sched_pri_decay_limit",
571 		    &sched_pri_decay_band_limit,
572 		    sizeof(sched_pri_decay_band_limit))) {
573 			/* Allow decay all the way to normal limits */
574 			sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
575 		}
576 	}
577 
578 	kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
579 
580 	if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
581 		kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
582 	}
583 	strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
584 
585 #if __arm64__
586 	clock_interval_to_absolutetime_interval(expecting_ipi_wfe_timeout_usec, NSEC_PER_USEC, &expecting_ipi_wfe_timeout_mt);
587 #endif /* __arm64__ */
588 
589 	SCHED(init)();
590 	SCHED(rt_init)(&pset0);
591 	sched_timer_deadline_tracking_init();
592 
593 	SCHED(pset_init)(&pset0);
594 	SCHED(processor_init)(master_processor);
595 
596 	if (PE_parse_boot_argn("direct_handoff", &direct_handoff, sizeof(direct_handoff))) {
597 		allow_direct_handoff = direct_handoff;
598 	}
599 
600 #if DEVELOPMENT || DEBUG
601 	if (PE_parse_boot_argn("enable_skstsct", &enable_task_set_cluster_type, sizeof(enable_task_set_cluster_type))) {
602 		system_ecore_only = (enable_task_set_cluster_type == 2);
603 	}
604 #endif /* DEVELOPMENT || DEBUG */
605 }
606 
607 void
608 sched_timebase_init(void)
609 {
610 	uint64_t        abstime;
611 
612 	clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
613 	sched_one_second_interval = abstime;
614 
615 	SCHED(timebase_init)();
616 	sched_realtime_timebase_init();
617 }
618 
619 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
620 
621 void
622 sched_timeshare_init(void)
623 {
624 	/*
625 	 * Calculate the timeslicing quantum
626 	 * in us.
627 	 */
628 	if (default_preemption_rate < 1) {
629 		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
630 	}
631 	std_quantum_us = (1000 * 1000) / default_preemption_rate;
632 
633 	printf("standard timeslicing quantum is %d us\n", std_quantum_us);
634 
635 	if (default_bg_preemption_rate < 1) {
636 		default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
637 	}
638 	bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
639 
640 	printf("standard background quantum is %d us\n", bg_quantum_us);
641 
642 	load_shift_init();
643 	preempt_pri_init();
644 	sched_tick = 0;
645 }
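/*
 * Worked numbers for the defaults above: with default_preemption_rate == 100
 * the standard quantum is 1000000 / 100 = 10000 us (10 ms), and with
 * default_bg_preemption_rate == 400 the background quantum is
 * 1000000 / 400 = 2500 us (2.5 ms).
 */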
646 
647 void
648 sched_set_max_unsafe_rt_quanta(int max)
649 {
650 	const uint32_t quantum_size = SCHED(initial_quantum_size)(THREAD_NULL);
651 
652 	max_unsafe_rt_computation = ((uint64_t)max) * quantum_size;
653 
654 	const int mult = safe_rt_multiplier <= 0 ? 2 : safe_rt_multiplier;
655 	sched_safe_rt_duration = mult * ((uint64_t)max) * quantum_size;
656 
657 
658 #if DEVELOPMENT || DEBUG
659 	max_unsafe_rt_quanta = max;
660 #else
661 	/*
662 	 * On RELEASE kernels, this is only called on boot where
663 	 * max is already equal to max_unsafe_rt_quanta.
664 	 */
665 	assert3s(max, ==, max_unsafe_rt_quanta);
666 #endif
667 }
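/*
 * Illustrative arithmetic, assuming a typical 10 ms standard quantum: with
 * max == 100 (the non-XR MAX_UNSAFE_RT_QUANTA) the limit
 * max_unsafe_rt_computation is 100 * 10 ms = 1 s, and with the default
 * multiplier of 2 the fail-safe window sched_safe_rt_duration becomes 2 s.
 */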
668 
669 void
670 sched_set_max_unsafe_fixed_quanta(int max)
671 {
672 	const uint32_t quantum_size = SCHED(initial_quantum_size)(THREAD_NULL);
673 
674 	max_unsafe_fixed_computation = ((uint64_t)max) * quantum_size;
675 
676 	const int mult = safe_fixed_multiplier <= 0 ? 2 : safe_fixed_multiplier;
677 	sched_safe_fixed_duration = mult * ((uint64_t)max) * quantum_size;
678 
679 #if DEVELOPMENT || DEBUG
680 	max_unsafe_fixed_quanta = max;
681 #else
682 	/*
683 	 * On RELEASE kernels, this is only called on boot where
684 	 * max is already equal to max_unsafe_fixed_quanta.
685 	 */
686 	assert3s(max, ==, max_unsafe_fixed_quanta);
687 #endif
688 }
689 
690 void
691 sched_timeshare_timebase_init(void)
692 {
693 	uint64_t        abstime;
694 	uint32_t        shift;
695 
696 	/* standard timeslicing quantum */
697 	clock_interval_to_absolutetime_interval(
698 		std_quantum_us, NSEC_PER_USEC, &abstime);
699 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
700 	std_quantum = (uint32_t)abstime;
701 
702 	/* smallest remaining quantum (250 us) */
703 	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
704 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
705 	min_std_quantum = (uint32_t)abstime;
706 
707 	/* quantum for background tasks */
708 	clock_interval_to_absolutetime_interval(
709 		bg_quantum_us, NSEC_PER_USEC, &abstime);
710 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
711 	bg_quantum = (uint32_t)abstime;
712 
713 	/* scheduler tick interval */
714 	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
715 	    NSEC_PER_USEC, &abstime);
716 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
717 	sched_tick_interval = (uint32_t)abstime;
718 
719 	/* timeshare load calculation interval & deadline initialization */
720 	clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
721 	os_atomic_init(&sched_load_compute_deadline, sched_load_compute_interval_abs);
722 
723 	/*
724 	 * Compute conversion factor from usage to
725 	 * timesharing priorities with 5/8 ** n aging.
726 	 */
727 	abstime = (abstime * 5) / 3;
728 	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
729 		abstime >>= 1;
730 	}
731 	sched_fixed_shift = shift;
732 
733 	for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
734 		sched_pri_shifts[i] = INT8_MAX;
735 	}
736 
737 	sched_set_max_unsafe_rt_quanta(max_unsafe_rt_quanta);
738 	sched_set_max_unsafe_fixed_quanta(max_unsafe_fixed_quanta);
739 
740 	max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
741 	thread_depress_time = 1 * std_quantum;
742 	default_timeshare_computation = std_quantum / 2;
743 	default_timeshare_constraint = std_quantum;
744 
745 #if __arm64__
746 	perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
747 #endif /* __arm64__ */
748 
749 	if (nonurgent_preemption_timer_us) {
750 		clock_interval_to_absolutetime_interval(nonurgent_preemption_timer_us, NSEC_PER_USEC, &abstime);
751 		nonurgent_preemption_timer_abs = abstime;
752 	}
753 }
754 
755 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
756 
757 void
758 pset_rt_init(processor_set_t pset)
759 {
760 	for (int pri = BASEPRI_RTQUEUES; pri <= MAXPRI; pri++) {
761 		int i = pri - BASEPRI_RTQUEUES;
762 		rt_queue_pri_t *rqi = &pset->rt_runq.rt_queue_pri[i];
763 		queue_init(&rqi->pri_queue);
764 		rqi->pri_count = 0;
765 		rqi->pri_earliest_deadline = RT_DEADLINE_NONE;
766 		rqi->pri_constraint = RT_CONSTRAINT_NONE;
767 	}
768 	os_atomic_init(&pset->rt_runq.count, 0);
769 	os_atomic_init(&pset->rt_runq.earliest_deadline, RT_DEADLINE_NONE);
770 	os_atomic_init(&pset->rt_runq.constraint, RT_CONSTRAINT_NONE);
771 	os_atomic_init(&pset->rt_runq.ed_index, NOPRI);
772 	memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
773 }
774 
775 /* epsilon for comparing RT deadlines */
776 int rt_deadline_epsilon_us = 100;
777 
778 int
779 sched_get_rt_deadline_epsilon(void)
780 {
781 	return rt_deadline_epsilon_us;
782 }
783 
784 void
785 sched_set_rt_deadline_epsilon(int new_epsilon_us)
786 {
787 	rt_deadline_epsilon_us = new_epsilon_us;
788 
789 	uint64_t abstime;
790 	clock_interval_to_absolutetime_interval(rt_deadline_epsilon_us, NSEC_PER_USEC, &abstime);
791 	assert((abstime >> 32) == 0 && ((rt_deadline_epsilon_us == 0) || (uint32_t)abstime != 0));
792 	rt_deadline_epsilon = (uint32_t)abstime;
793 }
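/*
 * With the default rt_deadline_epsilon_us of 100, realtime deadlines that
 * differ by less than roughly 100 us are intended to compare as effectively
 * equal elsewhere in the scheduler, so a running realtime thread is not
 * displaced for a marginally earlier deadline.
 */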
794 
795 static void
796 sched_realtime_timebase_init(void)
797 {
798 	uint64_t abstime;
799 
800 	/* smallest rt computation (50 us) */
801 	clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
802 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
803 	min_rt_quantum = (uint32_t)abstime;
804 
805 	/* maximum rt computation (50 ms) */
806 	clock_interval_to_absolutetime_interval(
807 		50, 1000 * NSEC_PER_USEC, &abstime);
808 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
809 	max_rt_quantum = (uint32_t)abstime;
810 
811 	/* constraint threshold for sending backup IPIs (4 ms) */
812 	clock_interval_to_absolutetime_interval(4, NSEC_PER_MSEC, &abstime);
813 	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
814 	rt_constraint_threshold = (uint32_t)abstime;
815 
816 	/* epsilon for comparing deadlines */
817 	sched_set_rt_deadline_epsilon(rt_deadline_epsilon_us);
818 }
819 
820 void
821 sched_check_spill(processor_set_t pset, thread_t thread)
822 {
823 	(void)pset;
824 	(void)thread;
825 
826 	return;
827 }
828 
829 bool
830 sched_thread_should_yield(processor_t processor, thread_t thread)
831 {
832 	(void)thread;
833 
834 	return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
835 }
836 
837 /* Default implementations of .steal_thread_enabled */
838 bool
839 sched_steal_thread_DISABLED(processor_set_t pset)
840 {
841 	(void)pset;
842 	return false;
843 }
844 
845 bool
846 sched_steal_thread_enabled(processor_set_t pset)
847 {
848 	return bit_count(pset->node->pset_map) > 1;
849 }
850 
851 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
852 
853 /*
854  * Set up values for timeshare
855  * loading factors.
856  */
857 static void
858 load_shift_init(void)
859 {
860 	int8_t          k, *p = sched_load_shifts;
861 	uint32_t        i, j;
862 
863 	uint32_t        sched_decay_penalty = 1;
864 
865 	if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
866 		kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
867 	}
868 
869 	if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
870 		kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
871 	}
872 
873 	if (sched_decay_penalty == 0) {
874 		/*
875 		 * There is no penalty for timeshare threads for using too much
876 		 * CPU, so set all load shifts to INT8_MIN. Even under high load,
877 		 * sched_pri_shift will be >INT8_MAX, and there will be no
878 		 * penalty applied to threads (nor will sched_usage be updated per
879 		 * thread).
880 		 */
881 		for (i = 0; i < NRQS; i++) {
882 			sched_load_shifts[i] = INT8_MIN;
883 		}
884 
885 		return;
886 	}
887 
888 	*p++ = INT8_MIN; *p++ = 0;
889 
890 	/*
891 	 * For a given system load "i", the per-thread priority
892 	 * penalty per quantum of CPU usage is ~2^k priority
893 	 * levels. "sched_decay_penalty" can cause more
894 	 * array entries to be filled with smaller "k" values
895 	 */
896 	for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
897 		for (j <<= 1; (i < j) && (i < NRQS); ++i) {
898 			*p++ = k;
899 		}
900 	}
901 }
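/*
 * Sketch of the resulting table with the default sched_decay_penalty of 1:
 *
 *	sched_load_shifts[0]     == INT8_MIN   (no runnable threads)
 *	sched_load_shifts[1]     == 0
 *	sched_load_shifts[2..3]  == 1
 *	sched_load_shifts[4..7]  == 2
 *	sched_load_shifts[8..15] == 3
 *	...
 *
 * i.e. the per-quantum priority penalty grows roughly as log2 of the number
 * of runnable timeshare threads.
 */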
902 
903 static void
904 preempt_pri_init(void)
905 {
906 	bitmap_t *p = sched_preempt_pri;
907 
908 	for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
909 		bitmap_set(p, i);
910 	}
911 
912 	for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
913 		bitmap_set(p, i);
914 	}
915 }
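/*
 * The resulting sched_preempt_pri bitmap marks two bands as preemption-
 * urgent: foreground timeshare priorities up to (but not including)
 * MINPRI_KERNEL, plus BASEPRI_PREEMPT through MAXPRI.  A newly runnable
 * thread whose priority tests positive here is treated as warranting an
 * immediate preemption AST rather than waiting for the running thread's
 * quantum to expire.
 */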
916 
917 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
918 
919 void
920 check_monotonic_time(uint64_t ctime)
921 {
922 	processor_t processor = current_processor();
923 	uint64_t last_dispatch = processor->last_dispatch;
924 
925 	if (last_dispatch > ctime) {
926 		panic("Non-monotonic time: last_dispatch at 0x%llx, ctime 0x%llx",
927 		    last_dispatch, ctime);
928 	}
929 }
930 
931 
932 /*
933  *	Thread wait timer expiration.
934  *	Runs in timer interrupt context with interrupts disabled.
935  */
936 void
937 thread_timer_expire(void *p0, __unused void *p1)
938 {
939 	thread_t thread = (thread_t)p0;
940 
941 	assert_thread_magic(thread);
942 
943 	assert(ml_get_interrupts_enabled() == FALSE);
944 
945 	thread_lock(thread);
946 
947 	if (thread->wait_timer_armed) {
948 		thread->wait_timer_armed = false;
949 		clear_wait_internal(thread, THREAD_TIMED_OUT);
950 		/* clear_wait_internal may have dropped and retaken the thread lock */
951 	}
952 
953 	thread->wait_timer_active--;
954 
955 	thread_unlock(thread);
956 }
957 
958 /*
959  *	thread_unblock:
960  *
961  *	Unblock thread on wake up.
962  *
963  *	Returns TRUE if the thread should now be placed on the runqueue.
964  *
965  *	Thread must be locked.
966  *
967  *	Called at splsched().
968  */
969 boolean_t
970 thread_unblock(
971 	thread_t                thread,
972 	wait_result_t   wresult)
973 {
974 	boolean_t               ready_for_runq = FALSE;
975 	thread_t                cthread = current_thread();
976 	uint32_t                new_run_count;
977 	int                             old_thread_state;
978 
979 	/*
980 	 *	Set wait_result.
981 	 */
982 	thread->wait_result = wresult;
983 
984 	/*
985 	 *	Cancel pending wait timer.
986 	 */
987 	if (thread->wait_timer_armed) {
988 		if (timer_call_cancel(thread->wait_timer)) {
989 			thread->wait_timer_active--;
990 		}
991 		thread->wait_timer_armed = false;
992 	}
993 
994 	boolean_t aticontext, pidle;
995 	ml_get_power_state(&aticontext, &pidle);
996 
997 	/*
998 	 *	Update scheduling state: not waiting,
999 	 *	set running.
1000 	 */
1001 	old_thread_state = thread->state;
1002 	thread->state = (old_thread_state | TH_RUN) &
1003 	    ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT | TH_WAKING);
1004 
1005 	if ((old_thread_state & TH_RUN) == 0) {
1006 		uint64_t ctime = mach_approximate_time();
1007 
1008 		check_monotonic_time(ctime);
1009 
1010 		thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
1011 		timer_start(&thread->runnable_timer, ctime);
1012 
1013 		ready_for_runq = TRUE;
1014 
1015 		if (old_thread_state & TH_WAIT_REPORT) {
1016 			(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
1017 		}
1018 
1019 		/* Update the runnable thread count */
1020 		new_run_count = SCHED(run_count_incr)(thread);
1021 
1022 #if CONFIG_SCHED_AUTO_JOIN
1023 		if (aticontext == FALSE && work_interval_should_propagate(cthread, thread)) {
1024 			work_interval_auto_join_propagate(cthread, thread);
1025 		}
1026 #endif /*CONFIG_SCHED_AUTO_JOIN */
1027 
1028 	} else {
1029 		/*
1030 		 * Either the thread is idling in place on another processor,
1031 		 * or it hasn't finished context switching yet.
1032 		 */
1033 		assert((thread->state & TH_IDLE) == 0);
1034 		/*
1035 		 * The run count is only dropped after the context switch completes
1036 		 * and the thread is still waiting, so we should not run_incr here
1037 		 */
1038 		new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
1039 	}
1040 
1041 	/*
1042 	 * Calculate deadline for real-time threads.
1043 	 */
1044 	if (thread->sched_mode == TH_MODE_REALTIME) {
1045 		uint64_t ctime = mach_absolute_time();
1046 		thread->realtime.deadline = thread->realtime.constraint + ctime;
1047 		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SET_RT_DEADLINE) | DBG_FUNC_NONE,
1048 		    (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
1049 	}
1050 
1051 	/*
1052 	 * Clear old quantum, fail-safe computation, etc.
1053 	 */
1054 	thread->quantum_remaining = 0;
1055 	thread->computation_metered = 0;
1056 	thread->reason = AST_NONE;
1057 	thread->block_hint = kThreadWaitNone;
1058 
1059 	/* Obtain power-relevant interrupt and "platform-idle exit" statistics.
1060 	 * We also account for "double hop" thread signaling via
1061 	 * the thread callout infrastructure.
1062 	 * DRK: consider removing the callout wakeup counters in the future;
1063 	 * they're present for verification at the moment.
1064 	 */
1065 
1066 	if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
1067 		DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, current_proc());
1068 
1069 		uint64_t ttd = current_processor()->timer_call_ttd;
1070 
1071 		if (ttd) {
1072 			if (ttd <= timer_deadline_tracking_bin_1) {
1073 				thread->thread_timer_wakeups_bin_1++;
1074 			} else if (ttd <= timer_deadline_tracking_bin_2) {
1075 				thread->thread_timer_wakeups_bin_2++;
1076 			}
1077 		}
1078 
1079 		ledger_credit_thread(thread, thread->t_ledger,
1080 		    task_ledgers.interrupt_wakeups, 1);
1081 		if (pidle) {
1082 			ledger_credit_thread(thread, thread->t_ledger,
1083 			    task_ledgers.platform_idle_wakeups, 1);
1084 		}
1085 	} else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
1086 		/* TODO: what about an interrupt that does a wake taken on a callout thread? */
1087 		if (cthread->callout_woken_from_icontext) {
1088 			ledger_credit_thread(thread, thread->t_ledger,
1089 			    task_ledgers.interrupt_wakeups, 1);
1090 			thread->thread_callout_interrupt_wakeups++;
1091 
1092 			if (cthread->callout_woken_from_platform_idle) {
1093 				ledger_credit_thread(thread, thread->t_ledger,
1094 				    task_ledgers.platform_idle_wakeups, 1);
1095 				thread->thread_callout_platform_idle_wakeups++;
1096 			}
1097 
1098 			cthread->callout_woke_thread = TRUE;
1099 		}
1100 	}
1101 
1102 	if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
1103 		thread->callout_woken_from_icontext = !!aticontext;
1104 		thread->callout_woken_from_platform_idle = !!pidle;
1105 		thread->callout_woke_thread = FALSE;
1106 	}
1107 
1108 #if KPERF
1109 	if (ready_for_runq) {
1110 		kperf_make_runnable(thread, aticontext);
1111 	}
1112 #endif /* KPERF */
1113 
1114 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1115 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
1116 	    (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
1117 	    sched_run_buckets[TH_BUCKET_RUN], 0);
1118 
1119 	DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, current_proc());
1120 
1121 	return ready_for_runq;
1122 }
1123 
1124 /*
1125  *	Routine:	thread_allowed_for_handoff
1126  *	Purpose:
1127  *		Check if the thread is allowed for handoff operation
1128  *	Conditions:
1129  *		thread lock held, IPC locks may be held.
1130  *	TODO: In future, do not allow handoff if threads have different cluster
1131  *	recommendations.
1132  */
1133 boolean_t
1134 thread_allowed_for_handoff(
1135 	thread_t         thread)
1136 {
1137 	thread_t self = current_thread();
1138 
1139 	if (allow_direct_handoff &&
1140 	    thread->sched_mode == TH_MODE_REALTIME &&
1141 	    self->sched_mode == TH_MODE_REALTIME) {
1142 		return TRUE;
1143 	}
1144 
1145 	return FALSE;
1146 }
1147 
1148 /*
1149  *	Routine:	thread_go
1150  *	Purpose:
1151  *		Unblock and dispatch thread.
1152  *	Conditions:
1153  *		thread lock held, IPC locks may be held.
1154  *		thread must have been waiting
1155  */
1156 void
1157 thread_go(
1158 	thread_t                thread,
1159 	wait_result_t           wresult,
1160 	bool                    try_handoff)
1161 {
1162 	thread_t self = current_thread();
1163 
1164 	assert_thread_magic(thread);
1165 
1166 	assert(thread->at_safe_point == FALSE);
1167 	assert(thread->wait_event == NO_EVENT64);
1168 	assert(waitq_is_null(thread->waitq));
1169 
1170 	assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
1171 	assert(thread->state & TH_WAIT);
1172 
1173 	if (thread->started) {
1174 		assert(thread->state & TH_WAKING);
1175 	}
1176 
1177 	thread_lock_assert(thread, LCK_ASSERT_OWNED);
1178 
1179 	assert(ml_get_interrupts_enabled() == false);
1180 
1181 	if (thread_unblock(thread, wresult)) {
1182 #if SCHED_TRACE_THREAD_WAKEUPS
1183 		backtrace(&thread->thread_wakeup_bt[0],
1184 		    (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL,
1185 		    NULL);
1186 #endif /* SCHED_TRACE_THREAD_WAKEUPS */
1187 		if (try_handoff && thread_allowed_for_handoff(thread)) {
1188 			thread_reference(thread);
1189 			assert(self->handoff_thread == NULL);
1190 			self->handoff_thread = thread;
1191 
1192 			/*
1193 			 * A TH_RUN'ed thread must have a chosen_processor.
1194 			 * thread_setrun would have set it, so we need to
1195 			 * replicate that here.
1196 			 */
1197 			thread->chosen_processor = current_processor();
1198 		} else {
1199 			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1200 		}
1201 	}
1202 }
1203 
1204 /*
1205  *	Routine:	thread_mark_wait_locked
1206  *	Purpose:
1207  *		Mark a thread as waiting.  If, given the circumstances,
1208  *		it doesn't want to wait (i.e. already aborted), then
1209  *		indicate that in the return value.
1210  *	Conditions:
1211  *		at splsched() and thread is locked.
1212  */
1213 __private_extern__
1214 wait_result_t
1215 thread_mark_wait_locked(
1216 	thread_t                        thread,
1217 	wait_interrupt_t        interruptible_orig)
1218 {
1219 	boolean_t                       at_safe_point;
1220 	wait_interrupt_t        interruptible = interruptible_orig;
1221 
1222 	if (thread->state & TH_IDLE) {
1223 		panic("Invalid attempt to wait while running the idle thread");
1224 	}
1225 
1226 	assert(!(thread->state & (TH_WAIT | TH_WAKING | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));
1227 
1228 	/*
1229 	 *	The thread may have certain types of interrupts/aborts masked
1230 	 *	off.  Even if the wait location says these types of interrupts
1231 	 *	are OK, we have to honor mask settings (outer-scoped code may
1232 	 *	not be able to handle aborts at the moment).
1233 	 */
1234 	interruptible &= TH_OPT_INTMASK;
1235 	if (interruptible > (thread->options & TH_OPT_INTMASK)) {
1236 		interruptible = thread->options & TH_OPT_INTMASK;
1237 	}
1238 
1239 	at_safe_point = (interruptible == THREAD_ABORTSAFE);
1240 
1241 	if (interruptible == THREAD_UNINT ||
1242 	    !(thread->sched_flags & TH_SFLAG_ABORT) ||
1243 	    (!at_safe_point &&
1244 	    (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
1245 		if (!(thread->state & TH_TERMINATE)) {
1246 			DTRACE_SCHED(sleep);
1247 		}
1248 
1249 		int state_bits = TH_WAIT;
1250 		if (!interruptible) {
1251 			state_bits |= TH_UNINT;
1252 		}
1253 		if (thread->sched_call) {
1254 			wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
1255 			if (is_kerneltask(get_threadtask(thread))) {
1256 				mask = THREAD_WAIT_NOREPORT_KERNEL;
1257 			}
1258 			if ((interruptible_orig & mask) == 0) {
1259 				state_bits |= TH_WAIT_REPORT;
1260 			}
1261 		}
1262 		thread->state |= state_bits;
1263 		thread->at_safe_point = at_safe_point;
1264 
1265 		/* TODO: pass this through assert_wait instead, have
1266 		 * assert_wait just take a struct as an argument */
1267 		assert(!thread->block_hint);
1268 		thread->block_hint = thread->pending_block_hint;
1269 		thread->pending_block_hint = kThreadWaitNone;
1270 
1271 		return thread->wait_result = THREAD_WAITING;
1272 	} else {
1273 		if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
1274 			thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
1275 		}
1276 	}
1277 	thread->pending_block_hint = kThreadWaitNone;
1278 
1279 	return thread->wait_result = THREAD_INTERRUPTED;
1280 }
1281 
1282 /*
1283  *	Routine:	thread_interrupt_level
1284  *	Purpose:
1285  *	        Set the maximum interruptible state for the
1286  *		current thread.  The effective value of any
1287  *		interruptible flag passed into assert_wait
1288  *		will never exceed this.
1289  *
1290  *		Useful for code that must not be interrupted,
1291  *		but which calls code that doesn't know that.
1292  *	Returns:
1293  *		The old interrupt level for the thread.
1294  */
1295 __private_extern__
1296 wait_interrupt_t
1297 thread_interrupt_level(
1298 	wait_interrupt_t new_level)
1299 {
1300 	thread_t thread = current_thread();
1301 	wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
1302 
1303 	thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
1304 
1305 	return result;
1306 }
1307 
1308 /*
1309  *	assert_wait:
1310  *
1311  *	Assert that the current thread is about to go to
1312  *	sleep until the specified event occurs.
1313  */
1314 wait_result_t
1315 assert_wait(
1316 	event_t                         event,
1317 	wait_interrupt_t        interruptible)
1318 {
1319 	if (__improbable(event == NO_EVENT)) {
1320 		panic("%s() called with NO_EVENT", __func__);
1321 	}
1322 
1323 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1324 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1325 	    VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
1326 
1327 	struct waitq *waitq;
1328 	waitq = global_eventq(event);
1329 	return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
1330 }
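/*
 * Hypothetical usage sketch of the assert_wait()/thread_block() protocol
 * (the event, object and locking discipline are the caller's own):
 *
 *	assert_wait((event_t)&object->flag, THREAD_UNINT);
 *	unlock(object);                        // drop the lock guarding flag
 *	thread_block(THREAD_CONTINUE_NULL);    // actually go to sleep
 *
 * and on the waking side, once the condition is satisfied:
 *
 *	thread_wakeup((event_t)&object->flag);
 */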
1331 
1332 /*
1333  *	assert_wait_queue:
1334  *
1335  *	Return the global waitq for the specified event
1336  */
1337 struct waitq *
1338 assert_wait_queue(
1339 	event_t                         event)
1340 {
1341 	return global_eventq(event);
1342 }
1343 
1344 wait_result_t
1345 assert_wait_timeout(
1346 	event_t                         event,
1347 	wait_interrupt_t        interruptible,
1348 	uint32_t                        interval,
1349 	uint32_t                        scale_factor)
1350 {
1351 	thread_t                        thread = current_thread();
1352 	wait_result_t           wresult;
1353 	uint64_t                        deadline;
1354 	spl_t                           s;
1355 
1356 	if (__improbable(event == NO_EVENT)) {
1357 		panic("%s() called with NO_EVENT", __func__);
1358 	}
1359 
1360 	struct waitq *waitq;
1361 	waitq = global_eventq(event);
1362 
1363 	s = splsched();
1364 	waitq_lock(waitq);
1365 
1366 	clock_interval_to_deadline(interval, scale_factor, &deadline);
1367 
1368 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1369 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1370 	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1371 
1372 	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1373 	    interruptible,
1374 	    TIMEOUT_URGENCY_SYS_NORMAL,
1375 	    deadline, TIMEOUT_NO_LEEWAY,
1376 	    thread);
1377 
1378 	waitq_unlock(waitq);
1379 	splx(s);
1380 	return wresult;
1381 }
1382 
1383 wait_result_t
1384 assert_wait_timeout_with_leeway(
1385 	event_t                         event,
1386 	wait_interrupt_t        interruptible,
1387 	wait_timeout_urgency_t  urgency,
1388 	uint32_t                        interval,
1389 	uint32_t                        leeway,
1390 	uint32_t                        scale_factor)
1391 {
1392 	thread_t                        thread = current_thread();
1393 	wait_result_t           wresult;
1394 	uint64_t                        deadline;
1395 	uint64_t                        abstime;
1396 	uint64_t                        slop;
1397 	uint64_t                        now;
1398 	spl_t                           s;
1399 
1400 	if (__improbable(event == NO_EVENT)) {
1401 		panic("%s() called with NO_EVENT", __func__);
1402 	}
1403 
1404 	now = mach_absolute_time();
1405 	clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1406 	deadline = now + abstime;
1407 
1408 	clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1409 
1410 	struct waitq *waitq;
1411 	waitq = global_eventq(event);
1412 
1413 	s = splsched();
1414 	waitq_lock(waitq);
1415 
1416 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1417 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1418 	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1419 
1420 	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1421 	    interruptible,
1422 	    urgency, deadline, slop,
1423 	    thread);
1424 
1425 	waitq_unlock(waitq);
1426 	splx(s);
1427 	return wresult;
1428 }
1429 
1430 wait_result_t
1431 assert_wait_deadline(
1432 	event_t                         event,
1433 	wait_interrupt_t        interruptible,
1434 	uint64_t                        deadline)
1435 {
1436 	thread_t                        thread = current_thread();
1437 	wait_result_t           wresult;
1438 	spl_t                           s;
1439 
1440 	if (__improbable(event == NO_EVENT)) {
1441 		panic("%s() called with NO_EVENT", __func__);
1442 	}
1443 
1444 	struct waitq *waitq;
1445 	waitq = global_eventq(event);
1446 
1447 	s = splsched();
1448 	waitq_lock(waitq);
1449 
1450 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1451 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1452 	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1453 
1454 	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1455 	    interruptible,
1456 	    TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1457 	    TIMEOUT_NO_LEEWAY, thread);
1458 	waitq_unlock(waitq);
1459 	splx(s);
1460 	return wresult;
1461 }
1462 
1463 wait_result_t
1464 assert_wait_deadline_with_leeway(
1465 	event_t                         event,
1466 	wait_interrupt_t        interruptible,
1467 	wait_timeout_urgency_t  urgency,
1468 	uint64_t                        deadline,
1469 	uint64_t                        leeway)
1470 {
1471 	thread_t                        thread = current_thread();
1472 	wait_result_t           wresult;
1473 	spl_t                           s;
1474 
1475 	if (__improbable(event == NO_EVENT)) {
1476 		panic("%s() called with NO_EVENT", __func__);
1477 	}
1478 
1479 	struct waitq *waitq;
1480 	waitq = global_eventq(event);
1481 
1482 	s = splsched();
1483 	waitq_lock(waitq);
1484 
1485 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1486 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1487 	    VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1488 
1489 	wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1490 	    interruptible,
1491 	    urgency, deadline, leeway,
1492 	    thread);
1493 	waitq_unlock(waitq);
1494 	splx(s);
1495 	return wresult;
1496 }
1497 
1498 void
1499 sched_cond_init(
1500 	sched_cond_atomic_t *cond)
1501 {
1502 	os_atomic_init(cond, SCHED_COND_INIT);
1503 }
1504 
1505 wait_result_t
1506 sched_cond_wait_parameter(
1507 	sched_cond_atomic_t *cond,
1508 	wait_interrupt_t interruptible,
1509 	thread_continue_t continuation,
1510 	void *parameter)
1511 {
1512 	assert_wait((event_t) cond, interruptible);
1513 	/* clear active bit to indicate future wakeups will have to unblock this thread */
1514 	sched_cond_t new_state = (sched_cond_t) os_atomic_andnot(cond, SCHED_COND_ACTIVE, relaxed);
1515 	if (__improbable(new_state & SCHED_COND_WAKEUP)) {
1516 		/* a wakeup has been issued; undo wait assertion, ack the wakeup, and return */
1517 		thread_t thread = current_thread();
1518 		clear_wait(thread, THREAD_AWAKENED);
1519 		sched_cond_ack(cond);
1520 		return THREAD_AWAKENED;
1521 	}
1522 	return thread_block_parameter(continuation, parameter);
1523 }
1524 
1525 wait_result_t
1526 sched_cond_wait(
1527 	sched_cond_atomic_t *cond,
1528 	wait_interrupt_t interruptible,
1529 	thread_continue_t continuation)
1530 {
1531 	return sched_cond_wait_parameter(cond, interruptible, continuation, NULL);
1532 }
1533 
1534 sched_cond_t
1535 sched_cond_ack(
1536 	sched_cond_atomic_t *cond)
1537 {
1538 	sched_cond_t new_cond = (sched_cond_t) os_atomic_xor(cond, SCHED_COND_ACTIVE | SCHED_COND_WAKEUP, acquire);
1539 	assert(new_cond & SCHED_COND_ACTIVE);
1540 	return new_cond;
1541 }
1542 
1543 kern_return_t
1544 sched_cond_signal(
1545 	sched_cond_atomic_t  *cond,
1546 	thread_t thread)
1547 {
1548 	disable_preemption();
1549 	sched_cond_t old_cond = (sched_cond_t) os_atomic_or_orig(cond, SCHED_COND_WAKEUP, release);
1550 	if (!(old_cond & (SCHED_COND_WAKEUP | SCHED_COND_ACTIVE))) {
1551 		/* this was the first wakeup to be issued AND the thread was inactive */
1552 		thread_wakeup_thread((event_t) cond, thread);
1553 	}
1554 	enable_preemption();
1555 	return KERN_SUCCESS;
1556 }
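/*
 * Sketch of the intended sched_cond protocol for a dedicated worker thread
 * (hypothetical consumer; everything other than the sched_cond_* calls is a
 * placeholder):
 *
 *	static void worker_continuation(void *param, wait_result_t wr)
 *	{
 *		sched_cond_ack(&worker_cond);   // become active, consume the wakeup
 *		... drain pending work ...
 *		sched_cond_wait(&worker_cond, THREAD_UNINT, worker_continuation);
 *	}
 *
 * Producers call sched_cond_signal(&worker_cond, worker_thread); only the
 * first signal delivered to an inactive, not-yet-signalled waiter pays for
 * a thread_wakeup_thread().
 */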
1557 
1558 /*
1559  * thread_isoncpu:
1560  *
1561  * Return TRUE if a thread is running on a processor such that an AST
1562  * is needed to pull it out of userspace execution, or if executing in
1563  * the kernel, bring to a context switch boundary that would cause
1564  * thread state to be serialized in the thread PCB.
1565  *
1566  * Thread locked, returns the same way. While locked, fields
1567  * like "state" cannot change. "runq" can change only from set to unset.
1568  */
1569 static inline boolean_t
1570 thread_isoncpu(thread_t thread)
1571 {
1572 	/* Not running or runnable */
1573 	if (!(thread->state & TH_RUN)) {
1574 		return FALSE;
1575 	}
1576 
1577 	/* Waiting on a runqueue, not currently running */
1578 	/* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1579 	/* TODO: This can also be incorrect for `handoff` cases where
1580 	 * the thread is never enqueued on the runq */
1581 	if (thread_get_runq(thread) != PROCESSOR_NULL) {
1582 		return FALSE;
1583 	}
1584 
1585 	/*
1586 	 * Thread does not have a stack yet
1587 	 * It could be on the stack alloc queue or preparing to be invoked
1588 	 */
1589 	if (!thread->kernel_stack) {
1590 		return FALSE;
1591 	}
1592 
1593 	/*
1594 	 * Thread must be running on a processor, or
1595 	 * about to run, or just did run. In all these
1596 	 * cases, an AST to the processor is needed
1597 	 * to guarantee that the thread is kicked out
1598 	 * of userspace and the processor has
1599 	 * context switched (and saved register state).
1600 	 */
1601 	return TRUE;
1602 }
1603 
1604 /*
1605  * thread_stop:
1606  *
1607  * Force a preemption point for a thread and wait
1608  * for it to stop running on a CPU. If a stronger
1609  * guarantee is requested, wait until no longer
1610  * runnable. Arbitrates access among
1611  * multiple stop requests. (released by unstop)
1612  *
1613  * The thread must enter a wait state and stop via a
1614  * separate means.
1615  *
1616  * Returns FALSE if interrupted.
1617  */
1618 boolean_t
1619 thread_stop(
1620 	thread_t                thread,
1621 	boolean_t       until_not_runnable)
1622 {
1623 	wait_result_t   wresult;
1624 	spl_t                   s = splsched();
1625 	boolean_t               oncpu;
1626 
1627 	wake_lock(thread);
1628 	thread_lock(thread);
1629 
1630 	while (thread->state & TH_SUSP) {
1631 		thread->wake_active = TRUE;
1632 		thread_unlock(thread);
1633 
1634 		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1635 		wake_unlock(thread);
1636 		splx(s);
1637 
1638 		if (wresult == THREAD_WAITING) {
1639 			wresult = thread_block(THREAD_CONTINUE_NULL);
1640 		}
1641 
1642 		if (wresult != THREAD_AWAKENED) {
1643 			return FALSE;
1644 		}
1645 
1646 		s = splsched();
1647 		wake_lock(thread);
1648 		thread_lock(thread);
1649 	}
1650 
1651 	thread->state |= TH_SUSP;
1652 
1653 	while ((oncpu = thread_isoncpu(thread)) ||
1654 	    (until_not_runnable && (thread->state & TH_RUN))) {
1655 		if (oncpu) {
1656 			/*
1657 			 * TODO: chosen_processor isn't really the right
1658 			 * thing to IPI here.  We really want `last_processor`,
1659 			 * but we also want to know where to send the IPI
1660 			 * *before* thread_invoke sets last_processor.
1661 			 *
1662 			 * rdar://47149497 (thread_stop doesn't IPI the right core)
1663 			 */
1664 			assert(thread->state & TH_RUN);
1665 			processor_t processor = thread->chosen_processor;
1666 			assert(processor != PROCESSOR_NULL);
1667 			cause_ast_check(processor);
1668 		}
1669 
1670 		thread->wake_active = TRUE;
1671 		thread_unlock(thread);
1672 
1673 		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1674 		wake_unlock(thread);
1675 		splx(s);
1676 
1677 		if (wresult == THREAD_WAITING) {
1678 			wresult = thread_block(THREAD_CONTINUE_NULL);
1679 		}
1680 
1681 		if (wresult != THREAD_AWAKENED) {
1682 			thread_unstop(thread);
1683 			return FALSE;
1684 		}
1685 
1686 		s = splsched();
1687 		wake_lock(thread);
1688 		thread_lock(thread);
1689 	}
1690 
1691 	thread_unlock(thread);
1692 	wake_unlock(thread);
1693 	splx(s);
1694 
1695 	/*
1696 	 * We return with the thread unlocked. To prevent it from
1697 	 * transitioning to a runnable state (or from TH_RUN to
1698 	 * being on the CPU), the caller must ensure the thread
1699 	 * is stopped via an external means (such as an AST)
1700 	 */
1701 
1702 	return TRUE;
1703 }
1704 
1705 /*
1706  * thread_unstop:
1707  *
1708  * Release a previous stop request and set
1709  * the thread running if appropriate.
1710  *
1711  * Use only after a successful stop operation.
1712  */
1713 void
1714 thread_unstop(
1715 	thread_t        thread)
1716 {
1717 	spl_t           s = splsched();
1718 
1719 	wake_lock(thread);
1720 	thread_lock(thread);
1721 
1722 	assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);
1723 
1724 	if (thread->state & TH_SUSP) {
1725 		thread->state &= ~TH_SUSP;
1726 
1727 		if (thread->wake_active) {
1728 			thread->wake_active = FALSE;
1729 			thread_unlock(thread);
1730 
1731 			thread_wakeup(&thread->wake_active);
1732 			wake_unlock(thread);
1733 			splx(s);
1734 
1735 			return;
1736 		}
1737 	}
1738 
1739 	thread_unlock(thread);
1740 	wake_unlock(thread);
1741 	splx(s);
1742 }
1743 
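/*
 * Usage sketch (illustrative only): the usual pairing of thread_stop()
 * and thread_unstop() around an inspection of the target thread's
 * serialized state.  Real callers typically also suspend the thread
 * (e.g. via thread_hold()) so it does not simply resume running;
 * `inspect_serialized_state` is a hypothetical helper.
 *
 *	if (thread_stop(thread, FALSE)) {
 *		// target is off-CPU with its register state saved in the PCB
 *		inspect_serialized_state(thread);
 *		thread_unstop(thread);
 *	} else {
 *		// the wait was interrupted; thread_stop() has already backed
 *		// out, so thread_unstop() must not be called
 *		return KERN_ABORTED;
 *	}
 */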
1744 /*
1745  * thread_wait:
1746  *
1747  * Wait for a thread to stop running. (non-interruptible)
1748  *
1749  */
1750 void
1751 thread_wait(
1752 	thread_t        thread,
1753 	boolean_t       until_not_runnable)
1754 {
1755 	wait_result_t   wresult;
1756 	boolean_t       oncpu;
1757 	processor_t     processor;
1758 	spl_t           s = splsched();
1759 
1760 	wake_lock(thread);
1761 	thread_lock(thread);
1762 
1763 	/*
1764 	 * Wait until not running on a CPU.  If stronger requirement
1765 	 * desired, wait until not runnable.  Assumption: if thread is
1766 	 * on CPU, then TH_RUN is set, so we're not waiting in any case
1767 	 * where the original, pure "TH_RUN" check would have let us
1768 	 * finish.
1769 	 */
1770 	while ((oncpu = thread_isoncpu(thread)) ||
1771 	    (until_not_runnable && (thread->state & TH_RUN))) {
1772 		if (oncpu) {
1773 			assert(thread->state & TH_RUN);
1774 			processor = thread->chosen_processor;
1775 			cause_ast_check(processor);
1776 		}
1777 
1778 		thread->wake_active = TRUE;
1779 		thread_unlock(thread);
1780 
1781 		wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1782 		wake_unlock(thread);
1783 		splx(s);
1784 
1785 		if (wresult == THREAD_WAITING) {
1786 			thread_block(THREAD_CONTINUE_NULL);
1787 		}
1788 
1789 		s = splsched();
1790 		wake_lock(thread);
1791 		thread_lock(thread);
1792 	}
1793 
1794 	thread_unlock(thread);
1795 	wake_unlock(thread);
1796 	splx(s);
1797 }
1798 
1799 /*
1800  *	Routine: clear_wait_internal
1801  *
1802  *		Clear the wait condition for the specified thread.
1803  *		Start the thread executing if that is appropriate.
1804  *	Arguments:
1805  *		thread		thread to awaken
1806  *		result		Wakeup result the thread should see
1807  *	Conditions:
1808  *		At splsched
1809  *		the thread is locked.
1810  *	Returns:
1811  *		KERN_SUCCESS		thread was rousted out of a wait
1812  *		KERN_FAILURE		thread was waiting but could not be rousted
1813  *		KERN_NOT_WAITING	thread was not waiting
1814  */
1815 __private_extern__ kern_return_t
1816 clear_wait_internal(
1817 	thread_t        thread,
1818 	wait_result_t   wresult)
1819 {
1820 	waitq_t waitq = thread->waitq;
1821 
1822 	if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1823 		return KERN_FAILURE;
1824 	}
1825 
1826 	/*
1827 	 * Check that the thread is waiting and not waking: a waking thread
1828 	 * has already cleared its waitq and is destined to be woken via
1829 	 * thread_go(), so there is no need to do it again.
1830 	 */
1831 	if ((thread->state & (TH_WAIT | TH_TERMINATE | TH_WAKING)) != TH_WAIT) {
1832 		assert(waitq_is_null(thread->waitq));
1833 		return KERN_NOT_WAITING;
1834 	}
1835 
1836 	/* may drop and retake the thread lock */
1837 	if (!waitq_is_null(waitq) && !waitq_pull_thread_locked(waitq, thread)) {
1838 		return KERN_NOT_WAITING;
1839 	}
1840 
1841 	thread_go(thread, wresult, /* handoff */ false);
1842 
1843 	return KERN_SUCCESS;
1844 }
1845 
1846 
1847 /*
1848  *	clear_wait:
1849  *
1850  *	Clear the wait condition for the specified thread.  Start the thread
1851  *	executing if that is appropriate.
1852  *
1853  *	parameters:
1854  *	  thread		thread to awaken
1855  *	  result		Wakeup result the thread should see
1856  */
1857 kern_return_t
1858 clear_wait(
1859 	thread_t                thread,
1860 	wait_result_t   result)
1861 {
1862 	kern_return_t ret;
1863 	spl_t           s;
1864 
1865 	s = splsched();
1866 	thread_lock(thread);
1867 
1868 	ret = clear_wait_internal(thread, result);
1869 
1870 	if (thread == current_thread()) {
1871 		/*
1872 		 * The thread must be ready to wait again immediately
1873 		 * after clearing its own wait.
1874 		 */
1875 		assert((thread->state & TH_WAKING) == 0);
1876 	}
1877 
1878 	thread_unlock(thread);
1879 	splx(s);
1880 	return ret;
1881 }
1882 
1883 
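/*
 * Usage sketch (illustrative only): forcing a specific thread out of an
 * interruptible wait, e.g. from an abort path, while holding a reference
 * on `thread`.  The woken thread observes THREAD_INTERRUPTED as its
 * wait result.
 *
 *	if (clear_wait(thread, THREAD_INTERRUPTED) == KERN_SUCCESS) {
 *		// the thread has been made runnable and will see
 *		// THREAD_INTERRUPTED from its assert_wait()/thread_block()
 *	}
 */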
1884 /*
1885  *	thread_wakeup_prim:
1886  *
1887  *	Common routine for thread_wakeup, thread_wakeup_with_result,
1888  *	and thread_wakeup_one.
1889  *
1890  */
1891 kern_return_t
1892 thread_wakeup_prim(
1893 	event_t          event,
1894 	boolean_t        one_thread,
1895 	wait_result_t    result)
1896 {
1897 	if (__improbable(event == NO_EVENT)) {
1898 		panic("%s() called with NO_EVENT", __func__);
1899 	}
1900 
1901 	struct waitq *wq = global_eventq(event);
1902 
1903 	if (one_thread) {
1904 		return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_WAKEUP_DEFAULT);
1905 	} else {
1906 		return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_WAKEUP_DEFAULT);
1907 	}
1908 }
1909 
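/*
 * Usage sketch (illustrative only): the classic assert_wait() /
 * thread_block() / thread_wakeup() idiom that these wakeup primitives
 * serve.  `my_lock` and `my_flag` are hypothetical state protected by a
 * simple lock; the event is the address of the flag.
 *
 *	// waiter
 *	simple_lock(&my_lock, LCK_GRP_NULL);
 *	while (!my_flag) {
 *		assert_wait((event_t)&my_flag, THREAD_UNINT);
 *		simple_unlock(&my_lock);
 *		thread_block(THREAD_CONTINUE_NULL);
 *		simple_lock(&my_lock, LCK_GRP_NULL);
 *	}
 *	simple_unlock(&my_lock);
 *
 *	// waker
 *	simple_lock(&my_lock, LCK_GRP_NULL);
 *	my_flag = TRUE;
 *	thread_wakeup((event_t)&my_flag);	// reaches thread_wakeup_prim() above
 *	simple_unlock(&my_lock);
 */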
1910 /*
1911  * Wakeup a specified thread if and only if it's waiting for this event
1912  */
1913 kern_return_t
1914 thread_wakeup_thread(
1915 	event_t         event,
1916 	thread_t        thread)
1917 {
1918 	if (__improbable(event == NO_EVENT)) {
1919 		panic("%s() called with NO_EVENT", __func__);
1920 	}
1921 
1922 	if (__improbable(thread == THREAD_NULL)) {
1923 		panic("%s() called with THREAD_NULL", __func__);
1924 	}
1925 
1926 	struct waitq *wq = global_eventq(event);
1927 
1928 	return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1929 }
1930 
1931 /*
1932  * Wakeup a thread waiting on an event and promote it to a priority.
1933  *
1934  * Requires woken thread to un-promote itself when done.
1935  */
1936 kern_return_t
1937 thread_wakeup_one_with_pri(
1938 	event_t      event,
1939 	int          priority)
1940 {
1941 	if (__improbable(event == NO_EVENT)) {
1942 		panic("%s() called with NO_EVENT", __func__);
1943 	}
1944 
1945 	struct waitq *wq = global_eventq(event);
1946 
1947 	return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1948 }
1949 
1950 /*
1951  * Wakeup a thread waiting on an event,
1952  * promote it to a priority,
1953  * and return a reference to the woken thread.
1954  *
1955  * Requires woken thread to un-promote itself when done.
1956  */
1957 thread_t
1958 thread_wakeup_identify(event_t  event,
1959     int      priority)
1960 {
1961 	if (__improbable(event == NO_EVENT)) {
1962 		panic("%s() called with NO_EVENT", __func__);
1963 	}
1964 
1965 	struct waitq *wq = global_eventq(event);
1966 
1967 	return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1968 }
1969 
1970 /*
1971  *	thread_bind:
1972  *
1973  *	Force the current thread to execute on the specified processor.
1974  *	Takes effect after the next thread_block().
1975  *
1976  *	Returns the previous binding.  PROCESSOR_NULL means
1977  *	not bound.
1978  *
1979  *	XXX - DO NOT export this to users - XXX
1980  */
1981 processor_t
1982 thread_bind(
1983 	processor_t             processor)
1984 {
1985 	thread_t                self = current_thread();
1986 	processor_t             prev;
1987 	spl_t                   s;
1988 
1989 	s = splsched();
1990 	thread_lock(self);
1991 
1992 	prev = thread_bind_internal(self, processor);
1993 
1994 	thread_unlock(self);
1995 	splx(s);
1996 
1997 	return prev;
1998 }
1999 
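/*
 * Usage sketch (illustrative only): binding the calling thread to a
 * specific processor and later restoring the previous binding.  As noted
 * above, the binding only takes effect across a thread_block();
 * `target_processor` is hypothetical.
 *
 *	processor_t prev = thread_bind(target_processor);
 *	thread_block(THREAD_CONTINUE_NULL);	// migrate onto target_processor
 *	// ... work that must run on target_processor ...
 *	thread_bind(prev);			// usually PROCESSOR_NULL
 *	thread_block(THREAD_CONTINUE_NULL);	// allow migration back off
 */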
2000 void
2001 thread_bind_during_wakeup(thread_t thread, processor_t processor)
2002 {
2003 	assert(!ml_get_interrupts_enabled());
2004 	assert((thread->state & (TH_WAIT | TH_WAKING)) == (TH_WAIT | TH_WAKING));
2005 #if MACH_ASSERT
2006 	thread_lock_assert(thread, LCK_ASSERT_OWNED);
2007 #endif
2008 
2009 	if (thread->bound_processor != processor) {
2010 		thread_bind_internal(thread, processor);
2011 	}
2012 }
2013 
2014 void
2015 thread_unbind_after_queue_shutdown(
2016 	thread_t                thread,
2017 	processor_t             processor __assert_only)
2018 {
2019 	assert(!ml_get_interrupts_enabled());
2020 
2021 	thread_lock(thread);
2022 
2023 	if (thread->bound_processor) {
2024 		bool removed;
2025 
2026 		assert(thread->bound_processor == processor);
2027 
2028 		removed = thread_run_queue_remove(thread);
2029 		/*
2030 		 * we can always unbind even if we didn't really remove the
2031 		 * thread from the runqueue
2032 		 */
2033 		thread_bind_internal(thread, PROCESSOR_NULL);
2034 		if (removed) {
2035 			thread_run_queue_reinsert(thread, SCHED_TAILQ);
2036 		}
2037 	}
2038 
2039 	thread_unlock(thread);
2040 }
2041 
2042 /*
2043  * thread_bind_internal:
2044  *
2045  * If the specified thread is not the current thread, and it is currently
2046  * running on another CPU, a remote AST must be sent to that CPU to cause
2047  * the thread to migrate to its bound processor. Otherwise, the migration
2048  * will occur at the next quantum expiration or blocking point.
2049  *
2050  * When the thread is the current thread, an explicit thread_block() should
2051  * be used to force the current processor to context switch away and
2052  * let the thread migrate to the bound processor.
2053  *
2054  * Thread must be locked, and at splsched.
2055  */
2056 
2057 static processor_t
2058 thread_bind_internal(
2059 	thread_t                thread,
2060 	processor_t             processor)
2061 {
2062 	processor_t             prev;
2063 
2064 	/* <rdar://problem/15102234> */
2065 	assert(thread->sched_pri < BASEPRI_RTQUEUES);
2066 	/* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
2067 	thread_assert_runq_null(thread);
2068 
2069 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND),
2070 	    thread_tid(thread), processor ? processor->cpu_id : ~0ul, 0, 0, 0);
2071 
2072 	prev = thread->bound_processor;
2073 	thread->bound_processor = processor;
2074 
2075 	return prev;
2076 }
2077 
2078 /*
2079  * thread_vm_bind_group_add:
2080  *
2081  * The "VM bind group" is a special mechanism to mark a collection
2082  * of threads from the VM subsystem that, in general, should be scheduled
2083  * with only one CPU of parallelism. To accomplish this, we initially
2084  * bind all the threads to the master processor, which has the effect
2085  * that only one of the threads in the group can execute at once, including
2086  * preempting threads in the group that are at a lower priority. Future
2087  * mechanisms may use more dynamic mechanisms to prevent the collection
2088  * of VM threads from using more CPU time than desired.
2089  *
2090  * The current implementation can result in priority inversions where
2091  * compute-bound priority 95 or realtime threads that happen to have
2092  * landed on the master processor prevent the VM threads from running.
2093  * When this situation is detected, we unbind the threads for one
2094  * scheduler tick to allow the scheduler to run the threads on
2095  * additional CPUs, before restoring the binding (assuming high latency
2096  * is no longer a problem).
2097  */
2098 
2099 /*
2100  * The current max is provisioned for:
2101  * vm_compressor_swap_trigger_thread (92)
2102  * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
2103  * vm_pageout_continue (92)
2104  * memorystatus_thread (95)
2105  */
2106 #define MAX_VM_BIND_GROUP_COUNT (5)
2107 decl_simple_lock_data(static, sched_vm_group_list_lock);
2108 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
2109 static int sched_vm_group_thread_count;
2110 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
2111 
2112 void
2113 thread_vm_bind_group_add(void)
2114 {
2115 	thread_t self = current_thread();
2116 
2117 	if (support_bootcpu_shutdown) {
2118 		/*
2119 		 * Bind group is not supported without an always-on
2120 		 * processor to bind to. If we need these to coexist,
2121 		 * we'd need to dynamically move the group to
2122 		 * another processor as it shuts down, or build
2123 		 * a different way to run a set of threads
2124 		 * without parallelism.
2125 		 */
2126 		return;
2127 	}
2128 
2129 	thread_reference(self);
2130 	self->options |= TH_OPT_SCHED_VM_GROUP;
2131 
2132 	simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
2133 	assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
2134 	sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
2135 	simple_unlock(&sched_vm_group_list_lock);
2136 
2137 	thread_bind(master_processor);
2138 
2139 	/* Switch to bound processor if not already there */
2140 	thread_block(THREAD_CONTINUE_NULL);
2141 }
2142 
2143 static void
2144 sched_vm_group_maintenance(void)
2145 {
2146 	uint64_t ctime = mach_absolute_time();
2147 	uint64_t longtime = ctime - sched_tick_interval;
2148 	int i;
2149 	spl_t s;
2150 	boolean_t high_latency_observed = FALSE;
2151 	boolean_t runnable_and_not_on_runq_observed = FALSE;
2152 	boolean_t bind_target_changed = FALSE;
2153 	processor_t bind_target = PROCESSOR_NULL;
2154 
2155 	/* Make sure nobody attempts to add new threads while we are enumerating them */
2156 	simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
2157 
2158 	s = splsched();
2159 
2160 	for (i = 0; i < sched_vm_group_thread_count; i++) {
2161 		thread_t thread = sched_vm_group_thread_list[i];
2162 		assert(thread != THREAD_NULL);
2163 		thread_lock(thread);
2164 		if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
2165 			if (thread_get_runq(thread) != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
2166 				high_latency_observed = TRUE;
2167 			} else if (thread_get_runq(thread) == PROCESSOR_NULL) {
2168 				/* There are some cases where a thread may be transitioning that also fall into this case */
2169 				runnable_and_not_on_runq_observed = TRUE;
2170 			}
2171 		}
2172 		thread_unlock(thread);
2173 
2174 		if (high_latency_observed && runnable_and_not_on_runq_observed) {
2175 			/* All the things we are looking for are true, stop looking */
2176 			break;
2177 		}
2178 	}
2179 
2180 	splx(s);
2181 
2182 	if (sched_vm_group_temporarily_unbound) {
2183 		/* If we turned off binding, make sure everything is OK before rebinding */
2184 		if (!high_latency_observed) {
2185 			/* rebind */
2186 			bind_target_changed = TRUE;
2187 			bind_target = master_processor;
2188 			sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
2189 		}
2190 	} else {
2191 		/*
2192 		 * Check if we're in a bad state, which is defined by high
2193 		 * latency with no core currently executing a thread. If a
2194 		 * single thread is making progress on a CPU, that means the
2195 		 * binding concept to reduce parallelism is working as
2196 		 * designed.
2197 		 */
2198 		if (high_latency_observed && !runnable_and_not_on_runq_observed) {
2199 			/* unbind */
2200 			bind_target_changed = TRUE;
2201 			bind_target = PROCESSOR_NULL;
2202 			sched_vm_group_temporarily_unbound = TRUE;
2203 		}
2204 	}
2205 
2206 	if (bind_target_changed) {
2207 		s = splsched();
2208 		for (i = 0; i < sched_vm_group_thread_count; i++) {
2209 			thread_t thread = sched_vm_group_thread_list[i];
2210 			boolean_t removed;
2211 			assert(thread != THREAD_NULL);
2212 
2213 			thread_lock(thread);
2214 			removed = thread_run_queue_remove(thread);
2215 			if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
2216 				thread_bind_internal(thread, bind_target);
2217 			} else {
2218 				/*
2219 				 * Thread was in the middle of being context-switched-to,
2220 				 * or was in the process of blocking. To avoid switching the bind
2221 				 * state out mid-flight, defer the change if possible.
2222 				 */
2223 				if (bind_target == PROCESSOR_NULL) {
2224 					thread_bind_internal(thread, bind_target);
2225 				} else {
2226 					sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
2227 				}
2228 			}
2229 
2230 			if (removed) {
2231 				thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
2232 			}
2233 			thread_unlock(thread);
2234 		}
2235 		splx(s);
2236 	}
2237 
2238 	simple_unlock(&sched_vm_group_list_lock);
2239 }
2240 
2241 #if defined(__x86_64__)
2242 #define SCHED_AVOID_CPU0 1
2243 #else
2244 #define SCHED_AVOID_CPU0 0
2245 #endif
2246 
2247 int sched_allow_rt_smt = 1;
2248 int sched_avoid_cpu0 = SCHED_AVOID_CPU0;
2249 int sched_allow_rt_steal = 1;
2250 int sched_backup_cpu_timeout_count = 5; /* The maximum number of 10us delays to wait before using a backup cpu */
2251 
2252 int sched_rt_n_backup_processors = SCHED_DEFAULT_BACKUP_PROCESSORS;
2253 
2254 int
2255 sched_get_rt_n_backup_processors(void)
2256 {
2257 	return sched_rt_n_backup_processors;
2258 }
2259 
2260 void
2261 sched_set_rt_n_backup_processors(int n)
2262 {
2263 	if (n < 0) {
2264 		n = 0;
2265 	} else if (n > SCHED_MAX_BACKUP_PROCESSORS) {
2266 		n = SCHED_MAX_BACKUP_PROCESSORS;
2267 	}
2268 
2269 	sched_rt_n_backup_processors = n;
2270 }
2271 
2272 int sched_rt_runq_strict_priority = false;
2273 
2274 inline static processor_set_t
2275 change_locked_pset(processor_set_t current_pset, processor_set_t new_pset)
2276 {
2277 	if (current_pset != new_pset) {
2278 		pset_unlock(current_pset);
2279 		pset_lock(new_pset);
2280 	}
2281 
2282 	return new_pset;
2283 }
2284 
2285 /*
2286  * Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
2287  * rebalancing opportunity exists when a core is (instantaneously) idle, but
2288  * other SMT-capable cores may be over-committed. TODO: some possible negatives:
2289  * IPI thrash if this core does not remain idle following the load balancing ASTs;
2290  * idle "thrash", when IPI issue is followed by idle entry/core power down
2291  * followed by a wakeup shortly thereafter.
2292  */
2293 
2294 #if (DEVELOPMENT || DEBUG)
2295 int sched_smt_balance = 1;
2296 #endif
2297 
2298 /* Invoked with pset locked, returns with pset unlocked */
2299 bool
2300 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
2301 {
2302 	processor_t ast_processor = NULL;
2303 
2304 #if (DEVELOPMENT || DEBUG)
2305 	if (__improbable(sched_smt_balance == 0)) {
2306 		goto smt_balance_exit;
2307 	}
2308 #endif
2309 
2310 	assert(cprocessor == current_processor());
2311 	if (cprocessor->is_SMT == FALSE) {
2312 		goto smt_balance_exit;
2313 	}
2314 
2315 	processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
2316 
2317 	/* Determine if both this processor and its sibling are idle,
2318 	 * indicating an SMT rebalancing opportunity.
2319 	 */
2320 	if (sib_processor->state != PROCESSOR_IDLE) {
2321 		goto smt_balance_exit;
2322 	}
2323 
2324 	processor_t sprocessor;
2325 
2326 	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2327 	uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
2328 	    ~cpset->primary_map);
2329 	for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
2330 		sprocessor = processor_array[cpuid];
2331 		if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
2332 		    (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
2333 			ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2334 			if (ipi_type != SCHED_IPI_NONE) {
2335 				assert(sprocessor != cprocessor);
2336 				ast_processor = sprocessor;
2337 				break;
2338 			}
2339 		}
2340 	}
2341 
2342 smt_balance_exit:
2343 	pset_unlock(cpset);
2344 
2345 	if (ast_processor) {
2346 		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
2347 		sched_ipi_perform(ast_processor, ipi_type);
2348 	}
2349 	return false;
2350 }
2351 
2352 static cpumap_t
2353 pset_available_cpumap(processor_set_t pset)
2354 {
2355 	return pset->cpu_available_map & pset->recommended_bitmask;
2356 }
2357 
2358 int
2359 pset_available_cpu_count(processor_set_t pset)
2360 {
2361 	return bit_count(pset_available_cpumap(pset));
2362 }
2363 
2364 bool
2365 pset_is_recommended(processor_set_t pset)
2366 {
2367 	if (!pset) {
2368 		return false;
2369 	}
2370 	return pset_available_cpu_count(pset) > 0;
2371 }
2372 
2373 bool
2374 pset_type_is_recommended(processor_set_t pset)
2375 {
2376 	if (!pset) {
2377 		return false;
2378 	}
2379 	pset_map_t recommended_psets = os_atomic_load(&pset->node->pset_recommended_map, relaxed);
2380 	return bit_count(recommended_psets) > 0;
2381 }
2382 
2383 static cpumap_t
2384 pset_available_but_not_running_cpumap(processor_set_t pset)
2385 {
2386 	return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
2387 	       pset->recommended_bitmask;
2388 }
2389 
2390 bool
2391 pset_has_stealable_threads(processor_set_t pset)
2392 {
2393 	pset_assert_locked(pset);
2394 
2395 	cpumap_t avail_map = pset_available_but_not_running_cpumap(pset);
2396 	/*
2397 	 * Secondary CPUs never steal, so allow stealing of threads if there are more threads than
2398 	 * available primary CPUs
2399 	 */
2400 	avail_map &= pset->primary_map;
2401 
2402 	return (pset->pset_runq.count > 0) && ((pset->pset_runq.count + rt_runq_count(pset)) > bit_count(avail_map));
2403 }
2404 
2405 static cpumap_t
2406 pset_available_but_not_running_rt_threads_cpumap(processor_set_t pset)
2407 {
2408 	cpumap_t avail_map = pset_available_cpumap(pset);
2409 	if (!sched_allow_rt_smt) {
2410 		/*
2411 		 * Secondary CPUs are not allowed to run RT threads, so
2412 		 * only primary CPUs should be included
2413 		 */
2414 		avail_map &= pset->primary_map;
2415 	}
2416 
2417 	return avail_map & ~pset->realtime_map;
2418 }
2419 
2420 static bool
2421 pset_needs_a_followup_IPI(processor_set_t pset)
2422 {
2423 	int nbackup_cpus = 0;
2424 
2425 	if (rt_runq_is_low_latency(pset)) {
2426 		nbackup_cpus = sched_rt_n_backup_processors;
2427 	}
2428 
2429 	int rt_rq_count = rt_runq_count(pset);
2430 
2431 	return (rt_rq_count > 0) && ((rt_rq_count + nbackup_cpus - bit_count(pset->pending_AST_URGENT_cpu_mask)) > 0);
2432 }
2433 
2434 bool
2435 pset_has_stealable_rt_threads(processor_set_t pset)
2436 {
2437 	pset_node_t node = pset->node;
2438 	if (bit_count(node->pset_map) == 1) {
2439 		return false;
2440 	}
2441 
2442 	cpumap_t avail_map = pset_available_but_not_running_rt_threads_cpumap(pset);
2443 
2444 	return rt_runq_count(pset) > bit_count(avail_map);
2445 }
2446 
2447 static void
2448 pset_update_rt_stealable_state(processor_set_t pset)
2449 {
2450 	if (pset_has_stealable_rt_threads(pset)) {
2451 		pset->stealable_rt_threads_earliest_deadline = rt_runq_earliest_deadline(pset);
2452 	} else {
2453 		pset->stealable_rt_threads_earliest_deadline = RT_DEADLINE_NONE;
2454 	}
2455 }
2456 
2457 static void
2458 clear_pending_AST_bits(processor_set_t pset, processor_t processor, __kdebug_only const int trace_point_number)
2459 {
2460 	/* Acknowledge any pending IPIs here with pset lock held */
2461 	pset_assert_locked(pset);
2462 	if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2463 		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END,
2464 		    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, trace_point_number);
2465 	}
2466 	bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
2467 
2468 #if defined(CONFIG_SCHED_DEFERRED_AST)
2469 	bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
2470 #endif
2471 }
2472 
2473 /*
2474  * Called with pset locked, on a processor that is committing to run a new thread.
2475  * Will transition an idle or dispatching processor to running as it picks up
2476  * the first new thread from the idle thread.
2477  */
2478 static void
2479 pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
2480 {
2481 	pset_assert_locked(pset);
2482 
2483 	if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
2484 		assert(current_thread() == processor->idle_thread);
2485 
2486 		/*
2487 		 * Dispatching processor is now committed to running new_thread,
2488 		 * so change its state to PROCESSOR_RUNNING.
2489 		 */
2490 		pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
2491 	} else {
2492 		assert(processor->state == PROCESSOR_RUNNING);
2493 	}
2494 
2495 	processor_state_update_from_thread(processor, new_thread, true);
2496 
2497 	if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
2498 		bit_set(pset->realtime_map, processor->cpu_id);
2499 	} else {
2500 		bit_clear(pset->realtime_map, processor->cpu_id);
2501 	}
2502 	pset_update_rt_stealable_state(pset);
2503 
2504 	pset_node_t node = pset->node;
2505 
2506 	if (bit_count(node->pset_map) == 1) {
2507 		/* Node has only a single pset, so skip node pset map updates */
2508 		return;
2509 	}
2510 
2511 	cpumap_t avail_map = pset_available_cpumap(pset);
2512 
2513 	if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
2514 		if ((avail_map & pset->realtime_map) == avail_map) {
2515 			/* No more non-RT CPUs in this pset */
2516 			atomic_bit_clear(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
2517 		}
2518 		avail_map &= pset->primary_map;
2519 		if ((avail_map & pset->realtime_map) == avail_map) {
2520 			/* No more non-RT primary CPUs in this pset */
2521 			atomic_bit_clear(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
2522 		}
2523 	} else {
2524 		if ((avail_map & pset->realtime_map) != avail_map) {
2525 			if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
2526 				atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
2527 			}
2528 		}
2529 		avail_map &= pset->primary_map;
2530 		if ((avail_map & pset->realtime_map) != avail_map) {
2531 			if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
2532 				atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
2533 			}
2534 		}
2535 	}
2536 }
2537 
2538 static processor_t choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries, bool skip_spills);
2539 static processor_t choose_furthest_deadline_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline,
2540     processor_t skip_processor, bool skip_spills, bool include_ast_urgent_pending_cpus);
2541 static processor_t choose_next_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool consider_secondaries);
2542 #if defined(__x86_64__)
2543 static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset, bool include_backups);
2544 static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map, bool include_backups);
2545 #endif
2546 static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor, bool as_backup);
2547 static bool processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor);
2548 
2549 static bool
2550 other_psets_have_earlier_rt_threads_pending(processor_set_t stealing_pset, uint64_t earliest_deadline)
2551 {
2552 	pset_map_t pset_map = stealing_pset->node->pset_map;
2553 
2554 	bit_clear(pset_map, stealing_pset->pset_id);
2555 
2556 	for (int pset_id = lsb_first(pset_map); pset_id >= 0; pset_id = lsb_next(pset_map, pset_id)) {
2557 		processor_set_t nset = pset_array[pset_id];
2558 
2559 		if (deadline_add(nset->stealable_rt_threads_earliest_deadline, rt_deadline_epsilon) < earliest_deadline) {
2560 			return true;
2561 		}
2562 	}
2563 
2564 	return false;
2565 }
2566 
2567 /*
2568  * starting_pset must be locked, but returns true if it is unlocked before return
2569  */
2570 static bool
2571 choose_next_rt_processor_for_IPI(processor_set_t starting_pset, processor_t chosen_processor, bool spill_ipi,
2572     processor_t *result_processor, sched_ipi_type_t *result_ipi_type)
2573 {
2574 	bool starting_pset_is_unlocked = false;
2575 	uint64_t earliest_deadline = rt_runq_earliest_deadline(starting_pset);
2576 	int max_pri = rt_runq_priority(starting_pset);
2577 	__kdebug_only uint64_t spill_tid = thread_tid(rt_runq_first(&starting_pset->rt_runq));
2578 	processor_set_t pset = starting_pset;
2579 	processor_t next_rt_processor = PROCESSOR_NULL;
2580 	if (spill_ipi) {
2581 		processor_set_t nset = next_pset(pset);
2582 		assert(nset != starting_pset);
2583 		pset = change_locked_pset(pset, nset);
2584 		starting_pset_is_unlocked = true;
2585 	}
2586 	do {
2587 		const bool consider_secondaries = true;
2588 		next_rt_processor = choose_next_processor_for_realtime_thread(pset, max_pri, earliest_deadline, chosen_processor, consider_secondaries);
2589 		if (next_rt_processor == PROCESSOR_NULL) {
2590 			if (!spill_ipi) {
2591 				break;
2592 			}
2593 			processor_set_t nset = next_pset(pset);
2594 			if (nset == starting_pset) {
2595 				break;
2596 			}
2597 			pset = change_locked_pset(pset, nset);
2598 			starting_pset_is_unlocked = true;
2599 		}
2600 	} while (next_rt_processor == PROCESSOR_NULL);
2601 	if (next_rt_processor) {
2602 		if (pset != starting_pset) {
2603 			if (bit_set_if_clear(pset->rt_pending_spill_cpu_mask, next_rt_processor->cpu_id)) {
2604 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_START,
2605 				    next_rt_processor->cpu_id, pset->rt_pending_spill_cpu_mask, starting_pset->cpu_set_low, (uintptr_t)spill_tid);
2606 			}
2607 		}
2608 		*result_ipi_type = sched_ipi_action(next_rt_processor, NULL, SCHED_IPI_EVENT_RT_PREEMPT);
2609 		*result_processor = next_rt_processor;
2610 	}
2611 	if (pset != starting_pset) {
2612 		pset_unlock(pset);
2613 	}
2614 
2615 	return starting_pset_is_unlocked;
2616 }
2617 
2618 /*
2619  * backup processor - used by choose_processor as the target of a backup IPI, in case the preferred processor can't immediately respond
2620  * followup processor - used in thread_select when there are still threads on the run queue and available processors
2621  * spill processor - a processor in a different processor set that is signalled to steal a thread from this run queue
2622  */
2623 typedef enum {
2624 	none,
2625 	backup,
2626 	followup,
2627 	spill
2628 } next_processor_type_t;
2629 
2630 #undef LOOP_COUNT
2631 #ifdef LOOP_COUNT
2632 int max_loop_count[MAX_SCHED_CPUS] = { 0 };
2633 #endif
2634 
2635 /*
2636  *	thread_select:
2637  *
2638  *	Select a new thread for the current processor to execute.
2639  *
2640  *	May select the current thread, which must be locked.
2641  */
2642 static thread_t
2643 thread_select(thread_t          thread,
2644     processor_t       processor,
2645     ast_t            *reason)
2646 {
2647 	processor_set_t         pset = processor->processor_set;
2648 	thread_t                        new_thread = THREAD_NULL;
2649 
2650 	assert(processor == current_processor());
2651 	assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2652 
2653 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_START,
2654 	    0, pset->pending_AST_URGENT_cpu_mask, 0, 0);
2655 
2656 	__kdebug_only int idle_reason = 0;
2657 	__kdebug_only int delay_count = 0;
2658 
2659 #if defined(__x86_64__)
2660 	int timeout_count = sched_backup_cpu_timeout_count;
2661 	if ((sched_avoid_cpu0 == 1) && (processor->cpu_id == 0)) {
2662 		/* Prefer cpu0 as backup */
2663 		timeout_count--;
2664 	} else if ((sched_avoid_cpu0 == 2) && (processor->processor_primary != processor)) {
2665 		/* Prefer secondary cpu as backup */
2666 		timeout_count--;
2667 	}
2668 #endif
2669 	bool pending_AST_URGENT = false;
2670 	bool pending_AST_PREEMPT = false;
2671 
2672 #ifdef LOOP_COUNT
2673 	int loop_count = -1;
2674 #endif
2675 
2676 	do {
2677 		/*
2678 		 *	Update the priority.
2679 		 */
2680 		if (SCHED(can_update_priority)(thread)) {
2681 			SCHED(update_priority)(thread);
2682 		}
2683 
2684 		pset_lock(pset);
2685 
2686 restart:
2687 #ifdef LOOP_COUNT
2688 		loop_count++;
2689 		if (loop_count > max_loop_count[processor->cpu_id]) {
2690 			max_loop_count[processor->cpu_id] = loop_count;
2691 			if (bit_count(loop_count) == 1) {
2692 				kprintf("[%d]%s>max_loop_count = %d\n", processor->cpu_id, __FUNCTION__, loop_count);
2693 			}
2694 		}
2695 #endif
2696 		pending_AST_URGENT = bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
2697 		pending_AST_PREEMPT = bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
2698 
2699 		processor_state_update_from_thread(processor, thread, true);
2700 
2701 		idle_reason = 0;
2702 
2703 		processor_t ast_processor = PROCESSOR_NULL;
2704 		processor_t next_rt_processor = PROCESSOR_NULL;
2705 		sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2706 		sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2707 
2708 		assert(processor->state != PROCESSOR_OFF_LINE);
2709 
2710 		/*
2711 		 * Bound threads are dispatched to a processor without going through
2712 		 * choose_processor(), so in those cases we must continue trying to dequeue work
2713 		 * as we are the only option.
2714 		 */
2715 		if (!SCHED(processor_bound_count)(processor)) {
2716 			if (!processor->is_recommended) {
2717 				/*
2718 				 * The performance controller has provided a hint to not dispatch more threads.
2719 				 */
2720 				idle_reason = 1;
2721 				goto send_followup_ipi_before_idle;
2722 			} else if (rt_runq_count(pset)) {
2723 				bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor, false);
2724 				/* Give the current RT thread a chance to complete */
2725 				ok_to_run_realtime_thread |= (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice);
2726 #if defined(__x86_64__)
2727 				/*
2728 				 * On Intel we want to avoid SMT secondary processors and processor 0
2729 				 * but allow them to be used as backup processors in case the preferred chosen
2730 				 * processor is delayed by interrupts or processor stalls.  So if it is
2731 				 * not ok_to_run_realtime_thread as preferred (sched_ok_to_run_realtime_thread(pset, processor, as_backup=false))
2732 				 * but ok_to_run_realtime_thread as backup (sched_ok_to_run_realtime_thread(pset, processor, as_backup=true))
2733 				 * we delay up to (timeout_count * 10us) to give the preferred processor chance
2734 				 * we delay up to (timeout_count * 10us) to give the preferred processor a chance
2735 				 *
2736 				 * timeout_count defaults to 5 but can be tuned using sysctl kern.sched_backup_cpu_timeout_count
2737 				 * on DEVELOPMENT || DEBUG kernels.  It is also adjusted (see above) depending on whether we want to use
2738 				 * cpu0 before secondary cpus or not.
2739 				 */
2740 				if (!ok_to_run_realtime_thread) {
2741 					if (sched_ok_to_run_realtime_thread(pset, processor, true)) {
2742 						if (timeout_count-- > 0) {
2743 							pset_unlock(pset);
2744 							thread_unlock(thread);
2745 							delay(10);
2746 							delay_count++;
2747 							thread_lock(thread);
2748 							pset_lock(pset);
2749 							goto restart;
2750 						}
2751 						ok_to_run_realtime_thread = true;
2752 					}
2753 				}
2754 #endif
2755 				if (!ok_to_run_realtime_thread) {
2756 					idle_reason = 2;
2757 					goto send_followup_ipi_before_idle;
2758 				}
2759 			} else if (processor->processor_primary != processor) {
2760 				/*
2761 				 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
2762 				 * we should look for work only under the same conditions that choose_processor()
2763 				 * would have assigned work, which is when all primary processors have been assigned work.
2764 				 */
2765 				if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
2766 					/* There are idle primaries */
2767 					idle_reason = 3;
2768 					goto idle;
2769 				}
2770 			}
2771 		}
2772 
2773 		/*
2774 		 *	Test to see if the current thread should continue
2775 		 *	to run on this processor.  Must not be attempting to wait, and not
2776 		 *	bound to a different processor, nor be in the wrong
2777 		 *	processor set, nor be forced to context switch by TH_SUSP.
2778 		 *
2779 		 *	Note that there are never any RT threads in the regular runqueue.
2780 		 *
2781 		 *	This code is very insanely tricky.
2782 		 *	This code is insanely tricky.
2783 
2784 		/* i.e. not waiting, not TH_SUSP'ed */
2785 		bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);
2786 
2787 		/*
2788 		 * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
2789 		 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
2790 		 *       <rdar://problem/47907700>
2791 		 *
2792 		 * A yielding thread shouldn't be forced to context switch.
2793 		 */
2794 
2795 		bool is_yielding         = (*reason & AST_YIELD) == AST_YIELD;
2796 
2797 		bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;
2798 
2799 		bool affinity_mismatch   = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;
2800 
2801 		bool bound_elsewhere     = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;
2802 
2803 		bool avoid_processor     = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread, *reason);
2804 
2805 		bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor, true);
2806 
2807 		bool current_thread_can_keep_running = (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor);
2808 		if (current_thread_can_keep_running) {
2809 			/*
2810 			 * This thread is eligible to keep running on this processor.
2811 			 *
2812 			 * RT threads with un-expired quantum stay on processor,
2813 			 * unless there's a valid RT thread with an earlier deadline
2814 			 * and it is still ok_to_run_realtime_thread.
2815 			 */
2816 			if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
2817 				/*
2818 				 * Pick a new RT thread only if ok_to_run_realtime_thread
2819 				 * (but the current thread is allowed to complete).
2820 				 */
2821 				if (ok_to_run_realtime_thread) {
2822 					if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
2823 						goto pick_new_rt_thread;
2824 					}
2825 					if (rt_runq_priority(pset) > thread->sched_pri) {
2826 						if (sched_rt_runq_strict_priority) {
2827 							/* The next RT thread is better, so pick it off the runqueue. */
2828 							goto pick_new_rt_thread;
2829 						}
2830 
2831 						/*
2832 						 * See if the current lower priority thread can continue to run without causing
2833 						 * the higher priority thread on the runq queue to miss its deadline.
2834 						 * the higher priority thread on the runq to miss its deadline.
2835 						thread_t hi_thread = rt_runq_first(SCHED(rt_runq)(pset));
2836 						if (thread->realtime.computation + hi_thread->realtime.computation + rt_deadline_epsilon >= hi_thread->realtime.constraint) {
2837 							/* The next RT thread is better, so pick it off the runqueue. */
2838 							goto pick_new_rt_thread;
2839 						}
2840 					} else if ((rt_runq_count(pset) > 0) && (deadline_add(rt_runq_earliest_deadline(pset), rt_deadline_epsilon) < thread->realtime.deadline)) {
2841 						/* The next RT thread is better, so pick it off the runqueue. */
2842 						goto pick_new_rt_thread;
2843 					}
2844 					if (other_psets_have_earlier_rt_threads_pending(pset, thread->realtime.deadline)) {
2845 						goto pick_new_rt_thread;
2846 					}
2847 				}
2848 
2849 				/* This is still the best RT thread to run. */
2850 				processor->deadline = thread->realtime.deadline;
2851 
2852 				sched_update_pset_load_average(pset, 0);
2853 
2854 				clear_pending_AST_bits(pset, processor, 1);
2855 
2856 				next_rt_processor = PROCESSOR_NULL;
2857 				next_rt_ipi_type = SCHED_IPI_NONE;
2858 
2859 				bool pset_unlocked = false;
2860 				__kdebug_only next_processor_type_t nptype = none;
2861 				if (sched_allow_rt_steal && pset_has_stealable_rt_threads(pset)) {
2862 					nptype = spill;
2863 					pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, true, &next_rt_processor, &next_rt_ipi_type);
2864 				} else if (pset_needs_a_followup_IPI(pset)) {
2865 					nptype = followup;
2866 					pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, false, &next_rt_processor, &next_rt_ipi_type);
2867 				}
2868 				if (!pset_unlocked) {
2869 					pset_unlock(pset);
2870 				}
2871 
2872 				if (next_rt_processor) {
2873 					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
2874 					    next_rt_processor->cpu_id, next_rt_processor->state, nptype, 2);
2875 					sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2876 				}
2877 
2878 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2879 				    (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 1);
2880 				return thread;
2881 			}
2882 
2883 			if ((rt_runq_count(pset) == 0) &&
2884 			    SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
2885 				/* This thread is still the highest priority runnable (non-idle) thread */
2886 				processor->deadline = RT_DEADLINE_NONE;
2887 
2888 				sched_update_pset_load_average(pset, 0);
2889 
2890 				clear_pending_AST_bits(pset, processor, 2);
2891 
2892 				pset_unlock(pset);
2893 
2894 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2895 				    (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 2);
2896 				return thread;
2897 			}
2898 		} else {
2899 			/*
2900 			 * This processor must context switch.
2901 			 * If it's due to a rebalance, we should aggressively find this thread a new home.
2902 			 */
2903 			if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) {
2904 				*reason |= AST_REBALANCE;
2905 			}
2906 		}
2907 
2908 		bool secondary_forced_idle = ((processor->processor_secondary != PROCESSOR_NULL) &&
2909 		    (thread_no_smt(thread) || (thread->sched_pri >= BASEPRI_RTQUEUES)) &&
2910 		    (processor->processor_secondary->state == PROCESSOR_IDLE));
2911 
2912 		/* OK, so we're not going to run the current thread. Look at the RT queue. */
2913 		if (ok_to_run_realtime_thread) {
2914 pick_new_rt_thread:
2915 			new_thread = sched_rt_choose_thread(pset);
2916 			if (new_thread != THREAD_NULL) {
2917 				processor->deadline = new_thread->realtime.deadline;
2918 				pset_commit_processor_to_new_thread(pset, processor, new_thread);
2919 
2920 				clear_pending_AST_bits(pset, processor, 3);
2921 
2922 				if (processor->processor_secondary != NULL) {
2923 					processor_t sprocessor = processor->processor_secondary;
2924 					if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
2925 						ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
2926 						ast_processor = sprocessor;
2927 					}
2928 				}
2929 			}
2930 		}
2931 
2932 send_followup_ipi_before_idle:
2933 		/* This might not have been cleared if we didn't call sched_rt_choose_thread() */
2934 		if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
2935 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 5);
2936 		}
2937 		__kdebug_only next_processor_type_t nptype = none;
2938 		bool pset_unlocked = false;
2939 		if (sched_allow_rt_steal && pset_has_stealable_rt_threads(pset)) {
2940 			nptype = spill;
2941 			pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, true, &next_rt_processor, &next_rt_ipi_type);
2942 		} else if (pset_needs_a_followup_IPI(pset)) {
2943 			nptype = followup;
2944 			pset_unlocked = choose_next_rt_processor_for_IPI(pset, processor, false, &next_rt_processor, &next_rt_ipi_type);
2945 		}
2946 
2947 		assert(new_thread || !ast_processor);
2948 		if (new_thread || next_rt_processor) {
2949 			if (!pset_unlocked) {
2950 				pset_unlock(pset);
2951 				pset_unlocked = true;
2952 			}
2953 			if (ast_processor == next_rt_processor) {
2954 				ast_processor = PROCESSOR_NULL;
2955 				ipi_type = SCHED_IPI_NONE;
2956 			}
2957 
2958 			if (ast_processor) {
2959 				sched_ipi_perform(ast_processor, ipi_type);
2960 			}
2961 
2962 			if (next_rt_processor) {
2963 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
2964 				    next_rt_processor->cpu_id, next_rt_processor->state, nptype, 3);
2965 				sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2966 			}
2967 
2968 			if (new_thread) {
2969 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
2970 				    (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 3);
2971 				return new_thread;
2972 			}
2973 		}
2974 
2975 		if (pset_unlocked) {
2976 			pset_lock(pset);
2977 		}
2978 
2979 		if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2980 			/* Things changed while we dropped the lock */
2981 			goto restart;
2982 		}
2983 
2984 		if (processor->is_recommended) {
2985 			bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
2986 			if (sched_ok_to_run_realtime_thread(pset, processor, true) && (spill_pending || rt_runq_count(pset))) {
2987 				/* Things changed while we dropped the lock */
2988 				goto restart;
2989 			}
2990 
2991 			if ((processor->processor_primary != processor) && (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES)) {
2992 				/* secondary can only run realtime thread */
2993 				if (idle_reason == 0) {
2994 					idle_reason = 4;
2995 				}
2996 				goto idle;
2997 			}
2998 		} else if (!SCHED(processor_bound_count)(processor)) {
2999 			/* processor not recommended and no bound threads */
3000 			if (idle_reason == 0) {
3001 				idle_reason = 5;
3002 			}
3003 			goto idle;
3004 		}
3005 
3006 		processor->deadline = RT_DEADLINE_NONE;
3007 
3008 		/* No RT threads, so let's look at the regular threads. */
3009 		if ((new_thread = SCHED(choose_thread)(processor, MINPRI, current_thread_can_keep_running ? thread : THREAD_NULL, *reason)) != THREAD_NULL) {
3010 			if (new_thread != thread) {
3011 				/* Going to context-switch */
3012 				pset_commit_processor_to_new_thread(pset, processor, new_thread);
3013 
3014 				clear_pending_AST_bits(pset, processor, 4);
3015 
3016 				ast_processor = PROCESSOR_NULL;
3017 				ipi_type = SCHED_IPI_NONE;
3018 
3019 				processor_t sprocessor = processor->processor_secondary;
3020 				if (sprocessor != NULL) {
3021 					if (sprocessor->state == PROCESSOR_RUNNING) {
3022 						if (thread_no_smt(new_thread)) {
3023 							ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_SMT_REBAL);
3024 							ast_processor = sprocessor;
3025 						}
3026 					} else if (secondary_forced_idle && !thread_no_smt(new_thread) && pset_has_stealable_threads(pset)) {
3027 						ipi_type = sched_ipi_action(sprocessor, NULL, SCHED_IPI_EVENT_PREEMPT);
3028 						ast_processor = sprocessor;
3029 					}
3030 				}
3031 
3032 				pset_unlock(pset);
3033 
3034 				if (ast_processor) {
3035 					sched_ipi_perform(ast_processor, ipi_type);
3036 				}
3037 			} else {
3038 				/* Will continue running the current thread */
3039 				clear_pending_AST_bits(pset, processor, 4);
3040 				pset_unlock(pset);
3041 			}
3042 
3043 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3044 			    (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 4);
3045 			return new_thread;
3046 		}
3047 
3048 		if (processor->must_idle) {
3049 			processor->must_idle = false;
3050 			*reason |= AST_REBALANCE;
3051 			idle_reason = 6;
3052 			goto idle;
3053 		}
3054 
3055 		if (SCHED(steal_thread_enabled)(pset) && (processor->processor_primary == processor)) {
3056 			/*
3057 			 * No runnable threads, attempt to steal
3058 			 * from other processors. Returns with pset lock dropped.
3059 			 */
3060 
3061 			if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
3062 				pset_lock(pset);
3063 				pset_commit_processor_to_new_thread(pset, processor, new_thread);
3064 				if (!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
3065 					/*
3066 					 * A realtime thread chose this processor while it was DISPATCHING
3067 					 * and the pset lock was dropped
3068 					 */
3069 					ast_on(AST_URGENT | AST_PREEMPT);
3070 				}
3071 
3072 				clear_pending_AST_bits(pset, processor, 5);
3073 
3074 				pset_unlock(pset);
3075 
3076 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3077 				    (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 5);
3078 				return new_thread;
3079 			}
3080 
3081 			/*
3082 			 * If other threads have appeared, shortcut
3083 			 * around again.
3084 			 */
3085 			if (SCHED(processor_bound_count)(processor)) {
3086 				continue;
3087 			}
3088 			if (processor->is_recommended) {
3089 				if (!SCHED(processor_queue_empty)(processor) || (sched_ok_to_run_realtime_thread(pset, processor, true) && (rt_runq_count(pset) > 0))) {
3090 					continue;
3091 				}
3092 			}
3093 
3094 			pset_lock(pset);
3095 		}
3096 
3097 idle:
3098 		/* Someone selected this processor while we had dropped the lock */
3099 		if ((!pending_AST_URGENT && bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) ||
3100 		    (!pending_AST_PREEMPT && bit_test(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id))) {
3101 			goto restart;
3102 		}
3103 
3104 		if ((idle_reason == 0) && current_thread_can_keep_running) {
3105 			/* This thread is the only runnable (non-idle) thread */
3106 			if (thread->sched_pri >= BASEPRI_RTQUEUES) {
3107 				processor->deadline = thread->realtime.deadline;
3108 			} else {
3109 				processor->deadline = RT_DEADLINE_NONE;
3110 			}
3111 
3112 			sched_update_pset_load_average(pset, 0);
3113 
3114 			clear_pending_AST_bits(pset, processor, 6);
3115 
3116 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3117 			    (uintptr_t)thread_tid(thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 6);
3118 			pset_unlock(pset);
3119 			return thread;
3120 		}
3121 
3122 		/*
3123 		 *	Nothing is runnable, or this processor must be forced idle,
3124 		 *	so set this processor idle if it was running.
3125 		 */
3126 		if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
3127 			pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
3128 			processor_state_update_idle(processor);
3129 		}
3130 		pset_update_rt_stealable_state(pset);
3131 
3132 		clear_pending_AST_bits(pset, processor, 7);
3133 
3134 		/* Invoked with pset locked, returns with pset unlocked */
3135 		processor->next_idle_short = SCHED(processor_balance)(processor, pset);
3136 
3137 		new_thread = processor->idle_thread;
3138 	} while (new_thread == THREAD_NULL);
3139 
3140 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SELECT) | DBG_FUNC_END,
3141 	    (uintptr_t)thread_tid(new_thread), pset->pending_AST_URGENT_cpu_mask, delay_count, 10 + idle_reason);
3142 	return new_thread;
3143 }
3144 
3145 /*
3146  * thread_invoke
3147  *
3148  * Called at splsched with neither thread locked.
3149  *
3150  * Perform a context switch and start executing the new thread.
3151  *
3152  * Returns FALSE when the context switch didn't happen.
3153  * The reference to the new thread is still consumed.
3154  *
3155  * "self" is what is currently running on the processor,
3156  * "thread" is the new thread to context switch to
3157  * (which may be the same thread in some cases)
3158  */
3159 static boolean_t
3160 thread_invoke(
3161 	thread_t                        self,
3162 	thread_t                        thread,
3163 	ast_t                           reason)
3164 {
3165 	if (__improbable(get_preemption_level() != 0)) {
3166 		int pl = get_preemption_level();
3167 		panic("thread_invoke: preemption_level %d, possible cause: %s",
3168 		    pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
3169 		    "blocking while holding a spinlock, or within interrupt context"));
3170 	}
3171 
3172 	thread_continue_t       continuation = self->continuation;
3173 	void                    *parameter   = self->parameter;
3174 
3175 	struct recount_snap snap = { 0 };
3176 	recount_snapshot(&snap);
3177 	uint64_t ctime = snap.rsn_time_mach;
3178 
3179 	check_monotonic_time(ctime);
3180 
3181 #ifdef CONFIG_MACH_APPROXIMATE_TIME
3182 	commpage_update_mach_approximate_time(ctime);
3183 #endif
3184 
3185 	if (ctime < thread->last_made_runnable_time) {
3186 		panic("Non-monotonic time: invoke at 0x%llx, runnable at 0x%llx",
3187 		    ctime, thread->last_made_runnable_time);
3188 	}
3189 
3190 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3191 	if (!((thread->state & TH_IDLE) != 0 ||
3192 	    ((reason & AST_HANDOFF) && self->sched_mode == TH_MODE_REALTIME))) {
3193 		sched_timeshare_consider_maintenance(ctime, true);
3194 	}
3195 #endif
3196 
3197 	recount_log_switch_thread(&snap);
3198 
3199 	processor_t processor = current_processor();
3200 
3201 	if (!processor->processor_online) {
3202 		panic("Invalid attempt to context switch an offline processor");
3203 	}
3204 
3205 	assert_thread_magic(self);
3206 	assert(self == current_thread());
3207 	thread_assert_runq_null(self);
3208 	assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
3209 
3210 	thread_lock(thread);
3211 
3212 	assert_thread_magic(thread);
3213 	assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
3214 	assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor);
3215 	thread_assert_runq_null(thread);
3216 
3217 	/* Update SFI class based on other factors */
3218 	thread->sfi_class = sfi_thread_classify(thread);
3219 
3220 	/* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
3221 	thread->same_pri_latency = ctime - thread->last_basepri_change_time;
3222 	/*
3223 	 * In case a base_pri update happened between the timestamp and
3224 	 * taking the thread lock
3225 	 */
3226 	if (ctime <= thread->last_basepri_change_time) {
3227 		thread->same_pri_latency = ctime - thread->last_made_runnable_time;
3228 	}
3229 
3230 	/* Allow realtime threads to hang onto a stack. */
3231 	if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
3232 		self->reserved_stack = self->kernel_stack;
3233 	}
3234 
3235 	/* Prepare for spin debugging */
3236 #if SCHED_HYGIENE_DEBUG
3237 	ml_spin_debug_clear(thread);
3238 #endif
3239 
3240 	if (continuation != NULL) {
3241 		if (!thread->kernel_stack) {
3242 			/*
3243 			 * If we are using a privileged stack,
3244 			 * check to see whether we can exchange it with
3245 			 * that of the other thread.
3246 			 */
3247 			if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
3248 				goto need_stack;
3249 			}
3250 
3251 			/*
3252 			 * Context switch by performing a stack handoff.
3253 			 * Requires both threads to be parked in a continuation.
3254 			 */
3255 			continuation = thread->continuation;
3256 			parameter = thread->parameter;
3257 
3258 			processor->active_thread = thread;
3259 			processor_state_update_from_thread(processor, thread, false);
3260 
3261 			if (thread->last_processor != processor && thread->last_processor != NULL) {
3262 				if (thread->last_processor->processor_set != processor->processor_set) {
3263 					thread->ps_switch++;
3264 				}
3265 				thread->p_switch++;
3266 			}
3267 			thread->last_processor = processor;
3268 			thread->c_switch++;
3269 			ast_context(thread);
3270 
3271 			thread_unlock(thread);
3272 
3273 			self->reason = reason;
3274 
3275 			processor->last_dispatch = ctime;
3276 			self->last_run_time = ctime;
3277 			timer_update(&thread->runnable_timer, ctime);
3278 			recount_switch_thread(&snap, self, get_threadtask(self));
3279 
3280 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3281 			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
3282 			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3283 
3284 			if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
3285 				SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
3286 				    (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
3287 			}
3288 
3289 			DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, current_proc());
3290 
3291 			SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
3292 
3293 #if KPERF
3294 			kperf_off_cpu(self);
3295 #endif /* KPERF */
3296 
3297 			/*
3298 			 * This is where we actually switch thread identity,
3299 			 * and address space if required.  However, register
3300 			 * state is not switched - this routine leaves the
3301 			 * stack and register state active on the current CPU.
3302 			 */
3303 			TLOG(1, "thread_invoke: calling stack_handoff\n");
3304 			stack_handoff(self, thread);
3305 
3306 			/* 'self' is now off core */
3307 			assert(thread == current_thread_volatile());
3308 
3309 			DTRACE_SCHED(on__cpu);
3310 
3311 #if KPERF
3312 			kperf_on_cpu(thread, continuation, NULL);
3313 #endif /* KPERF */
3314 
3315 			recount_log_switch_thread_on(&snap);
3316 
3317 			thread_dispatch(self, thread);
3318 
3319 #if KASAN
3320 			/* Old thread's stack has been moved to the new thread, so explicitly
3321 			 * unpoison it. */
3322 			kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
3323 #endif
3324 
3325 			thread->continuation = thread->parameter = NULL;
3326 
3327 			boolean_t enable_interrupts = TRUE;
3328 
3329 			/* idle thread needs to stay interrupts-disabled */
3330 			if ((thread->state & TH_IDLE)) {
3331 				enable_interrupts = FALSE;
3332 			}
3333 
3334 			assert(continuation);
3335 			call_continuation(continuation, parameter,
3336 			    thread->wait_result, enable_interrupts);
3337 			/*NOTREACHED*/
3338 		} else if (thread == self) {
3339 			/* same thread but with continuation */
3340 			ast_context(self);
3341 
3342 			thread_unlock(self);
3343 
3344 #if KPERF
3345 			kperf_on_cpu(thread, continuation, NULL);
3346 #endif /* KPERF */
3347 
3348 			recount_log_switch_thread_on(&snap);
3349 
3350 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3351 			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3352 			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3353 
3354 #if KASAN
3355 			/* stack handoff to self - no thread_dispatch(), so clear the stack
3356 			 * and free the fakestack directly */
3357 #if KASAN_CLASSIC
3358 			kasan_fakestack_drop(self);
3359 			kasan_fakestack_gc(self);
3360 #endif /* KASAN_CLASSIC */
3361 			kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
3362 #endif /* KASAN */
3363 
3364 			self->continuation = self->parameter = NULL;
3365 
3366 			boolean_t enable_interrupts = TRUE;
3367 
3368 			/* idle thread needs to stay interrupts-disabled */
3369 			if ((self->state & TH_IDLE)) {
3370 				enable_interrupts = FALSE;
3371 			}
3372 
3373 			call_continuation(continuation, parameter,
3374 			    self->wait_result, enable_interrupts);
3375 			/*NOTREACHED*/
3376 		}
3377 	} else {
3378 		/*
3379 		 * Check that the other thread has a stack
3380 		 */
3381 		if (!thread->kernel_stack) {
3382 need_stack:
3383 			if (!stack_alloc_try(thread)) {
3384 				thread_unlock(thread);
3385 				thread_stack_enqueue(thread);
3386 				return FALSE;
3387 			}
3388 		} else if (thread == self) {
3389 			ast_context(self);
3390 			thread_unlock(self);
3391 
3392 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3393 			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3394 			    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3395 
3396 			return TRUE;
3397 		}
3398 	}
3399 
3400 	/*
3401 	 * Context switch by full context save.
3402 	 */
3403 	processor->active_thread = thread;
3404 	processor_state_update_from_thread(processor, thread, false);
3405 
3406 	if (thread->last_processor != processor && thread->last_processor != NULL) {
3407 		if (thread->last_processor->processor_set != processor->processor_set) {
3408 			thread->ps_switch++;
3409 		}
3410 		thread->p_switch++;
3411 	}
3412 	thread->last_processor = processor;
3413 	thread->c_switch++;
3414 	ast_context(thread);
3415 
3416 	thread_unlock(thread);
3417 
3418 	self->reason = reason;
3419 
3420 	processor->last_dispatch = ctime;
3421 	self->last_run_time = ctime;
3422 	timer_update(&thread->runnable_timer, ctime);
3423 	recount_switch_thread(&snap, self, get_threadtask(self));
3424 
3425 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3426 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
3427 	    self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
3428 
3429 	if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
3430 		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
3431 		    (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
3432 	}
3433 
3434 	DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, current_proc());
3435 
3436 	SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
3437 
3438 #if KPERF
3439 	kperf_off_cpu(self);
3440 #endif /* KPERF */
3441 
3442 	/*
3443 	 * This is where we actually switch register context,
3444 	 * and address space if required.  We will next run
3445 	 * as a result of a subsequent context switch.
3446 	 *
3447 	 * Once registers are switched and the processor is running "thread",
3448 	 * the stack variables and non-volatile registers will contain whatever
3449 	 * was there the last time that thread blocked. No local variables should
3450 	 * be used after this point, except for the special case of "thread", which
3451 	 * the platform layer returns as the previous thread running on the processor
3452 	 * via the function call ABI as a return register, and "self", which may have
3453 	 * been stored on the stack or in a non-volatile register; its saved value
3454 	 * was stale while this thread was off core, but it is accurate again now
3455 	 * that this thread is back on the CPU.
3456 	 *
3457 	 * If one of the threads is using a continuation, thread_continue
3458 	 * is used to stitch up its context.
3459 	 *
3460 	 * If we are invoking a thread which is resuming from a continuation,
3461 	 * the CPU will invoke thread_continue next.
3462 	 *
3463 	 * If the current thread is parking in a continuation, then its state
3464 	 * won't be saved and the stack will be discarded. When the stack is
3465 	 * re-allocated, it will be configured to resume from thread_continue.
3466 	 */
3467 
3468 	assert(continuation == self->continuation);
3469 	thread = machine_switch_context(self, continuation, thread);
3470 	assert(self == current_thread_volatile());
3471 	TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
3472 
3473 	assert(continuation == NULL && self->continuation == NULL);
3474 
3475 	DTRACE_SCHED(on__cpu);
3476 
3477 #if KPERF
3478 	kperf_on_cpu(self, NULL, __builtin_frame_address(0));
3479 #endif /* KPERF */
3480 
3481 	/* Previous snap on the old stack is gone. */
3482 	recount_log_switch_thread_on(NULL);
3483 
3484 	/* We have been resumed and are set to run. */
3485 	thread_dispatch(thread, self);
3486 
3487 	return TRUE;
3488 }
3489 
3490 #if defined(CONFIG_SCHED_DEFERRED_AST)
3491 /*
3492  *	pset_cancel_deferred_dispatch:
3493  *
3494  *	Cancels all ASTs that we can cancel for the given processor set
3495  *	if the current processor is running the last runnable thread in the
3496  *	system.
3497  *
3498  *	This function assumes the current thread is runnable.  This must
3499  *	be called with the pset unlocked.
3500  */
3501 static void
3502 pset_cancel_deferred_dispatch(
3503 	processor_set_t         pset,
3504 	processor_t             processor)
3505 {
3506 	processor_t             active_processor = NULL;
3507 	uint32_t                sampled_sched_run_count;
3508 
3509 	pset_lock(pset);
3510 	sampled_sched_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
3511 
3512 	/*
3513 	 * If we have emptied the run queue, and our current thread is runnable, we
3514 	 * should tell any processors that are still DISPATCHING that they will
3515 	 * probably not have any work to do.  In the event that there are no
3516 	 * pending signals that we can cancel, this is also uninteresting.
3517 	 *
3518 	 * In the unlikely event that another thread becomes runnable while we are
3519 	 * doing this (sched_run_count is atomically updated, not guarded), the
3520 	 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
3521 	 * in order to dispatch it to a processor in our pset.  So, the other
3522 	 * codepath will wait while we squash all cancelable ASTs, get the pset
3523 	 * lock, and then dispatch the freshly runnable thread.  So this should be
3524 	 * correct (we won't accidentally have a runnable thread that hasn't been
3525 	 * dispatched to an idle processor), if not ideal (we may be restarting the
3526 	 * dispatch process, which could have some overhead).
3527 	 */
3528 
3529 	if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
3530 		uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
3531 		    pset->pending_deferred_AST_cpu_mask &
3532 		    ~pset->pending_AST_URGENT_cpu_mask);
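		/*
		 * Candidate CPUs: DISPATCHING only because of a deferred
		 * (cancelable) AST, with no urgent AST outstanding.
		 */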
3533 		for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
3534 			active_processor = processor_array[cpuid];
3535 			/*
3536 			 * If a processor is DISPATCHING, it could be because of
3537 			 * a cancelable signal.
3538 			 *
3539 			 * IF the processor is not our
3540 			 * current processor (the current processor should not
3541 			 * be DISPATCHING, so this is a bit paranoid), AND there
3542 			 * is a cancelable signal pending on the processor, AND
3543 			 * there is no non-cancelable signal pending (as there is
3544 			 * no point trying to backtrack on bringing the processor
3545 			 * up if a signal we cannot cancel is outstanding), THEN
3546 			 * it should make sense to roll back the processor state
3547 			 * to the IDLE state.
3548 			 *
3549 			 * If the racy nature of this approach (as the signal
3550 			 * will be arbitrated by hardware, and can fire as we
3551 			 * roll back state) results in the core responding
3552 			 * despite being pushed back to the IDLE state, it
3553 			 * should be no different than if the core took some
3554 			 * interrupt while IDLE.
3555 			 */
3556 			if (active_processor != processor) {
3557 				/*
3558 				 * Squash all of the processor state back to some
3559 				 * reasonable facsimile of PROCESSOR_IDLE.
3560 				 */
3561 
3562 				processor_state_update_idle(active_processor);
3563 				active_processor->deadline = RT_DEADLINE_NONE;
3564 				pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
3565 				bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
3566 				machine_signal_idle_cancel(active_processor);
3567 			}
3568 		}
3569 	}
3570 
3571 	pset_unlock(pset);
3572 }
3573 #else
3574 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
3575 #endif
3576 
3577 static void
3578 thread_csw_callout(
3579 	thread_t            old,
3580 	thread_t            new,
3581 	uint64_t            timestamp)
3582 {
3583 	perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
3584 	uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
3585 	machine_switch_perfcontrol_context(event, timestamp, 0,
3586 	    same_pri_latency, old, new);
3587 }
3588 
3589 
3590 /*
3591  *	thread_dispatch:
3592  *
3593  *	Handle threads at context switch.  Re-dispatch other thread
3594  *	if still running, otherwise update run state and perform
3595  *	special actions.  Update quantum for other thread and begin
3596  *	the quantum for ourselves.
3597  *
3598  *      "thread" is the old thread that we have switched away from.
3599  *      "self" is the new current thread that we have context switched to
3600  *
3601  *	Called at splsched.
3602  *
3603  */
3604 void
3605 thread_dispatch(
3606 	thread_t                thread,
3607 	thread_t                self)
3608 {
3609 	processor_t             processor = self->last_processor;
3610 	bool was_idle = false;
3611 	bool processor_bootstrap = (thread == THREAD_NULL);
3612 
3613 	assert(processor == current_processor());
3614 	assert(self == current_thread_volatile());
3615 	assert(thread != self);
3616 
3617 	if (thread != THREAD_NULL) {
3618 		/*
3619 		 * Do the perfcontrol callout for context switch.
3620 		 * The reason we do this here is:
3621 		 * - thread_dispatch() is called from various places that are not
3622 		 *   the direct context switch path (e.g. processor shutdown).
3623 		 *   So adding the callout here covers all those cases.
3624 		 * - We want this callout as early as possible to be close
3625 		 *   to the timestamp taken in thread_invoke()
3626 		 * - We want to avoid holding the thread lock while doing the
3627 		 *   callout
3628 		 * - We do not want to callout if "thread" is NULL.
3629 		 */
3630 		thread_csw_callout(thread, self, processor->last_dispatch);
3631 
3632 #if KASAN
3633 		if (thread->continuation != NULL) {
3634 			/*
3635 			 * Thread has a continuation and the normal stack is going away.
3636 			 * Unpoison the stack and mark all fakestack objects as unused.
3637 			 */
3638 #if KASAN_CLASSIC
3639 			kasan_fakestack_drop(thread);
3640 #endif /* KASAN_CLASSIC */
3641 			if (thread->kernel_stack) {
3642 				kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
3643 			}
3644 		}
3645 
3646 
3647 #if KASAN_CLASSIC
3648 		/*
3649 		 * Free all unused fakestack objects.
3650 		 */
3651 		kasan_fakestack_gc(thread);
3652 #endif /* KASAN_CLASSIC */
3653 #endif /* KASAN */
3654 
3655 		/*
3656 		 *	If blocked at a continuation, discard
3657 		 *	the stack.
3658 		 */
3659 		if (thread->continuation != NULL && thread->kernel_stack != 0) {
3660 			stack_free(thread);
3661 		}
3662 
3663 		if (thread->state & TH_IDLE) {
3664 			was_idle = true;
3665 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3666 			    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3667 			    (uintptr_t)thread_tid(thread), 0, thread->state,
3668 			    sched_run_buckets[TH_BUCKET_RUN], 0);
3669 		} else {
3670 			int64_t consumed;
3671 			int64_t remainder = 0;
3672 
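			/*
			 * The CPU time consumed on this pass through the core is the
			 * quantum the thread came on with, minus whatever is still left
			 * before quantum_end at last_dispatch.  Illustratively (numbers
			 * hypothetical): coming on core with 10ms of quantum remaining
			 * and going off 4ms before quantum_end bills 6ms to the
			 * ledgers below.
			 */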
3673 			if (processor->quantum_end > processor->last_dispatch) {
3674 				remainder = processor->quantum_end -
3675 				    processor->last_dispatch;
3676 			}
3677 
3678 			consumed = thread->quantum_remaining - remainder;
3679 
3680 			if ((thread->reason & AST_LEDGER) == 0) {
3681 				/*
3682 				 * Bill CPU time to both the task and
3683 				 * the individual thread.
3684 				 */
3685 				ledger_credit_thread(thread, thread->t_ledger,
3686 				    task_ledgers.cpu_time, consumed);
3687 				ledger_credit_thread(thread, thread->t_threadledger,
3688 				    thread_ledgers.cpu_time, consumed);
3689 				if (thread->t_bankledger) {
3690 					ledger_credit_thread(thread, thread->t_bankledger,
3691 					    bank_ledgers.cpu_time,
3692 					    (consumed - thread->t_deduct_bank_ledger_time));
3693 				}
3694 				thread->t_deduct_bank_ledger_time = 0;
3695 				if (consumed > 0) {
3696 					/*
3697 					 * This should never be negative, but in traces we are seeing some instances
3698 					 * of consumed being negative.
3699 					 * <rdar://problem/57782596> thread_dispatch() thread CPU consumed calculation sometimes results in negative value
3700 					 */
3701 					sched_update_pset_avg_execution_time(current_processor()->processor_set, consumed, processor->last_dispatch, thread->th_sched_bucket);
3702 				}
3703 			}
3704 
3705 			/* For the thread that we just context switched away from, figure
3706 			 * out if we have expired the wq quantum and set the AST if we have
3707 			 */
3708 			if (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) {
3709 				thread_evaluate_workqueue_quantum_expiry(thread);
3710 			}
3711 
3712 			if (__improbable(thread->rwlock_count != 0)) {
3713 				smr_mark_active_trackers_stalled(thread);
3714 			}
3715 
3716 			/*
3717 			 * Pairs with task_restartable_ranges_synchronize
3718 			 */
3719 			wake_lock(thread);
3720 			thread_lock(thread);
3721 
3722 			/*
3723 			 * Same as ast_check(), in case we missed the IPI
3724 			 */
3725 			thread_reset_pcs_ack_IPI(thread);
3726 
3727 			/*
3728 			 * Apply a priority floor if the thread holds a kernel resource
3729 			 * or explicitly requested it.
3730 			 * Do this before checking starting_pri to avoid overpenalizing
3731 			 * repeated rwlock blockers.
3732 			 */
3733 			if (__improbable(thread->rwlock_count != 0)) {
3734 				lck_rw_set_promotion_locked(thread);
3735 			}
3736 			if (__improbable(thread->priority_floor_count != 0)) {
3737 				thread_floor_boost_set_promotion_locked(thread);
3738 			}
3739 
3740 			boolean_t keep_quantum = processor->first_timeslice;
3741 
3742 			/*
3743 			 * Treat a thread which has dropped priority since it got on core
3744 			 * as having expired its quantum.
3745 			 */
3746 			if (processor->starting_pri > thread->sched_pri) {
3747 				keep_quantum = FALSE;
3748 			}
3749 
3750 			/* Compute remainder of current quantum. */
3751 			if (keep_quantum &&
3752 			    processor->quantum_end > processor->last_dispatch) {
3753 				thread->quantum_remaining = (uint32_t)remainder;
3754 			} else {
3755 				thread->quantum_remaining = 0;
3756 			}
3757 
3758 			if (thread->sched_mode == TH_MODE_REALTIME) {
3759 				/*
3760 				 *	Cancel the deadline if the thread has
3761 				 *	consumed the entire quantum.
3762 				 */
3763 				if (thread->quantum_remaining == 0) {
3764 					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CANCEL_RT_DEADLINE) | DBG_FUNC_NONE,
3765 					    (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
3766 					thread->realtime.deadline = RT_DEADLINE_QUANTUM_EXPIRED;
3767 				}
3768 			} else {
3769 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3770 				/*
3771 				 *	For non-realtime threads treat a tiny
3772 				 *	remaining quantum as an expired quantum
3773 				 *	but include what's left next time.
3774 				 */
3775 				if (thread->quantum_remaining < min_std_quantum) {
3776 					thread->reason |= AST_QUANTUM;
3777 					thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
3778 				}
3779 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3780 			}
3781 
3782 			/*
3783 			 *	If we are doing a direct handoff then
3784 			 *	take the remainder of the quantum.
3785 			 */
3786 			if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
3787 				self->quantum_remaining = thread->quantum_remaining;
3788 				thread->reason |= AST_QUANTUM;
3789 				thread->quantum_remaining = 0;
3790 			}
3791 
3792 			thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
3793 
3794 			if (!(thread->state & TH_WAIT)) {
3795 				/*
3796 				 *	Still runnable.
3797 				 */
3798 				thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
3799 
3800 				machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
3801 
3802 				ast_t reason = thread->reason;
3803 				sched_options_t options = SCHED_NONE;
3804 
3805 				if (reason & AST_REBALANCE) {
3806 					options |= SCHED_REBALANCE;
3807 					if (reason & AST_QUANTUM) {
3808 						/*
3809 						 * Having gone to the trouble of forcing this thread off a less preferred core,
3810 						 * we should force the preferred core to reschedule immediately to give this
3811 						 * thread a chance to run, instead of leaving it on the run queue where
3812 						 * it may simply be stolen back by the idle core we just forced it off of.
3813 						 * But only do this at the end of a quantum to prevent cascading effects.
3814 						 */
3815 						options |= SCHED_PREEMPT;
3816 					}
3817 				}
3818 
3819 				if (reason & AST_QUANTUM) {
3820 					options |= SCHED_TAILQ;
3821 				} else if (reason & AST_PREEMPT) {
3822 					options |= SCHED_HEADQ;
3823 				} else {
3824 					options |= (SCHED_PREEMPT | SCHED_TAILQ);
3825 				}
3826 
3827 				thread_setrun(thread, options);
3828 
3829 				KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3830 				    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3831 				    (uintptr_t)thread_tid(thread), thread->reason, thread->state,
3832 				    sched_run_buckets[TH_BUCKET_RUN], 0);
3833 
3834 				if (thread->wake_active) {
3835 					thread->wake_active = FALSE;
3836 					thread_unlock(thread);
3837 
3838 					thread_wakeup(&thread->wake_active);
3839 				} else {
3840 					thread_unlock(thread);
3841 				}
3842 
3843 				wake_unlock(thread);
3844 			} else {
3845 				/*
3846 				 *	Waiting.
3847 				 */
3848 				boolean_t should_terminate = FALSE;
3849 				uint32_t new_run_count;
3850 				int thread_state = thread->state;
3851 
3852 				/* Only the first call to thread_dispatch
3853 				 * after explicit termination should add
3854 				 * the thread to the termination queue
3855 				 */
3856 				if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
3857 					should_terminate = TRUE;
3858 					thread_state |= TH_TERMINATE2;
3859 				}
3860 
3861 				timer_stop(&thread->runnable_timer, processor->last_dispatch);
3862 
3863 				thread_state &= ~TH_RUN;
3864 				thread->state = thread_state;
3865 
3866 				thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
3867 				thread->chosen_processor = PROCESSOR_NULL;
3868 
3869 				new_run_count = SCHED(run_count_decr)(thread);
3870 
3871 #if CONFIG_SCHED_AUTO_JOIN
3872 				if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0) {
3873 					work_interval_auto_join_unwind(thread);
3874 				}
3875 #endif /* CONFIG_SCHED_AUTO_JOIN */
3876 
3877 #if CONFIG_SCHED_SFI
3878 				if (thread->reason & AST_SFI) {
3879 					thread->wait_sfi_begin_time = processor->last_dispatch;
3880 				}
3881 #endif
3882 				machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);
3883 
3884 				KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3885 				    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3886 				    (uintptr_t)thread_tid(thread), thread->reason, thread_state,
3887 				    new_run_count, 0);
3888 
3889 				if (thread_state & TH_WAIT_REPORT) {
3890 					(*thread->sched_call)(SCHED_CALL_BLOCK, thread);
3891 				}
3892 
3893 				if (thread->wake_active) {
3894 					thread->wake_active = FALSE;
3895 					thread_unlock(thread);
3896 
3897 					thread_wakeup(&thread->wake_active);
3898 				} else {
3899 					thread_unlock(thread);
3900 				}
3901 
3902 				wake_unlock(thread);
3903 
3904 				if (should_terminate) {
3905 					thread_terminate_enqueue(thread);
3906 				}
3907 			}
3908 		}
3909 		/*
3910 		 * The thread could have been added to the termination queue, so it's
3911 		 * unsafe to use after this point.
3912 		 */
3913 		thread = THREAD_NULL;
3914 	}
3915 
3916 	int urgency = THREAD_URGENCY_NONE;
3917 	uint64_t latency = 0;
3918 
3919 	/* Update (new) current thread and reprogram running timers */
3920 	thread_lock(self);
3921 
3922 	if (!(self->state & TH_IDLE)) {
3923 		uint64_t        arg1, arg2;
3924 
3925 #if CONFIG_SCHED_SFI
3926 		ast_t                   new_ast;
3927 
3928 		new_ast = sfi_thread_needs_ast(self, NULL);
3929 
3930 		if (new_ast != AST_NONE) {
3931 			ast_on(new_ast);
3932 		}
3933 #endif
3934 
3935 		if (processor->last_dispatch < self->last_made_runnable_time) {
3936 			panic("Non-monotonic time: dispatch at 0x%llx, runnable at 0x%llx",
3937 			    processor->last_dispatch, self->last_made_runnable_time);
3938 		}
3939 
3940 		assert(self->last_made_runnable_time <= self->last_basepri_change_time);
3941 
3942 		latency = processor->last_dispatch - self->last_made_runnable_time;
3943 		assert(latency >= self->same_pri_latency);
3944 
3945 		urgency = thread_get_urgency(self, &arg1, &arg2);
3946 
3947 		thread_tell_urgency(urgency, arg1, arg2, latency, self);
3948 
3949 		/*
3950 		 *	Start a new CPU limit interval if the previous one has
3951 		 *	expired. This should happen before initializing a new
3952 		 *	quantum.
3953 		 */
3954 		if (cpulimit_affects_quantum &&
3955 		    thread_cpulimit_interval_has_expired(processor->last_dispatch)) {
3956 			thread_cpulimit_restart(processor->last_dispatch);
3957 		}
3958 
3959 		/*
3960 		 *	Get a new quantum if none remaining.
3961 		 */
3962 		if (self->quantum_remaining == 0) {
3963 			thread_quantum_init(self, processor->last_dispatch);
3964 		}
3965 
3966 		/*
3967 		 *	Set up quantum timer and timeslice.
3968 		 */
3969 		processor->quantum_end = processor->last_dispatch +
3970 		    self->quantum_remaining;
3971 
3972 		running_timer_setup(processor, RUNNING_TIMER_QUANTUM, self,
3973 		    processor->quantum_end, processor->last_dispatch);
3974 		if (was_idle) {
3975 			/*
3976 			 * kperf's running timer is active whenever the idle thread for a
3977 			 * CPU is not running.
3978 			 */
3979 			kperf_running_setup(processor, processor->last_dispatch);
3980 		}
3981 		running_timers_activate(processor);
3982 		processor->first_timeslice = TRUE;
3983 	} else {
3984 		if (!processor_bootstrap) {
3985 			running_timers_deactivate(processor);
3986 		}
3987 		processor->first_timeslice = FALSE;
3988 		thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
3989 	}
3990 
3991 	assert(self->block_hint == kThreadWaitNone);
3992 	self->computation_epoch = processor->last_dispatch;
3993 	/*
3994 	 * This relies on the interrupt time being tallied up to the thread in the
3995 	 * exception handler epilogue, which is before AST context where preemption
3996 	 * is considered (and the scheduler is potentially invoked to
3997 	 * context switch, here).
3998 	 */
3999 	self->computation_interrupt_epoch = recount_current_thread_interrupt_time_mach();
4000 	self->reason = AST_NONE;
4001 	processor->starting_pri = self->sched_pri;
4002 
4003 	thread_unlock(self);
4004 
4005 	machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
4006 	    processor->last_dispatch);
4007 
4008 #if defined(CONFIG_SCHED_DEFERRED_AST)
4009 	/*
4010 	 * TODO: Can we state that redispatching our old thread is also
4011 	 * uninteresting?
4012 	 */
4013 	if ((os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) == 1) && !(self->state & TH_IDLE)) {
4014 		pset_cancel_deferred_dispatch(processor->processor_set, processor);
4015 	}
4016 #endif
4017 }
4018 
4019 /*
4020  *	thread_block_reason:
4021  *
4022  *	Forces a reschedule, blocking the caller if a wait
4023  *	has been asserted.
4024  *
4025  *	If a continuation is specified, then thread_invoke will
4026  *	attempt to discard the thread's kernel stack.  When the
4027  *	thread resumes, it will execute the continuation function
4028  *	on a new kernel stack.
4029  */
4030 wait_result_t
4031 thread_block_reason(
4032 	thread_continue_t       continuation,
4033 	void                            *parameter,
4034 	ast_t                           reason)
4035 {
4036 	thread_t        self = current_thread();
4037 	processor_t     processor;
4038 	thread_t        new_thread;
4039 	spl_t           s;
4040 
4041 	s = splsched();
4042 
4043 	processor = current_processor();
4044 
4045 	/* If we're explicitly yielding, force a subsequent quantum */
4046 	if (reason & AST_YIELD) {
4047 		processor->first_timeslice = FALSE;
4048 	}
4049 
4050 	/* We're handling all scheduling ASTs */
4051 	ast_off(AST_SCHEDULING);
4052 
4053 	clear_pending_nonurgent_preemption(processor);
4054 
4055 #if PROC_REF_DEBUG
4056 	if ((continuation != NULL) && (get_threadtask(self) != kernel_task)) {
4057 		uthread_assert_zero_proc_refcount(get_bsdthread_info(self));
4058 	}
4059 #endif
4060 
4061 #if CONFIG_EXCLAVES
4062 	if (continuation != NULL) {
4063 		assert3u(self->th_exclaves_state & TH_EXCLAVES_STATE_ANY, ==, 0);
4064 	}
4065 #endif /* CONFIG_EXCLAVES */
4066 
4067 	self->continuation = continuation;
4068 	self->parameter = parameter;
4069 
4070 	if (self->state & ~(TH_RUN | TH_IDLE)) {
4071 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4072 		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
4073 		    reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
4074 	}
4075 
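	/*
	 * Keep selecting until a switch actually happens: thread_invoke()
	 * returns FALSE when the switch could not be completed (for example,
	 * the chosen thread needed a kernel stack that was not immediately
	 * available), so pick another thread and retry.
	 */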
4076 	do {
4077 		thread_lock(self);
4078 		new_thread = thread_select(self, processor, &reason);
4079 		thread_unlock(self);
4080 	} while (!thread_invoke(self, new_thread, reason));
4081 
4082 	splx(s);
4083 
4084 	return self->wait_result;
4085 }
4086 
4087 /*
4088  *	thread_block:
4089  *
4090  *	Block the current thread if a wait has been asserted.
4091  */
4092 wait_result_t
4093 thread_block(
4094 	thread_continue_t       continuation)
4095 {
4096 	return thread_block_reason(continuation, NULL, AST_NONE);
4097 }
4098 
4099 wait_result_t
4100 thread_block_parameter(
4101 	thread_continue_t       continuation,
4102 	void                            *parameter)
4103 {
4104 	return thread_block_reason(continuation, parameter, AST_NONE);
4105 }
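/*
 * Illustrative wait-site sketch (hypothetical event and continuation names,
 * not code from this file):
 *
 *	assert_wait(&some_event, THREAD_UNINT);
 *	thread_block(some_continuation_fn);
 *
 * When the thread actually blocks with a continuation, control resumes in
 * some_continuation_fn with the wait result rather than returning here;
 * pass THREAD_CONTINUE_NULL to block without a continuation and return
 * normally.
 */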
4106 
4107 /*
4108  *	thread_run:
4109  *
4110  *	Switch directly from the current thread to the
4111  *	new thread, handing off our quantum if appropriate.
4112  *
4113  *	New thread must be runnable, and not on a run queue.
4114  *
4115  *	Called at splsched.
4116  */
4117 int
4118 thread_run(
4119 	thread_t                        self,
4120 	thread_continue_t       continuation,
4121 	void                            *parameter,
4122 	thread_t                        new_thread)
4123 {
4124 	ast_t reason = AST_NONE;
4125 
4126 	if ((self->state & TH_IDLE) == 0) {
4127 		reason = AST_HANDOFF;
4128 	}
4129 
4130 	/* Must not get here without a chosen processor */
4131 	assert(new_thread->chosen_processor);
4132 
4133 	self->continuation = continuation;
4134 	self->parameter = parameter;
4135 
4136 	while (!thread_invoke(self, new_thread, reason)) {
4137 		/* the handoff failed, so we have to fall back to the normal block path */
4138 		processor_t processor = current_processor();
4139 
4140 		reason = AST_NONE;
4141 
4142 		thread_lock(self);
4143 		new_thread = thread_select(self, processor, &reason);
4144 		thread_unlock(self);
4145 	}
4146 
4147 	return self->wait_result;
4148 }
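/*
 * Sketch of a hypothetical direct handoff (names illustrative): at splsched,
 * with a runnable, off-queue thread whose chosen_processor is set, a caller
 * may donate its quantum via
 *
 *	thread_run(current_thread(), THREAD_CONTINUE_NULL, NULL, new_thread);
 *
 * If the handoff cannot complete, the loop above falls back to the normal
 * thread_select() path.
 */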
4149 
4150 /*
4151  *	thread_continue:
4152  *
4153  *	Called at splsched when a thread first receives
4154  *	a new stack after a continuation.
4155  *
4156  *	Called with THREAD_NULL as the old thread when
4157  *	invoked by machine_load_context.
4158  */
4159 void
4160 thread_continue(
4161 	thread_t        thread)
4162 {
4163 	thread_t                self = current_thread();
4164 	thread_continue_t       continuation;
4165 	void                    *parameter;
4166 
4167 	DTRACE_SCHED(on__cpu);
4168 
4169 	continuation = self->continuation;
4170 	parameter = self->parameter;
4171 
4172 	assert(continuation != NULL);
4173 
4174 #if KPERF
4175 	kperf_on_cpu(self, continuation, NULL);
4176 #endif
4177 
4178 	thread_dispatch(thread, self);
4179 
4180 	self->continuation = self->parameter = NULL;
4181 
4182 #if SCHED_HYGIENE_DEBUG
4183 	/* Reset interrupt-masked spin debugging timeout */
4184 	ml_spin_debug_clear(self);
4185 #endif
4186 
4187 	TLOG(1, "thread_continue: calling call_continuation\n");
4188 
4189 	boolean_t enable_interrupts = TRUE;
4190 
4191 	/* bootstrap thread, idle thread need to stay interrupts-disabled */
4192 	if (thread == THREAD_NULL || (self->state & TH_IDLE)) {
4193 		enable_interrupts = FALSE;
4194 	}
4195 
4196 #if KASAN_TBI
4197 	kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
4198 #endif /* KASAN_TBI */
4199 
4200 
4201 	call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
4202 	/*NOTREACHED*/
4203 }
4204 
4205 void
4206 thread_quantum_init(thread_t thread, uint64_t now)
4207 {
4208 	uint64_t new_quantum = 0;
4209 
4210 	switch (thread->sched_mode) {
4211 	case TH_MODE_REALTIME:
4212 		new_quantum = thread->realtime.computation;
4213 		new_quantum = MIN(new_quantum, max_unsafe_rt_computation);
4214 		break;
4215 
4216 	case TH_MODE_FIXED:
4217 		new_quantum = SCHED(initial_quantum_size)(thread);
4218 		new_quantum = MIN(new_quantum, max_unsafe_fixed_computation);
4219 		break;
4220 
4221 	default:
4222 		new_quantum = SCHED(initial_quantum_size)(thread);
4223 		break;
4224 	}
4225 
4226 	if (cpulimit_affects_quantum) {
4227 		const uint64_t cpulimit_remaining = thread_cpulimit_remaining(now);
4228 
4229 		/*
4230 		 * If there's no remaining CPU time, the ledger system will
4231 		 * notice and put the thread to sleep.
4232 		 */
4233 		if (cpulimit_remaining > 0) {
4234 			new_quantum = MIN(new_quantum, cpulimit_remaining);
4235 		}
4236 	}
4237 
4238 	assert3u(new_quantum, <, UINT32_MAX);
4239 	assert3u(new_quantum, >, 0);
4240 
4241 	thread->quantum_remaining = (uint32_t)new_quantum;
4242 }
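/*
 * Illustrative quantum selection (values hypothetical): a TH_MODE_REALTIME
 * thread that requested 5ms of computation receives a 5ms quantum, clamped
 * to max_unsafe_rt_computation; a timeshare thread receives
 * SCHED(initial_quantum_size)(), further clamped to the remaining CPU-limit
 * interval when cpulimit_affects_quantum is enabled.
 */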
4243 
4244 uint32_t
4245 sched_timeshare_initial_quantum_size(thread_t thread)
4246 {
4247 	if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
4248 		return bg_quantum;
4249 	} else {
4250 		return std_quantum;
4251 	}
4252 }
4253 
4254 /*
4255  *	run_queue_init:
4256  *
4257  *	Initialize a run queue before first use.
4258  */
4259 void
4260 run_queue_init(
4261 	run_queue_t             rq)
4262 {
4263 	rq->highq = NOPRI;
4264 	for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
4265 		rq->bitmap[i] = 0;
4266 	}
4267 	rq->urgency = rq->count = 0;
4268 	for (int i = 0; i < NRQS; i++) {
4269 		circle_queue_init(&rq->queues[i]);
4270 	}
4271 }
4272 
4273 /*
4274  *	run_queue_dequeue:
4275  *
4276  *	Perform a dequeue operation on a run queue,
4277  *	and return the resulting thread.
4278  *
4279  *	The run queue must be locked (see thread_run_queue_remove()
4280  *	for more info), and not empty.
4281  */
4282 thread_t
4283 run_queue_dequeue(
4284 	run_queue_t     rq,
4285 	sched_options_t options)
4286 {
4287 	thread_t        thread;
4288 	circle_queue_t  queue = &rq->queues[rq->highq];
4289 
4290 	if (options & SCHED_HEADQ) {
4291 		thread = cqe_dequeue_head(queue, struct thread, runq_links);
4292 	} else {
4293 		thread = cqe_dequeue_tail(queue, struct thread, runq_links);
4294 	}
4295 
4296 	assert(thread != THREAD_NULL);
4297 	assert_thread_magic(thread);
4298 
4299 	thread_clear_runq(thread);
4300 	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4301 	rq->count--;
4302 	if (SCHED(priority_is_urgent)(rq->highq)) {
4303 		rq->urgency--; assert(rq->urgency >= 0);
4304 	}
4305 	if (circle_queue_empty(queue)) {
4306 		bitmap_clear(rq->bitmap, rq->highq);
4307 		rq->highq = bitmap_first(rq->bitmap, NRQS);
4308 	}
4309 
4310 	return thread;
4311 }
4312 
4313 /*
4314  *	run_queue_enqueue:
4315  *
4316  *	Perform an enqueue operation on a run queue.
4317  *
4318  *	The run queue must be locked (see thread_run_queue_remove()
4319  *	for more info).
4320  */
4321 boolean_t
4322 run_queue_enqueue(
4323 	run_queue_t      rq,
4324 	thread_t         thread,
4325 	sched_options_t  options)
4326 {
4327 	circle_queue_t  queue = &rq->queues[thread->sched_pri];
4328 	boolean_t       result = FALSE;
4329 
4330 	assert_thread_magic(thread);
4331 
4332 	if (circle_queue_empty(queue)) {
4333 		circle_enqueue_tail(queue, &thread->runq_links);
4334 
4335 		rq_bitmap_set(rq->bitmap, thread->sched_pri);
4336 		if (thread->sched_pri > rq->highq) {
4337 			rq->highq = thread->sched_pri;
4338 			result = TRUE;
4339 		}
4340 	} else {
4341 		if (options & SCHED_TAILQ) {
4342 			circle_enqueue_tail(queue, &thread->runq_links);
4343 		} else {
4344 			circle_enqueue_head(queue, &thread->runq_links);
4345 		}
4346 	}
4347 	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
4348 		rq->urgency++;
4349 	}
4350 	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4351 	rq->count++;
4352 
4353 	return result;
4354 }
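/*
 * Illustrative caller pattern (a sketch, not code from this file): with the
 * run queue locked,
 *
 *	if (run_queue_enqueue(rq, thread, SCHED_TAILQ)) {
 *		// rq->highq was raised by this thread; the caller may want
 *		// to check for preemption
 *	}
 *
 * The boolean result only reports that the enqueued thread became the new
 * highest priority in the queue; preemption policy remains the caller's.
 */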
4355 
4356 /*
4357  *	run_queue_remove:
4358  *
4359  *	Remove a specific thread from a runqueue.
4360  *
4361  *	The run queue must be locked.
4362  */
4363 void
4364 run_queue_remove(
4365 	run_queue_t    rq,
4366 	thread_t       thread)
4367 {
4368 	circle_queue_t  queue = &rq->queues[thread->sched_pri];
4369 
4370 	thread_assert_runq_nonnull(thread);
4371 	assert_thread_magic(thread);
4372 
4373 	circle_dequeue(queue, &thread->runq_links);
4374 	SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
4375 	rq->count--;
4376 	if (SCHED(priority_is_urgent)(thread->sched_pri)) {
4377 		rq->urgency--; assert(rq->urgency >= 0);
4378 	}
4379 
4380 	if (circle_queue_empty(queue)) {
4381 		/* update run queue status */
4382 		bitmap_clear(rq->bitmap, thread->sched_pri);
4383 		rq->highq = bitmap_first(rq->bitmap, NRQS);
4384 	}
4385 
4386 	thread_clear_runq(thread);
4387 }
4388 
4389 /*
4390  *      run_queue_peek
4391  *
4392  *      Peek at the runq and return the highest
4393  *      priority thread from the runq.
4394  *
4395  *	The run queue must be locked.
4396  */
4397 thread_t
4398 run_queue_peek(
4399 	run_queue_t    rq)
4400 {
4401 	if (rq->count > 0) {
4402 		circle_queue_t queue = &rq->queues[rq->highq];
4403 		thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
4404 		assert_thread_magic(thread);
4405 		return thread;
4406 	} else {
4407 		return THREAD_NULL;
4408 	}
4409 }
4410 
4411 static bool
4412 rt_runq_enqueue(rt_queue_t rt_run_queue, thread_t thread, processor_t processor)
4413 {
4414 	int pri = thread->sched_pri;
4415 	assert((pri >= BASEPRI_RTQUEUES) && (pri <= MAXPRI));
4416 	int i = pri - BASEPRI_RTQUEUES;
4417 	rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4418 	bitmap_t *map = rt_run_queue->bitmap;
4419 
4420 	bitmap_set(map, i);
4421 
4422 	queue_t     queue       = &rt_runq->pri_queue;
4423 	uint64_t    deadline    = thread->realtime.deadline;
4424 	bool        preempt     = false;
4425 	bool        earliest    = false;
4426 
4427 	if (queue_empty(queue)) {
4428 		enqueue_tail(queue, &thread->runq_links);
4429 		preempt = true;
4430 		earliest = true;
4431 		rt_runq->pri_earliest_deadline = deadline;
4432 		rt_runq->pri_constraint = thread->realtime.constraint;
4433 	} else {
4434 		/* Insert into rt_runq in thread deadline order */
4435 		queue_entry_t iter;
4436 		qe_foreach(iter, queue) {
4437 			thread_t iter_thread = qe_element(iter, struct thread, runq_links);
4438 			assert_thread_magic(iter_thread);
4439 
4440 			if (deadline < iter_thread->realtime.deadline) {
4441 				if (iter == queue_first(queue)) {
4442 					preempt = true;
4443 					earliest = true;
4444 					rt_runq->pri_earliest_deadline = deadline;
4445 					rt_runq->pri_constraint = thread->realtime.constraint;
4446 				}
4447 				insque(&thread->runq_links, queue_prev(iter));
4448 				break;
4449 			} else if (iter == queue_last(queue)) {
4450 				enqueue_tail(queue, &thread->runq_links);
4451 				break;
4452 			}
4453 		}
4454 	}
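	/*
	 * If this thread became the head of its priority level and beats the
	 * queue-wide earliest deadline, publish the new earliest deadline,
	 * constraint, and index for other processors to observe.
	 */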
4455 	if (earliest && (deadline < os_atomic_load_wide(&rt_run_queue->earliest_deadline, relaxed))) {
4456 		os_atomic_store_wide(&rt_run_queue->earliest_deadline, deadline, relaxed);
4457 		os_atomic_store(&rt_run_queue->constraint, thread->realtime.constraint, relaxed);
4458 		os_atomic_store(&rt_run_queue->ed_index, pri - BASEPRI_RTQUEUES, relaxed);
4459 	}
4460 
4461 	SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4462 	rt_runq->pri_count++;
4463 	os_atomic_inc(&rt_run_queue->count, relaxed);
4464 
4465 	thread_set_runq_locked(thread, processor);
4466 
4467 	CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, thread);
4468 
4469 	return preempt;
4470 }
4471 
4472 static thread_t
4473 rt_runq_dequeue(rt_queue_t rt_run_queue)
4474 {
4475 	bitmap_t *map = rt_run_queue->bitmap;
4476 	int i = bitmap_first(map, NRTQS);
4477 	assert((i >= 0) && (i < NRTQS));
4478 
4479 	rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4480 
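	/*
	 * Unless strict priority is enforced, prefer the earliest-deadline
	 * thread over the highest-priority one when running both computations
	 * back to back (plus rt_deadline_epsilon) still fits within the
	 * higher-priority thread's constraint.
	 */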
4481 	if (!sched_rt_runq_strict_priority) {
4482 		int ed_index = os_atomic_load(&rt_run_queue->ed_index, relaxed);
4483 		if (ed_index != i) {
4484 			assert((ed_index >= 0) && (ed_index < NRTQS));
4485 			rt_queue_pri_t *ed_runq = &rt_run_queue->rt_queue_pri[ed_index];
4486 
4487 			thread_t ed_thread = qe_queue_first(&ed_runq->pri_queue, struct thread, runq_links);
4488 			thread_t hi_thread = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4489 
4490 			if (ed_thread->realtime.computation + hi_thread->realtime.computation + rt_deadline_epsilon < hi_thread->realtime.constraint) {
4491 				/* choose the earliest deadline thread */
4492 				rt_runq = ed_runq;
4493 				i = ed_index;
4494 			}
4495 		}
4496 	}
4497 
4498 	assert(rt_runq->pri_count > 0);
4499 	uint64_t earliest_deadline = RT_DEADLINE_NONE;
4500 	uint32_t constraint = RT_CONSTRAINT_NONE;
4501 	int ed_index = NOPRI;
4502 	thread_t new_thread = qe_dequeue_head(&rt_runq->pri_queue, struct thread, runq_links);
4503 	SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4504 	if (--rt_runq->pri_count > 0) {
4505 		thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4506 		assert(next_rt != THREAD_NULL);
4507 		earliest_deadline = next_rt->realtime.deadline;
4508 		constraint = next_rt->realtime.constraint;
4509 		ed_index = i;
4510 	} else {
4511 		bitmap_clear(map, i);
4512 	}
4513 	rt_runq->pri_earliest_deadline = earliest_deadline;
4514 	rt_runq->pri_constraint = constraint;
4515 
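	/* Re-derive the queue-wide earliest deadline across all occupied priority levels. */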
4516 	for (i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4517 		rt_runq = &rt_run_queue->rt_queue_pri[i];
4518 		if (rt_runq->pri_earliest_deadline < earliest_deadline) {
4519 			earliest_deadline = rt_runq->pri_earliest_deadline;
4520 			constraint = rt_runq->pri_constraint;
4521 			ed_index = i;
4522 		}
4523 	}
4524 	os_atomic_store_wide(&rt_run_queue->earliest_deadline, earliest_deadline, relaxed);
4525 	os_atomic_store(&rt_run_queue->constraint, constraint, relaxed);
4526 	os_atomic_store(&rt_run_queue->ed_index, ed_index, relaxed);
4527 	os_atomic_dec(&rt_run_queue->count, relaxed);
4528 
4529 	thread_clear_runq(new_thread);
4530 
4531 	CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, THREAD_NULL);
4532 
4533 	return new_thread;
4534 }
4535 
4536 static thread_t
4537 rt_runq_first(rt_queue_t rt_run_queue)
4538 {
4539 	bitmap_t *map = rt_run_queue->bitmap;
4540 	int i = bitmap_first(map, NRTQS);
4541 	if (i < 0) {
4542 		return THREAD_NULL;
4543 	}
4544 	rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4545 	thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4546 
4547 	return next_rt;
4548 }
4549 
4550 static void
4551 rt_runq_remove(rt_queue_t rt_run_queue, thread_t thread)
4552 {
4553 	CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, thread);
4554 
4555 	int pri = thread->sched_pri;
4556 	assert((pri >= BASEPRI_RTQUEUES) && (pri <= MAXPRI));
4557 	int i = pri - BASEPRI_RTQUEUES;
4558 	rt_queue_pri_t *rt_runq = &rt_run_queue->rt_queue_pri[i];
4559 	bitmap_t *map = rt_run_queue->bitmap;
4560 
4561 	assert(rt_runq->pri_count > 0);
4562 	uint64_t earliest_deadline = RT_DEADLINE_NONE;
4563 	uint32_t constraint = RT_CONSTRAINT_NONE;
4564 	int ed_index = NOPRI;
4565 	remqueue(&thread->runq_links);
4566 	SCHED_STATS_RUNQ_CHANGE(&rt_run_queue->runq_stats, os_atomic_load(&rt_run_queue->count, relaxed));
4567 	if (--rt_runq->pri_count > 0) {
4568 		thread_t next_rt = qe_queue_first(&rt_runq->pri_queue, struct thread, runq_links);
4569 		earliest_deadline = next_rt->realtime.deadline;
4570 		constraint = next_rt->realtime.constraint;
4571 		ed_index = i;
4572 	} else {
4573 		bitmap_clear(map, i);
4574 	}
4575 	rt_runq->pri_earliest_deadline = earliest_deadline;
4576 	rt_runq->pri_constraint = constraint;
4577 
4578 	for (i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4579 		rt_runq = &rt_run_queue->rt_queue_pri[i];
4580 		if (rt_runq->pri_earliest_deadline < earliest_deadline) {
4581 			earliest_deadline = rt_runq->pri_earliest_deadline;
4582 			constraint = rt_runq->pri_constraint;
4583 			ed_index = i;
4584 		}
4585 	}
4586 	os_atomic_store_wide(&rt_run_queue->earliest_deadline, earliest_deadline, relaxed);
4587 	os_atomic_store(&rt_run_queue->constraint, constraint, relaxed);
4588 	os_atomic_store(&rt_run_queue->ed_index, ed_index, relaxed);
4589 	os_atomic_dec(&rt_run_queue->count, relaxed);
4590 
4591 	thread_clear_runq_locked(thread);
4592 
4593 	CHECK_RT_RUNQ_CONSISTENCY(rt_run_queue, THREAD_NULL);
4594 }
4595 
4596 rt_queue_t
4597 sched_rtlocal_runq(processor_set_t pset)
4598 {
4599 	return &pset->rt_runq;
4600 }
4601 
4602 void
4603 sched_rtlocal_init(processor_set_t pset)
4604 {
4605 	pset_rt_init(pset);
4606 }
4607 
4608 void
4609 sched_rtlocal_queue_shutdown(processor_t processor)
4610 {
4611 	processor_set_t pset = processor->processor_set;
4612 	thread_t        thread;
4613 	queue_head_t    tqueue;
4614 
4615 	pset_lock(pset);
4616 
4617 	/* We only need to migrate threads if this is the last active or last recommended processor in the pset */
4618 	if (bit_count(pset_available_cpumap(pset)) > 0) {
4619 		pset_unlock(pset);
4620 		return;
4621 	}
4622 
4623 	queue_init(&tqueue);
4624 
4625 	while (rt_runq_count(pset) > 0) {
4626 		thread = rt_runq_dequeue(&pset->rt_runq);
4627 		enqueue_tail(&tqueue, &thread->runq_links);
4628 	}
4629 	sched_update_pset_load_average(pset, 0);
4630 	pset_update_rt_stealable_state(pset);
4631 	pset_unlock(pset);
4632 
4633 	qe_foreach_element_safe(thread, &tqueue, runq_links) {
4634 		remqueue(&thread->runq_links);
4635 
4636 		thread_lock(thread);
4637 
4638 		thread_setrun(thread, SCHED_TAILQ);
4639 
4640 		thread_unlock(thread);
4641 	}
4642 }
4643 
4644 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
4645 void
4646 sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context)
4647 {
4648 	thread_t        thread;
4649 
4650 	pset_node_t node = &pset_node0;
4651 	processor_set_t pset = node->psets;
4652 
4653 	spl_t s = splsched();
4654 	do {
4655 		while (pset != NULL) {
4656 			pset_lock(pset);
4657 
4658 			bitmap_t *map = pset->rt_runq.bitmap;
4659 			for (int i = bitmap_first(map, NRTQS); i >= 0; i = bitmap_next(map, i)) {
4660 				rt_queue_pri_t *rt_runq = &pset->rt_runq.rt_queue_pri[i];
4661 
4662 				qe_foreach_element_safe(thread, &rt_runq->pri_queue, runq_links) {
4663 					if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
4664 						scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
4665 					}
4666 				}
4667 			}
4668 
4669 			pset_unlock(pset);
4670 
4671 			pset = pset->pset_list;
4672 		}
4673 	} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
4674 	splx(s);
4675 }
4676 
4677 int64_t
4678 sched_rtlocal_runq_count_sum(void)
4679 {
4680 	pset_node_t node = &pset_node0;
4681 	processor_set_t pset = node->psets;
4682 	int64_t count = 0;
4683 
4684 	do {
4685 		while (pset != NULL) {
4686 			count += pset->rt_runq.runq_stats.count_sum;
4687 
4688 			pset = pset->pset_list;
4689 		}
4690 	} while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
4691 
4692 	return count;
4693 }
4694 
4695 /*
4696  * Called with stealing_pset locked and
4697  * returns with stealing_pset locked
4698  * but the lock will have been dropped
4699  * if a thread is returned.
4700  */
4701 thread_t
4702 sched_rtlocal_steal_thread(processor_set_t stealing_pset, uint64_t earliest_deadline)
4703 {
4704 	if (!sched_allow_rt_steal) {
4705 		return THREAD_NULL;
4706 	}
4707 	pset_map_t pset_map = stealing_pset->node->pset_map;
4708 
4709 	bit_clear(pset_map, stealing_pset->pset_id);
4710 
4711 	processor_set_t pset = stealing_pset;
4712 
4713 	processor_set_t target_pset;
4714 	uint64_t target_deadline;
4715 
4716 retry:
4717 	target_pset = NULL;
4718 	target_deadline = earliest_deadline - rt_deadline_epsilon;
4719 
4720 	for (int pset_id = lsb_first(pset_map); pset_id >= 0; pset_id = lsb_next(pset_map, pset_id)) {
4721 		processor_set_t nset = pset_array[pset_id];
4722 
4723 		/*
4724 		 * During startup, while pset_array[] and node->pset_map are still being initialized,
4725 		 * the update to pset_map may become visible to this cpu before the update to pset_array[].
4726 		 * It would be good to avoid inserting a memory barrier here that is only needed during startup,
4727 		 * so just check nset is not NULL instead.
4728 		 */
4729 		if (nset && (nset->stealable_rt_threads_earliest_deadline < target_deadline)) {
4730 			target_deadline = nset->stealable_rt_threads_earliest_deadline;
4731 			target_pset = nset;
4732 		}
4733 	}
4734 
4735 	if (target_pset != NULL) {
4736 		pset = change_locked_pset(pset, target_pset);
4737 		if (pset->stealable_rt_threads_earliest_deadline <= target_deadline) {
4738 			thread_t new_thread = rt_runq_dequeue(&pset->rt_runq);
4739 			pset_update_rt_stealable_state(pset);
4740 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_STEAL) | DBG_FUNC_NONE, (uintptr_t)thread_tid(new_thread), pset->pset_id, pset->cpu_set_low, 0);
4741 
4742 			pset = change_locked_pset(pset, stealing_pset);
4743 			return new_thread;
4744 		}
4745 		pset = change_locked_pset(pset, stealing_pset);
4746 		earliest_deadline = rt_runq_earliest_deadline(pset);
4747 		goto retry;
4748 	}
4749 
4750 	pset = change_locked_pset(pset, stealing_pset);
4751 	return THREAD_NULL;
4752 }
4753 
4754 /*
4755  * pset is locked
4756  */
4757 thread_t
4758 sched_rt_choose_thread(processor_set_t pset)
4759 {
4760 	processor_t processor = current_processor();
4761 
4762 	if (SCHED(steal_thread_enabled)(pset)) {
4763 		do {
4764 			bool spill_pending = bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
4765 			if (spill_pending) {
4766 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 2);
4767 			}
4768 			thread_t new_thread = SCHED(rt_steal_thread)(pset, rt_runq_earliest_deadline(pset));
4769 			if (new_thread != THREAD_NULL) {
4770 				if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
4771 					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 3);
4772 				}
4773 				return new_thread;
4774 			}
4775 		} while (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id));
4776 	}
4777 
4778 	if (bit_clear_if_set(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
4779 		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RT_SIGNAL_SPILL) | DBG_FUNC_END, processor->cpu_id, pset->rt_pending_spill_cpu_mask, 0, 4);
4780 	}
4781 
4782 	if (rt_runq_count(pset) > 0) {
4783 		thread_t new_thread = rt_runq_dequeue(SCHED(rt_runq)(pset));
4784 		assert(new_thread != THREAD_NULL);
4785 		pset_update_rt_stealable_state(pset);
4786 		return new_thread;
4787 	}
4788 
4789 	return THREAD_NULL;
4790 }
4791 
4792 /*
4793  *	realtime_queue_insert:
4794  *
4795  *	Enqueue a thread for realtime execution.
4796  */
4797 static bool
4798 realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
4799 {
4800 	pset_assert_locked(pset);
4801 
4802 	bool preempt = rt_runq_enqueue(SCHED(rt_runq)(pset), thread, processor);
4803 	pset_update_rt_stealable_state(pset);
4804 
4805 	return preempt;
4806 }
4807 
4808 /*
4809  *	realtime_setrun:
4810  *
4811  *	Dispatch a thread for realtime execution.
4812  *
4813  *	Thread must be locked.  Associated pset must
4814  *	be locked, and is returned unlocked.
4815  */
4816 static void
4817 realtime_setrun(
4818 	processor_t                     chosen_processor,
4819 	thread_t                        thread)
4820 {
4821 	processor_set_t pset = chosen_processor->processor_set;
4822 	pset_assert_locked(pset);
4823 	bool pset_is_locked = true;
4824 
4825 	int n_backup = 0;
4826 
4827 	if (thread->realtime.constraint <= rt_constraint_threshold) {
4828 		n_backup = sched_rt_n_backup_processors;
4829 	}
4830 	assert((n_backup >= 0) && (n_backup <= SCHED_MAX_BACKUP_PROCESSORS));
4831 
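	/*
	 * CPUs that already have an urgent AST pending beyond the current RT
	 * run-queue depth are effectively acting as backups, so request that
	 * many fewer new backup processors.
	 */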
4832 	int existing_backups = bit_count(pset->pending_AST_URGENT_cpu_mask) - rt_runq_count(pset);
4833 	if (existing_backups > 0) {
4834 		n_backup = n_backup - existing_backups;
4835 		if (n_backup < 0) {
4836 			n_backup = 0;
4837 		}
4838 	}
4839 
4840 	sched_ipi_type_t ipi_type[SCHED_MAX_BACKUP_PROCESSORS + 1] = {};
4841 	processor_t ipi_processor[SCHED_MAX_BACKUP_PROCESSORS + 1] = {};
4842 
4843 	thread->chosen_processor = chosen_processor;
4844 
4845 	/* <rdar://problem/15102234> */
4846 	assert(thread->bound_processor == PROCESSOR_NULL);
4847 
4848 	realtime_queue_insert(chosen_processor, pset, thread);
4849 
4850 	processor_t processor = chosen_processor;
4851 
4852 	int count = 0;
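	/*
	 * Iteration 0 handles the chosen processor in place; the remaining
	 * iterations pick additional RT-capable processors to IPI as backups.
	 */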
4853 	for (int i = 0; i <= n_backup; i++) {
4854 		if (i == 0) {
4855 			ipi_type[i] = SCHED_IPI_NONE;
4856 			ipi_processor[i] = processor;
4857 			count++;
4858 
4859 			ast_t preempt = AST_NONE;
4860 			if (thread->sched_pri > processor->current_pri) {
4861 				preempt = (AST_PREEMPT | AST_URGENT);
4862 			} else if (thread->sched_pri == processor->current_pri) {
4863 				if (deadline_add(thread->realtime.deadline, rt_deadline_epsilon) < processor->deadline) {
4864 					preempt = (AST_PREEMPT | AST_URGENT);
4865 				}
4866 			}
4867 
4868 			if (preempt != AST_NONE) {
4869 				if (processor->state == PROCESSOR_IDLE) {
4870 					if (processor == current_processor()) {
4871 						pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
4872 						ast_on(preempt);
4873 
4874 						if ((preempt & AST_URGENT) == AST_URGENT) {
4875 							if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4876 								KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4877 								    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 1);
4878 							}
4879 						}
4880 
4881 						if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
4882 							bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4883 						}
4884 					} else {
4885 						ipi_type[i] = sched_ipi_action(processor, thread, SCHED_IPI_EVENT_RT_PREEMPT);
4886 					}
4887 				} else if (processor->state == PROCESSOR_DISPATCHING) {
4888 					if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4889 						KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4890 						    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 2);
4891 					}
4892 				} else {
4893 					if (processor == current_processor()) {
4894 						ast_on(preempt);
4895 
4896 						if ((preempt & AST_URGENT) == AST_URGENT) {
4897 							if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4898 								KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
4899 								    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 3);
4900 							}
4901 						}
4902 
4903 						if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
4904 							bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4905 						}
4906 					} else {
4907 						ipi_type[i] = sched_ipi_action(processor, thread, SCHED_IPI_EVENT_RT_PREEMPT);
4908 					}
4909 				}
4910 			} else {
4911 				/* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
4912 			}
4913 		} else {
4914 			if (!pset_is_locked) {
4915 				pset_lock(pset);
4916 			}
4917 			ipi_type[i] = SCHED_IPI_NONE;
4918 			ipi_processor[i] = PROCESSOR_NULL;
4919 			pset_is_locked = !choose_next_rt_processor_for_IPI(pset, chosen_processor, false, &ipi_processor[i], &ipi_type[i]);
4920 			if (ipi_processor[i] == PROCESSOR_NULL) {
4921 				break;
4922 			}
4923 			count++;
4924 
4925 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_NEXT_PROCESSOR) | DBG_FUNC_NONE,
4926 			    ipi_processor[i]->cpu_id, ipi_processor[i]->state, backup, 1);
4927 #if defined(__x86_64__)
4928 #define p_is_good(p) (((p)->processor_primary == (p)) && ((sched_avoid_cpu0 != 1) || ((p)->cpu_id != 0)))
4929 			if (n_backup == SCHED_DEFAULT_BACKUP_PROCESSORS_SMT) {
4930 				processor_t p0 = ipi_processor[0];
4931 				processor_t p1 = ipi_processor[1];
4932 				assert(p0 && p1);
4933 				if (p_is_good(p0) && p_is_good(p1)) {
4934 					/*
4935 					 * Both the chosen processor and the first backup are non-cpu0 primaries,
4936 					 * so there is no need for a 2nd backup processor.
4937 					 */
4938 					break;
4939 				}
4940 			}
4941 #endif
4942 		}
4943 	}
4944 
4945 	if (pset_is_locked) {
4946 		pset_unlock(pset);
4947 	}
4948 
4949 	assert((count > 0) && (count <= (n_backup + 1)));
4950 	for (int i = 0; i < count; i++) {
4951 		assert(ipi_processor[i] != PROCESSOR_NULL);
4952 		sched_ipi_perform(ipi_processor[i], ipi_type[i]);
4953 	}
4954 }
4955 
4956 
4957 sched_ipi_type_t
4958 sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
4959     thread_t thread, __unused sched_ipi_event_t event)
4960 {
4961 #if defined(CONFIG_SCHED_DEFERRED_AST)
4962 #if CONFIG_THREAD_GROUPS
4963 	if (thread) {
4964 		struct thread_group *tg = thread_group_get(thread);
4965 		if (thread_group_uses_immediate_ipi(tg)) {
4966 			return SCHED_IPI_IMMEDIATE;
4967 		}
4968 	}
4969 #endif /* CONFIG_THREAD_GROUPS */
4970 	if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
4971 		return SCHED_IPI_DEFERRED;
4972 	}
4973 #else /* CONFIG_SCHED_DEFERRED_AST */
4974 	(void) thread;
4975 	panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
4976 #endif /* CONFIG_SCHED_DEFERRED_AST */
4977 	return SCHED_IPI_NONE;
4978 }
4979 
4980 sched_ipi_type_t
4981 sched_ipi_action(processor_t dst, thread_t thread, sched_ipi_event_t event)
4982 {
4983 	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
4984 	assert(dst != NULL);
4985 
4986 	processor_set_t pset = dst->processor_set;
4987 	if (current_processor() == dst) {
4988 		return SCHED_IPI_NONE;
4989 	}
4990 
4991 	bool dst_idle = (dst->state == PROCESSOR_IDLE);
4992 	if (dst_idle) {
4993 		pset_update_processor_state(pset, dst, PROCESSOR_DISPATCHING);
4994 	}
4995 
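	/*
	 * Ask the scheduler's IPI policy what kind of IPI to send, then record
	 * the decision in the pset's pending-AST masks for the target CPU.
	 */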
4996 	ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
4997 	switch (ipi_type) {
4998 	case SCHED_IPI_NONE:
4999 		return SCHED_IPI_NONE;
5000 #if defined(CONFIG_SCHED_DEFERRED_AST)
5001 	case SCHED_IPI_DEFERRED:
5002 		bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
5003 		break;
5004 #endif /* CONFIG_SCHED_DEFERRED_AST */
5005 	default:
5006 		if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
5007 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
5008 			    dst->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 4);
5009 		}
5010 		bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
5011 		break;
5012 	}
5013 	return ipi_type;
5014 }
5015 
5016 sched_ipi_type_t
5017 sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
5018 {
5019 	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
5020 	boolean_t deferred_ipi_supported = false;
5021 	processor_set_t pset = dst->processor_set;
5022 
5023 #if defined(CONFIG_SCHED_DEFERRED_AST)
5024 	deferred_ipi_supported = true;
5025 #endif /* CONFIG_SCHED_DEFERRED_AST */
5026 
5027 	switch (event) {
5028 	case SCHED_IPI_EVENT_SPILL:
5029 	case SCHED_IPI_EVENT_SMT_REBAL:
5030 	case SCHED_IPI_EVENT_REBALANCE:
5031 	case SCHED_IPI_EVENT_BOUND_THR:
5032 	case SCHED_IPI_EVENT_RT_PREEMPT:
5033 		/*
5034 		 * The RT preempt, spill, SMT rebalance, rebalance, and bound thread
5035 		 * scenarios always use immediate IPIs.
5036 		 */
5037 		ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
5038 		break;
5039 	case SCHED_IPI_EVENT_PREEMPT:
5040 		/* In the preemption case, use immediate IPIs for RT threads */
5041 		if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
5042 			ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
5043 			break;
5044 		}
5045 
5046 		/*
5047 		 * For non-RT thread preemption:
5048 		 * if the core is active, use an immediate IPI;
5049 		 * if the core is idle, use a deferred IPI if supported, otherwise an immediate IPI.
5050 		 */
5051 		if (deferred_ipi_supported && dst_idle) {
5052 			return sched_ipi_deferred_policy(pset, dst, thread, event);
5053 		}
5054 		ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
5055 		break;
5056 	default:
5057 		panic("Unrecognized scheduler IPI event type %d", event);
5058 	}
5059 	assert(ipi_type != SCHED_IPI_NONE);
5060 	return ipi_type;
5061 }
5062 
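/*
 *	sched_ipi_perform:
 *
 *	Issue the IPI previously selected by sched_ipi_action()/sched_ipi_policy().
 *
 *	A sketch of the pattern used elsewhere in this file: the IPI decision is
 *	made while the pset is locked, and the IPI itself is sent after the lock
 *	is dropped, e.g.
 *
 *		ipi_type = sched_ipi_action(processor, thread, event);
 *		pset_unlock(pset);
 *		sched_ipi_perform(processor, ipi_type);
 */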
5063 void
5064 sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
5065 {
5066 	switch (ipi) {
5067 	case SCHED_IPI_NONE:
5068 		break;
5069 	case SCHED_IPI_IDLE:
5070 		machine_signal_idle(dst);
5071 		break;
5072 	case SCHED_IPI_IMMEDIATE:
5073 		cause_ast_check(dst);
5074 		break;
5075 	case SCHED_IPI_DEFERRED:
5076 		machine_signal_idle_deferred(dst);
5077 		break;
5078 	default:
5079 		panic("Unrecognized scheduler IPI type: %d", ipi);
5080 	}
5081 }
5082 
5083 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
5084 
5085 boolean_t
5086 priority_is_urgent(int priority)
5087 {
5088 	return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
5089 }
5090 
5091 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5092 
5093 /*
5094  *	processor_setrun:
5095  *
5096  *	Dispatch a thread for execution on a
5097  *	processor.
5098  *
5099  *	Thread must be locked.  Associated pset must
5100  *	be locked, and is returned unlocked.
5101  */
5102 static void
5103 processor_setrun(
5104 	processor_t                     processor,
5105 	thread_t                        thread,
5106 	integer_t                       options)
5107 {
5108 	processor_set_t pset = processor->processor_set;
5109 	pset_assert_locked(pset);
5110 	ast_t preempt = AST_NONE;
5111 	enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
5112 
5113 	sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
5114 
5115 	thread->chosen_processor = processor;
5116 
5117 	/*
5118 	 *	Set preemption mode.
5119 	 */
5120 #if defined(CONFIG_SCHED_DEFERRED_AST)
5121 	/* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
5122 #endif
5123 	if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
5124 		preempt = (AST_PREEMPT | AST_URGENT);
5125 	} else if (processor->current_is_eagerpreempt) {
5126 		preempt = (AST_PREEMPT | AST_URGENT);
5127 	} else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
5128 		if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
5129 			preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
5130 		} else {
5131 			preempt = AST_NONE;
5132 		}
5133 	} else {
5134 		preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
5135 	}
5136 
5137 	if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) {
5138 		/*
5139 		 * Having gone to the trouble of forcing this thread off a less preferred core,
5140 		 * we should force the preferable core to reschedule immediately to give this
5141 		 * thread a chance to run instead of just sitting on the run queue where
5142 		 * it may just be stolen back by the idle core we just forced it off of.
5143 		 */
5144 		preempt |= AST_PREEMPT;
5145 	}
5146 
5147 	SCHED(processor_enqueue)(processor, thread, options);
5148 	sched_update_pset_load_average(pset, 0);
5149 
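	/*
	 * Decide how to notify the target processor: wake it out of idle,
	 * rely on an already-in-flight dispatch, or interrupt the running
	 * thread if the new thread warrants preemption.
	 */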
5150 	if (preempt != AST_NONE) {
5151 		if (processor->state == PROCESSOR_IDLE) {
5152 			ipi_action = eExitIdle;
5153 		} else if (processor->state == PROCESSOR_DISPATCHING) {
5154 			if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
5155 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
5156 				    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 5);
5157 			}
5158 		} else if (processor->state == PROCESSOR_RUNNING &&
5159 		    (thread->sched_pri >= processor->current_pri)) {
5160 			ipi_action = eInterruptRunning;
5161 		}
5162 	} else {
5163 		/*
5164 		 * New thread is not important enough to preempt what is running, but
5165 		 * special processor states may need special handling
5166 		 */
5167 		if (processor->state == PROCESSOR_IDLE) {
5168 			ipi_action = eExitIdle;
5169 		} else if (processor->state == PROCESSOR_DISPATCHING) {
5170 			if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
5171 				KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
5172 				    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 6);
5173 			}
5174 		}
5175 	}
5176 
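	/*
	 * If the target is the current processor, re-run the preemption check
	 * directly and update the pending-AST bits; otherwise select an IPI to
	 * send once the pset lock is dropped.
	 */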
5177 	if (ipi_action != eDoNothing) {
5178 		if (processor == current_processor()) {
5179 			if (ipi_action == eExitIdle) {
5180 				pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
5181 			}
5182 			if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
5183 				ast_on(preempt);
5184 			}
5185 
5186 			if ((preempt & AST_URGENT) == AST_URGENT) {
5187 				if (bit_set_if_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
5188 					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_START,
5189 					    processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, (uintptr_t)thread_tid(thread), 7);
5190 				}
5191 			} else {
5192 				if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
5193 					KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END, processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 7);
5194 				}
5195 			}
5196 
5197 			if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
5198 				bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
5199 			} else {
5200 				bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
5201 			}
5202 		} else {
5203 			sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
5204 			ipi_type = sched_ipi_action(processor, thread, event);
5205 		}
5206 	}
5207 
5208 	pset_unlock(pset);
5209 	sched_ipi_perform(processor, ipi_type);
5210 
5211 	if (ipi_action != eDoNothing && processor == current_processor()) {
5212 		ast_t new_preempt = update_pending_nonurgent_preemption(processor, preempt);
5213 		ast_on(new_preempt);
5214 	}
5215 }
5216 
5217 /*
5218  *	choose_next_pset:
5219  *
5220  *	Return the next sibling pset containing
5221  *	available processors.
5222  *
5223  *	Returns the original pset if none other is
5224  *	suitable.
5225  */
5226 static processor_set_t
5227 choose_next_pset(
5228 	processor_set_t         pset)
5229 {
5230 	processor_set_t         nset = pset;
5231 
5232 	do {
5233 		nset = next_pset(nset);
5234 
5235 		/*
5236 		 * Sometimes during startup the pset_map can contain a bit
5237 		 * for a pset that isn't fully published in pset_array because
5238 		 * the pset_map read isn't an acquire load.
5239 		 *
5240 		 * In order to avoid needing an acquire barrier here, just bail
5241 		 * out.
5242 		 */
5243 		if (nset == PROCESSOR_SET_NULL) {
5244 			return pset;
5245 		}
5246 	} while (nset->online_processor_count < 1 && nset != pset);
5247 
5248 	return nset;
5249 }
5250 
5251 /*
5252  *	choose_processor:
5253  *
5254  *	Choose a processor for the thread, beginning at
5255  *	the pset.  Accepts an optional processor hint in
5256  *	the pset.
5257  *
5258  *	Returns a processor, possibly from a different pset.
5259  *
5260  *	The thread must be locked.  The pset must be locked,
5261  *	and the resulting pset is locked on return.
5262  */
5263 processor_t
5264 choose_processor(
5265 	processor_set_t         starting_pset,
5266 	processor_t             processor,
5267 	thread_t                thread)
5268 {
5269 	processor_set_t pset = starting_pset;
5270 	processor_set_t nset;
5271 
5272 	assert(thread->sched_pri <= MAXPRI);
5273 
5274 	/*
5275 	 * Prefer the hinted processor, when appropriate.
5276 	 */
5277 
5278 	/* Fold last processor hint from secondary processor to its primary */
5279 	if (processor != PROCESSOR_NULL) {
5280 		processor = processor->processor_primary;
5281 	}
5282 
5283 	/*
5284 	 * Only consult platform layer if pset is active, which
5285 	 * it may not be in some cases when a multi-set system
5286 	 * is going to sleep.
5287 	 */
5288 	if (pset->online_processor_count) {
5289 		if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
5290 			processor_t mc_processor = machine_choose_processor(pset, processor);
5291 			if (mc_processor != PROCESSOR_NULL) {
5292 				processor = mc_processor->processor_primary;
5293 			}
5294 		}
5295 	}
5296 
5297 	/*
5298 	 * At this point, we may have a processor hint, and we may have
5299 	 * an initial starting pset. If the hint is not in the pset, or
5300 	 * if the hint is for a processor in an invalid state, discard
5301 	 * the hint.
5302 	 */
5303 	if (processor != PROCESSOR_NULL) {
5304 		if (processor->processor_set != pset) {
5305 			processor = PROCESSOR_NULL;
5306 		} else if (!processor->is_recommended) {
5307 			processor = PROCESSOR_NULL;
5308 		} else {
5309 			switch (processor->state) {
5310 			case PROCESSOR_START:
5311 			case PROCESSOR_PENDING_OFFLINE:
5312 			case PROCESSOR_OFF_LINE:
5313 				/*
5314 				 * Hint is for a processor that cannot support running new threads.
5315 				 */
5316 				processor = PROCESSOR_NULL;
5317 				break;
5318 			case PROCESSOR_IDLE:
5319 				/*
5320 				 * Hint is for an idle processor. Assume it is no worse than any other
5321 				 * idle processor. The platform layer had an opportunity to provide
5322 				 * the "least cost idle" processor above.
5323 				 */
5324 				if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
5325 					uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->primary_map & pset->recommended_bitmask);
5326 					uint64_t non_avoided_idle_primary_map = idle_primary_map & ~pset->perfcontrol_cpu_migration_bitmask;
5327 					/*
5328 					 * If the rotation bitmask to force a migration is set for this core and there's an idle core
5329 					 * that needn't be avoided, don't continue running on the same core.
5330 					 */
5331 					if (!(bit_test(processor->processor_set->perfcontrol_cpu_migration_bitmask, processor->cpu_id) && non_avoided_idle_primary_map != 0)) {
5332 						return processor;
5333 					}
5334 				}
5335 				processor = PROCESSOR_NULL;
5336 				break;
5337 			case PROCESSOR_RUNNING:
5338 			case PROCESSOR_DISPATCHING:
5339 				/*
5340 				 * Hint is for an active CPU. This fast-path allows
5341 				 * realtime threads to preempt non-realtime threads
5342 				 * to regain their previous executing processor.
5343 				 */
5344 				if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5345 					if (processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
5346 						return processor;
5347 					}
5348 					processor = PROCESSOR_NULL;
5349 				}
5350 
5351 				/* Otherwise, use hint as part of search below */
5352 				break;
5353 			default:
5354 				processor = PROCESSOR_NULL;
5355 				break;
5356 			}
5357 		}
5358 	}
5359 
5360 	/*
5361 	 * Iterate through the processor sets to locate
5362 	 * an appropriate processor. Seed results with
5363 	 * a last-processor hint, if available, so that
5364 	 * a search must find something strictly better
5365 	 * to replace it.
5366 	 *
5367 	 * A primary/secondary pair of SMT processors are
5368 	 * "unpaired" if the primary is busy but its
5369 	 * corresponding secondary is idle (so the physical
5370 	 * core has full use of its resources).
5371 	 */
5372 
5373 	integer_t lowest_priority = MAXPRI + 1;
5374 	integer_t lowest_secondary_priority = MAXPRI + 1;
5375 	integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
5376 	integer_t lowest_idle_secondary_priority = MAXPRI + 1;
5377 	integer_t lowest_count = INT_MAX;
5378 	processor_t lp_processor = PROCESSOR_NULL;
5379 	processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
5380 	processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
5381 	processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
5382 	processor_t lc_processor = PROCESSOR_NULL;
5383 
5384 	if (processor != PROCESSOR_NULL) {
5385 		/* All other states should be enumerated above. */
5386 		assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
5387 		assert(thread->sched_pri < BASEPRI_RTQUEUES);
5388 
5389 		lowest_priority = processor->current_pri;
5390 		lp_processor = processor;
5391 
5392 		lowest_count = SCHED(processor_runq_count)(processor);
5393 		lc_processor = processor;
5394 	}
5395 
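	/*
	 * Realtime path: sweep the psets looking for an idle processor that can
	 * take the thread (considering only primaries first where appropriate,
	 * then secondaries), falling back to preempting the processor running
	 * the furthest deadline.
	 */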
5396 	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5397 		pset_node_t node = pset->node;
5398 		bool include_ast_urgent_pending_cpus = false;
5399 		cpumap_t ast_urgent_pending;
5400 try_again:
5401 		ast_urgent_pending = 0;
5402 		int consider_secondaries = (!pset->is_SMT) || (bit_count(node->pset_map) == 1) || (node->pset_non_rt_primary_map == 0) || include_ast_urgent_pending_cpus;
5403 		for (; consider_secondaries < 2; consider_secondaries++) {
5404 			pset = change_locked_pset(pset, starting_pset);
5405 			do {
5406 				cpumap_t available_map = pset_available_cpumap(pset);
5407 				if (available_map == 0) {
5408 					goto no_available_cpus;
5409 				}
5410 
5411 				processor = choose_processor_for_realtime_thread(pset, PROCESSOR_NULL, consider_secondaries, false);
5412 				if (processor) {
5413 					return processor;
5414 				}
5415 
5416 				if (consider_secondaries) {
5417 					processor = choose_furthest_deadline_processor_for_realtime_thread(pset, thread->sched_pri, thread->realtime.deadline, PROCESSOR_NULL, false, include_ast_urgent_pending_cpus);
5418 					if (processor) {
5419 						/*
5420 						 * Instead of looping through all the psets to find the global
5421 						 * furthest deadline processor, preempt the first candidate found.
5422 						 * The preempted thread will then find any other available far deadline
5423 						 * processors to preempt.
5424 						 */
5425 						return processor;
5426 					}
5427 
5428 					ast_urgent_pending |= pset->pending_AST_URGENT_cpu_mask;
5429 
5430 					if (rt_runq_count(pset) < lowest_count) {
5431 						int cpuid = bit_first(available_map);
5432 						assert(cpuid >= 0);
5433 						lc_processor = processor_array[cpuid];
5434 						lowest_count = rt_runq_count(pset);
5435 					}
5436 				}
5437 
5438 no_available_cpus:
5439 				nset = next_pset(pset);
5440 
5441 				if (nset != starting_pset) {
5442 					pset = change_locked_pset(pset, nset);
5443 				}
5444 			} while (nset != starting_pset);
5445 		}
5446 
5447 		/* Short cut for single pset nodes */
5448 		if (bit_count(node->pset_map) == 1) {
5449 			if (lc_processor) {
5450 				pset_assert_locked(lc_processor->processor_set);
5451 				return lc_processor;
5452 			}
5453 		} else {
5454 			if (ast_urgent_pending && !include_ast_urgent_pending_cpus) {
5455 				/* See the comment in choose_furthest_deadline_processor_for_realtime_thread() */
5456 				include_ast_urgent_pending_cpus = true;
5457 				goto try_again;
5458 			}
5459 		}
5460 
5461 		processor = lc_processor;
5462 
5463 		if (processor) {
5464 			pset = change_locked_pset(pset, processor->processor_set);
5465 			/* Check that chosen processor is still usable */
5466 			cpumap_t available_map = pset_available_cpumap(pset);
5467 			if (bit_test(available_map, processor->cpu_id)) {
5468 				return processor;
5469 			}
5470 
5471 			/* processor is no longer usable */
5472 			processor = PROCESSOR_NULL;
5473 		}
5474 
5475 		pset_assert_locked(pset);
5476 		pset_unlock(pset);
5477 		return PROCESSOR_NULL;
5478 	}
5479 
5480 	/* No realtime threads from this point on */
5481 	assert(thread->sched_pri < BASEPRI_RTQUEUES);
5482 
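	/*
	 * Non-realtime search order within each pset: preferred idle primaries,
	 * then idle primaries that needn't be avoided, then any idle primary,
	 * then a lower-priority running primary (preferring one whose secondary
	 * is idle), with idle secondaries and the least-loaded runq as fallbacks.
	 */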
5483 	do {
5484 		/*
5485 		 * Choose an idle processor, in pset traversal order
5486 		 */
5487 		uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & pset->primary_map & pset->recommended_bitmask);
5488 		uint64_t preferred_idle_primary_map = idle_primary_map & pset->perfcontrol_cpu_preferred_bitmask;
5489 
5490 		/* there shouldn't be a pending AST if the processor is idle */
5491 		assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
5492 
5493 		/*
5494 		 * Look at the preferred cores first.
5495 		 */
5496 		int cpuid = lsb_next(preferred_idle_primary_map, pset->cpu_preferred_last_chosen);
5497 		if (cpuid < 0) {
5498 			cpuid = lsb_first(preferred_idle_primary_map);
5499 		}
5500 		if (cpuid >= 0) {
5501 			processor = processor_array[cpuid];
5502 			pset->cpu_preferred_last_chosen = cpuid;
5503 			return processor;
5504 		}
5505 
5506 		/*
5507 		 * Look at the cores that don't need to be avoided next.
5508 		 */
5509 		if (pset->perfcontrol_cpu_migration_bitmask != 0) {
5510 			uint64_t non_avoided_idle_primary_map = idle_primary_map & ~pset->perfcontrol_cpu_migration_bitmask;
5511 			cpuid = lsb_next(non_avoided_idle_primary_map, pset->cpu_preferred_last_chosen);
5512 			if (cpuid < 0) {
5513 				cpuid = lsb_first(non_avoided_idle_primary_map);
5514 			}
5515 			if (cpuid >= 0) {
5516 				processor = processor_array[cpuid];
5517 				pset->cpu_preferred_last_chosen = cpuid;
5518 				return processor;
5519 			}
5520 		}
5521 
5522 		/*
5523 		 * Fall back to any remaining idle cores if none of the preferred ones and non-avoided ones are available.
5524 		 */
5525 		cpuid = lsb_first(idle_primary_map);
5526 		if (cpuid >= 0) {
5527 			processor = processor_array[cpuid];
5528 			return processor;
5529 		}
5530 
5531 		/*
5532 		 * Otherwise, enumerate active and idle processors to find primary candidates
5533 		 * with lower priority/etc.
5534 		 */
5535 
5536 		uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
5537 		    pset->recommended_bitmask &
5538 		    ~pset->pending_AST_URGENT_cpu_mask);
5539 
5540 		if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
5541 			active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
5542 		}
5543 
5544 		active_map = bit_ror64(active_map, (pset->last_chosen + 1));
5545 		for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
5546 			cpuid = ((rotid + pset->last_chosen + 1) & 63);
5547 			processor = processor_array[cpuid];
5548 
5549 			integer_t cpri = processor->current_pri;
5550 			processor_t primary = processor->processor_primary;
5551 			if (primary != processor) {
5552 				/* If primary is running a NO_SMT thread, don't choose its secondary */
5553 				if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
5554 					if (cpri < lowest_secondary_priority) {
5555 						lowest_secondary_priority = cpri;
5556 						lp_paired_secondary_processor = processor;
5557 					}
5558 				}
5559 			} else {
5560 				if (cpri < lowest_priority) {
5561 					lowest_priority = cpri;
5562 					lp_processor = processor;
5563 				}
5564 			}
5565 
5566 			integer_t ccount = SCHED(processor_runq_count)(processor);
5567 			if (ccount < lowest_count) {
5568 				lowest_count = ccount;
5569 				lc_processor = processor;
5570 			}
5571 		}
5572 
5573 		/*
5574 		 * For SMT configs, these idle secondary processors must have an active primary. Otherwise
5575 		 * the idle primary would have short-circuited the loop above.
5576 		 */
5577 		uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
5578 		    ~pset->primary_map &
5579 		    pset->recommended_bitmask);
5580 
5581 		/* there shouldn't be a pending AST if the processor is idle */
5582 		assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
5583 		assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);
5584 
5585 		for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
5586 			processor = processor_array[cpuid];
5587 
5588 			processor_t cprimary = processor->processor_primary;
5589 
5590 			integer_t primary_pri = cprimary->current_pri;
5591 
5592 			/*
5593 			 * TODO: This should also make the same decisions
5594 			 * as secondary_can_run_realtime_thread
5595 			 *
5596 			 * TODO: Keep track of the pending preemption priority
5597 			 * of the primary to make this more accurate.
5598 			 */
5599 
5600 			/* If the primary is running a no-smt thread, then don't choose its secondary */
5601 			if (cprimary->state == PROCESSOR_RUNNING &&
5602 			    processor_active_thread_no_smt(cprimary)) {
5603 				continue;
5604 			}
5605 
5606 			/*
5607 			 * Find the idle secondary processor with the lowest priority primary
5608 			 *
5609 			 * We will choose this processor as a fallback if we find no better
5610 			 * primary to preempt.
5611 			 */
5612 			if (primary_pri < lowest_idle_secondary_priority) {
5613 				lp_idle_secondary_processor = processor;
5614 				lowest_idle_secondary_priority = primary_pri;
5615 			}
5616 
5617 		/* Find the lowest priority active primary with an idle secondary */
5618 			if (primary_pri < lowest_unpaired_primary_priority) {
5619 				/* If the primary processor is offline or starting up, it's not a candidate for this path */
5620 				if (cprimary->state != PROCESSOR_RUNNING &&
5621 				    cprimary->state != PROCESSOR_DISPATCHING) {
5622 					continue;
5623 				}
5624 
5625 				if (!cprimary->is_recommended) {
5626 					continue;
5627 				}
5628 
5629 				/* if the primary is pending preemption, don't try to re-preempt it */
5630 				if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
5631 					continue;
5632 				}
5633 
5634 				if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
5635 				    bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
5636 					continue;
5637 				}
5638 
5639 				lowest_unpaired_primary_priority = primary_pri;
5640 				lp_unpaired_primary_processor = cprimary;
5641 			}
5642 		}
5643 
5644 		/*
5645 		 * We prefer preempting a primary processor over waking up its secondary.
5646 		 * The secondary will then be woken up by the preempted thread.
5647 		 */
5648 		if (thread->sched_pri > lowest_unpaired_primary_priority) {
5649 			pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
5650 			return lp_unpaired_primary_processor;
5651 		}
5652 
5653 		/*
5654 		 * We prefer preempting a lower priority active processor over directly
5655 		 * waking up an idle secondary.
5656 		 * The preempted thread will then find the idle secondary.
5657 		 */
5658 		if (thread->sched_pri > lowest_priority) {
5659 			pset->last_chosen = lp_processor->cpu_id;
5660 			return lp_processor;
5661 		}
5662 
5663 		/*
5664 		 * lc_processor is used to indicate the best processor set run queue
5665 		 * on which to enqueue a thread when all available CPUs are busy with
5666 		 * higher priority threads, so try to make sure it is initialized.
5667 		 */
5668 		if (lc_processor == PROCESSOR_NULL) {
5669 			cpumap_t available_map = pset_available_cpumap(pset);
5670 			cpuid = lsb_first(available_map);
5671 			if (cpuid >= 0) {
5672 				lc_processor = processor_array[cpuid];
5673 				lowest_count = SCHED(processor_runq_count)(lc_processor);
5674 			}
5675 		}
5676 
5677 		/*
5678 		 * Move onto the next processor set.
5679 		 *
5680 		 * If all primary processors in this pset are running a higher
5681 		 * priority thread, move on to next pset. Only when we have
5682 		 * exhausted the search for primary processors do we
5683 		 * fall back to secondaries.
5684 		 */
5685 #if CONFIG_SCHED_EDGE
5686 		/*
5687 		 * The edge scheduler expects a CPU to be selected from the pset it passed in
5688 		 * as the starting pset for non-RT workloads. The edge migration algorithm
5689 		 * should already have considered idle CPUs and loads to decide the starting_pset,
5690 		 * which means that this loop can be short-circuited.
5691 		 */
5692 		nset = starting_pset;
5693 #else /* CONFIG_SCHED_EDGE */
5694 		nset = next_pset(pset);
5695 #endif /* CONFIG_SCHED_EDGE */
5696 
5697 		if (nset != starting_pset) {
5698 			pset = change_locked_pset(pset, nset);
5699 		}
5700 	} while (nset != starting_pset);
5701 
5702 	/*
5703 	 * Make sure that we pick a running processor,
5704 	 * and that the correct processor set is locked.
5705 	 * Since we may have unlocked the candidate processor's
5706 	 * pset, it may have changed state.
5707 	 *
5708 	 * All primary processors are running a higher priority
5709 	 * thread, so the only options left are enqueuing on
5710 	 * the secondary processor that would perturb the least priority
5711 	 * primary, or the least busy primary.
5712 	 */
5713 
5714 	/* lowest_priority is evaluated in the main loops above */
5715 	if (lp_idle_secondary_processor != PROCESSOR_NULL) {
5716 		processor = lp_idle_secondary_processor;
5717 	} else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
5718 		processor = lp_paired_secondary_processor;
5719 	} else if (lc_processor != PROCESSOR_NULL) {
5720 		processor = lc_processor;
5721 	} else {
5722 		processor = PROCESSOR_NULL;
5723 	}
5724 
5725 	if (processor) {
5726 		pset = change_locked_pset(pset, processor->processor_set);
5727 		/* Check that chosen processor is still usable */
5728 		cpumap_t available_map = pset_available_cpumap(pset);
5729 		if (bit_test(available_map, processor->cpu_id)) {
5730 			pset->last_chosen = processor->cpu_id;
5731 			return processor;
5732 		}
5733 
5734 		/* processor is no longer usable */
5735 		processor = PROCESSOR_NULL;
5736 	}
5737 
5738 	pset_assert_locked(pset);
5739 	pset_unlock(pset);
5740 	return PROCESSOR_NULL;
5741 }
5742 
5743 /*
5744  * Default implementation of SCHED(choose_node)()
5745  * for single node systems
5746  */
5747 pset_node_t
5748 sched_choose_node(__unused thread_t thread)
5749 {
5750 	return &pset_node0;
5751 }
5752 
5753 /*
5754  *	choose_starting_pset:
5755  *
5756  *	Choose a starting processor set for the thread.
5757  *	May return a processor hint within the pset.
5758  *
5759  *	Returns a starting processor set, to be used by
5760  *      choose_processor.
5761  *
5762  *	The thread must be locked.  The resulting pset is unlocked on return,
5763  *      and is chosen without taking any pset locks.
5764  */
5765 processor_set_t
5766 choose_starting_pset(pset_node_t node, thread_t thread, processor_t *processor_hint)
5767 {
5768 	processor_set_t pset;
5769 	processor_t processor = PROCESSOR_NULL;
5770 
5771 	if (thread->affinity_set != AFFINITY_SET_NULL) {
5772 		/*
5773 		 * Use affinity set policy hint.
5774 		 */
5775 		pset = thread->affinity_set->aset_pset;
5776 	} else if (thread->last_processor != PROCESSOR_NULL) {
5777 		/*
5778 		 *	Simple (last processor) affinity case.
5779 		 */
5780 		processor = thread->last_processor;
5781 		pset = processor->processor_set;
5782 	} else {
5783 		/*
5784 		 *	No Affinity case:
5785 		 *
5786 		 *	Utilize a per-task hint to spread threads
5787 		 *	among the available processor sets.
5788 		 * NRG this seems like the wrong thing to do.
5789 		 * See also task->pset_hint = pset in thread_setrun()
5790 		 */
5791 		pset = get_threadtask(thread)->pset_hint;
5792 		if (pset == PROCESSOR_SET_NULL) {
5793 			pset = current_processor()->processor_set;
5794 		}
5795 
5796 		pset = choose_next_pset(pset);
5797 	}
5798 
5799 	if (!bit_test(node->pset_map, pset->pset_id)) {
5800 		/* pset is not from this node so choose one that is */
5801 		int id = lsb_first(node->pset_map);
5802 		if (id < 0) {
5803 			/* startup race, so check again under the node lock */
5804 			lck_spin_lock(&pset_node_lock);
5805 			if (bit_test(node->pset_map, pset->pset_id)) {
5806 				id = pset->pset_id;
5807 			} else {
5808 				id = lsb_first(node->pset_map);
5809 			}
5810 			lck_spin_unlock(&pset_node_lock);
5811 		}
5812 		assert(id >= 0);
5813 		pset = pset_array[id];
5814 	}
5815 
5816 	if (bit_count(node->pset_map) == 1) {
5817 		/* Only a single pset in this node */
5818 		goto out;
5819 	}
5820 
5821 	bool avoid_cpu0 = false;
5822 
5823 #if defined(__x86_64__)
5824 	if ((thread->sched_pri >= BASEPRI_RTQUEUES) && sched_avoid_cpu0) {
5825 		/* Avoid the pset containing cpu0 */
5826 		avoid_cpu0 = true;
5827 		/* Assert that cpu0 is in pset0.  I expect this to be true on __x86_64__ */
5828 		assert(bit_test(pset_array[0]->cpu_bitmask, 0));
5829 	}
5830 #endif
5831 
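	/*
	 * When avoiding cpu0, the candidate pset map is rotated right by one so
	 * that pset 0 is considered last, and the chosen index is rotated back.
	 */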
5832 	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
5833 		pset_map_t rt_target_map = atomic_load(&node->pset_non_rt_primary_map);
5834 		if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
5835 			if (avoid_cpu0) {
5836 				rt_target_map = bit_ror64(rt_target_map, 1);
5837 			}
5838 			int rotid = lsb_first(rt_target_map);
5839 			if (rotid >= 0) {
5840 				int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
5841 				pset = pset_array[id];
5842 				goto out;
5843 			}
5844 		}
5845 		if (!pset->is_SMT || !sched_allow_rt_smt) {
5846 			/* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
5847 			goto out;
5848 		}
5849 		rt_target_map = atomic_load(&node->pset_non_rt_map);
5850 		if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
5851 			if (avoid_cpu0) {
5852 				rt_target_map = bit_ror64(rt_target_map, 1);
5853 			}
5854 			int rotid = lsb_first(rt_target_map);
5855 			if (rotid >= 0) {
5856 				int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
5857 				pset = pset_array[id];
5858 				goto out;
5859 			}
5860 		}
5861 		/* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
5862 	} else {
5863 		pset_map_t idle_map = atomic_load(&node->pset_idle_map);
5864 		if (!bit_test(idle_map, pset->pset_id)) {
5865 			int next_idle_pset_id = lsb_first(idle_map);
5866 			if (next_idle_pset_id >= 0) {
5867 				pset = pset_array[next_idle_pset_id];
5868 			}
5869 		}
5870 	}
5871 
5872 out:
5873 	if ((processor != PROCESSOR_NULL) && (processor->processor_set != pset)) {
5874 		processor = PROCESSOR_NULL;
5875 	}
5876 	if (processor != PROCESSOR_NULL) {
5877 		*processor_hint = processor;
5878 	}
5879 
5880 	assert(pset != NULL);
5881 	return pset;
5882 }
5883 
5884 /*
5885  *	thread_setrun:
5886  *
5887  *	Dispatch thread for execution, onto an idle
5888  *	processor or run queue, and signal a preemption
5889  *	as appropriate.
5890  *
5891  *	Thread must be locked.
5892  */
5893 void
5894 thread_setrun(
5895 	thread_t                        thread,
5896 	sched_options_t                 options)
5897 {
5898 	processor_t                     processor = PROCESSOR_NULL;
5899 	processor_set_t         pset;
5900 
5901 	assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
5902 	thread_assert_runq_null(thread);
5903 
5904 #if CONFIG_PREADOPT_TG
5905 	/* We know that the thread is not in the runq by virtue of being in this
5906 	 * function and the thread is not self since we are running. We can safely
5907 	 * resolve the thread group hierarchy and modify the thread's thread group
5908 	 * here. */
5909 	thread_resolve_and_enforce_thread_group_hierarchy_if_needed(thread);
5910 #endif
5911 
5912 	/*
5913 	 *	Update priority if needed.
5914 	 */
5915 	if (SCHED(can_update_priority)(thread)) {
5916 		SCHED(update_priority)(thread);
5917 	}
5918 	thread->sfi_class = sfi_thread_classify(thread);
5919 
5920 	if (thread->bound_processor == PROCESSOR_NULL) {
5921 		/*
5922 		 * Unbound case.
5923 		 *
5924 		 * Usually, this loop will only be executed once,
5925 		 * but if CLPC derecommends a processor after it has been chosen,
5926 		 * or if a processor is shut down after it is chosen,
5927 		 * choose_processor() may return NULL, so a retry
5928 		 * may be necessary.  A single retry will usually
5929 		 * be enough, and we can't afford to retry too many times
5930 		 * because interrupts are disabled.
5931 		 */
5932 #define CHOOSE_PROCESSOR_MAX_RETRIES 3
5933 		for (int retry = 0; retry <= CHOOSE_PROCESSOR_MAX_RETRIES; retry++) {
5934 			processor_t processor_hint = PROCESSOR_NULL;
5935 			pset_node_t node = SCHED(choose_node)(thread);
5936 			processor_set_t starting_pset = choose_starting_pset(node, thread, &processor_hint);
5937 
5938 			pset_lock(starting_pset);
5939 
5940 			processor = SCHED(choose_processor)(starting_pset, processor_hint, thread);
5941 			if (processor != PROCESSOR_NULL) {
5942 				pset = processor->processor_set;
5943 				pset_assert_locked(pset);
5944 				break;
5945 			}
5946 		}
5947 		/*
5948 		 * If choose_processor() still returns NULL,
5949 		 * which is very unlikely, we need a fallback.
5950 		 */
5951 		if (processor == PROCESSOR_NULL) {
5952 			bool unlock_available_cores_lock = false;
5953 			if (sched_all_cpus_offline()) {
5954 				/*
5955 				 * There are no available processors
5956 				 * because we're in final system shutdown.
5957 				 * Enqueue on the master processor and we'll
5958 				 * handle it when it powers back up.
5959 				 */
5960 				processor = master_processor;
5961 			} else if (support_bootcpu_shutdown) {
5962 				/*
5963 				 * Grab the sched_available_cores_lock to select
5964 				 * some available processor and prevent it from
5965 				 * becoming offline while we enqueue the thread.
5966 				 *
5967 				 * This is very close to a lock inversion, but
5968 				 * places that do call thread_setrun with this
5969 				 * lock held know that the current cpu will be
5970 				 * schedulable, so we won't fall out of
5971 				 * choose_processor.
5972 				 */
5973 				simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
5974 				unlock_available_cores_lock = true;
5975 
5976 				int last_resort_cpu = sched_last_resort_cpu();
5977 
5978 				processor = processor_array[last_resort_cpu];
5979 			} else {
5980 				/*
5981 				 * The master processor is never shut down, always safe to choose.
5982 				 */
5983 				processor = master_processor;
5984 			}
5985 			pset = processor->processor_set;
5986 			pset_lock(pset);
5987 			assert((pset_available_cpu_count(pset) > 0) || (processor->state != PROCESSOR_OFF_LINE && processor->is_recommended));
5988 			if (unlock_available_cores_lock) {
5989 				simple_unlock(&sched_available_cores_lock);
5990 			}
5991 		}
5992 		task_t task = get_threadtask(thread);
5993 		if (!(task->t_flags & TF_USE_PSET_HINT_CLUSTER_TYPE)) {
5994 			task->pset_hint = pset; /* NRG this is done without holding the task lock */
5995 		}
5996 		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
5997 		    (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
5998 		assert((pset_available_cpu_count(pset) > 0) || (processor->state != PROCESSOR_OFF_LINE && processor->is_recommended));
5999 	} else {
6000 		/*
6001 		 *	Bound case:
6002 		 *
6003 		 *	Unconditionally dispatch on the processor.
6004 		 */
6005 		processor = thread->bound_processor;
6006 		pset = processor->processor_set;
6007 		pset_lock(pset);
6008 
6009 		SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT_IST(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
6010 		    (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
6011 	}
6012 
6013 	/*
6014 	 *	Dispatch the thread on the chosen processor.
6015 	 *	TODO: This should be based on sched_mode, not sched_pri
6016 	 */
6017 	if (thread->sched_pri >= BASEPRI_RTQUEUES) {
6018 		realtime_setrun(processor, thread);
6019 	} else {
6020 		processor_setrun(processor, thread, options);
6021 	}
6022 	/* pset is now unlocked */
6023 	if (thread->bound_processor == PROCESSOR_NULL) {
6024 		SCHED(check_spill)(pset, thread);
6025 	}
6026 }
6027 
6028 processor_set_t
6029 task_choose_pset(
6030 	task_t          task)
6031 {
6032 	processor_set_t         pset = task->pset_hint;
6033 
6034 	if (pset != PROCESSOR_SET_NULL) {
6035 		pset = choose_next_pset(pset);
6036 	}
6037 
6038 	return pset;
6039 }
6040 
6041 /*
6042  *	Check for a preemption point in
6043  *	the current context.
6044  *
6045  *	Called at splsched with thread locked.
6046  */
6047 ast_t
6048 csw_check(
6049 	thread_t                thread,
6050 	processor_t             processor,
6051 	ast_t                   check_reason)
6052 {
6053 	processor_set_t pset = processor->processor_set;
6054 
6055 	assert(thread == processor->active_thread);
6056 
6057 	pset_lock(pset);
6058 
6059 	processor_state_update_from_thread(processor, thread, true);
6060 
6061 	ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);
6062 
6063 	/* Acknowledge the IPI if we decided not to preempt */
6064 
6065 	if ((preempt & AST_URGENT) == 0) {
6066 		if (bit_clear_if_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
6067 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PENDING_AST_URGENT) | DBG_FUNC_END, processor->cpu_id, pset->pending_AST_URGENT_cpu_mask, 0, 8);
6068 		}
6069 	}
6070 
6071 	if ((preempt & AST_PREEMPT) == 0) {
6072 		bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
6073 	}
6074 
6075 	pset_unlock(pset);
6076 
6077 	return update_pending_nonurgent_preemption(processor, preempt);
6078 }
6079 
6080 void
6081 clear_pending_nonurgent_preemption(processor_t processor)
6082 {
6083 	if (!processor->pending_nonurgent_preemption) {
6084 		return;
6085 	}
6086 
6087 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE) | DBG_FUNC_END);
6088 
6089 	processor->pending_nonurgent_preemption = false;
6090 	running_timer_clear(processor, RUNNING_TIMER_PREEMPT);
6091 }
6092 
6093 ast_t
6094 update_pending_nonurgent_preemption(processor_t processor, ast_t reason)
6095 {
6096 	if ((reason & (AST_URGENT | AST_PREEMPT)) != (AST_PREEMPT)) {
6097 		clear_pending_nonurgent_preemption(processor);
6098 		return reason;
6099 	}
6100 
6101 	if (nonurgent_preemption_timer_abs == 0) {
6102 		/* Preemption timer not enabled */
6103 		return reason;
6104 	}
6105 
6106 	if (current_thread()->state & TH_IDLE) {
6107 		/* idle threads don't need nonurgent preemption */
6108 		return reason;
6109 	}
6110 
6111 	if (processor->pending_nonurgent_preemption) {
6112 		/* Timer is already armed, no need to do it again */
6113 		return reason;
6114 	}
6115 
6116 	if (ml_did_interrupt_userspace()) {
6117 		/*
6118 		 * We're preempting userspace here, so we don't need
6119 		 * to defer the preemption.  Force AST_URGENT
6120 		 * so that we can avoid arming this timer without risking
6121 		 * ast_taken_user deciding to spend too long in kernel
6122 		 * space to handle other ASTs.
6123 		 */
6124 
6125 		return reason | AST_URGENT;
6126 	}
6127 
6128 	/*
6129 	 * We've decided to do a nonurgent preemption when running in
6130 	 * kernelspace. We defer the preemption until reaching userspace boundary
6131 	 * to give a grace period for locks etc to be dropped and to reach
6132 	 * a clean preemption point, so that the preempting thread doesn't
6133 	 * always immediately hit the lock that the waking thread still holds.
6134 	 *
6135 	 * Arm a timer to enforce that the preemption executes within a bounded
6136 	 * time if the thread doesn't block or return to userspace quickly.
6137 	 */
6138 
6139 	processor->pending_nonurgent_preemption = true;
6140 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE) | DBG_FUNC_START,
6141 	    reason);
6142 
6143 	uint64_t now = mach_absolute_time();
6144 
6145 	uint64_t deadline = now + nonurgent_preemption_timer_abs;
6146 
6147 	running_timer_enter(processor, RUNNING_TIMER_PREEMPT, NULL,
6148 	    now, deadline);
6149 
6150 	return reason;
6151 }
6152 
6153 /*
6154  * Check for preemption at splsched with
6155  * pset locked and processor as the current
6156  * processor.
6157  */
6158 ast_t
6159 csw_check_locked(
6160 	thread_t                thread,
6161 	processor_t             processor,
6162 	processor_set_t         pset,
6163 	ast_t                   check_reason)
6164 {
6165 	assert(processor == current_processor());
6166 	/*
6167 	 * If the current thread is running on a processor that is no longer recommended,
6168 	 * urgently preempt it, at which point thread_select() should
6169 	 * try to idle the processor and re-dispatch the thread to a recommended processor.
6170 	 */
6171 	if (!processor->is_recommended) {
6172 		return check_reason | AST_PREEMPT | AST_URGENT;
6173 	}
6174 
6175 	if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
6176 		return check_reason | AST_PREEMPT | AST_URGENT;
6177 	}
6178 
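	/*
	 * A waiting realtime thread preempts urgently if it outranks the current
	 * thread, if the current thread's timeslice is spent, or if its deadline
	 * is earlier than this processor's by more than the epsilon; otherwise
	 * the preemption is non-urgent.
	 */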
6179 	if (rt_runq_count(pset) > 0) {
6180 		if ((rt_runq_priority(pset) > processor->current_pri) || !processor->first_timeslice) {
6181 			return check_reason | AST_PREEMPT | AST_URGENT;
6182 		} else if (deadline_add(rt_runq_earliest_deadline(pset), rt_deadline_epsilon) < processor->deadline) {
6183 			return check_reason | AST_PREEMPT | AST_URGENT;
6184 		} else {
6185 			return check_reason | AST_PREEMPT;
6186 		}
6187 	}
6188 
6189 	ast_t result = SCHED(processor_csw_check)(processor);
6190 	if (result != AST_NONE) {
6191 		return check_reason | result | (thread_is_eager_preempt(thread) ? AST_URGENT : AST_NONE);
6192 	}
6193 
6194 	/*
6195 	 * Same for avoid-processor
6196 	 *
6197 	 * TODO: Should these set AST_REBALANCE?
6198 	 */
6199 	if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread, check_reason)) {
6200 		return check_reason | AST_PREEMPT;
6201 	}
6202 
6203 	/*
6204 	 * Even though we could continue executing on this processor, a
6205 	 * secondary SMT core should try to shed load to another primary core.
6206 	 *
6207 	 * TODO: Should this do the same check that thread_select does? i.e.
6208 	 * if no bound threads target this processor, and idle primaries exist, preempt.
6209 	 * The case of RT threads existing is already taken care of above.
6210 	 */
6211 
6212 	if (processor->current_pri < BASEPRI_RTQUEUES &&
6213 	    processor->processor_primary != processor) {
6214 		return check_reason | AST_PREEMPT;
6215 	}
6216 
6217 	if (thread->state & TH_SUSP) {
6218 		return check_reason | AST_PREEMPT;
6219 	}
6220 
6221 #if CONFIG_SCHED_SFI
6222 	/*
6223 	 * Current thread may not need to be preempted, but maybe needs
6224 	 * an SFI wait?
6225 	 */
6226 	result = sfi_thread_needs_ast(thread, NULL);
6227 	if (result != AST_NONE) {
6228 		return result;
6229 	}
6230 #endif
6231 
6232 	return AST_NONE;
6233 }
6234 
6235 /*
6236  * Handle preemption IPI or IPI in response to setting an AST flag
6237  * Triggered by cause_ast_check
6238  * Called at splsched
6239  */
6240 void
6241 ast_check(processor_t processor)
6242 {
6243 	smr_ack_ipi();
6244 
6245 	if (processor->state != PROCESSOR_RUNNING) {
6246 		return;
6247 	}
6248 
6249 	SCHED_DEBUG_AST_CHECK_KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED,
6250 	    MACH_SCHED_AST_CHECK) | DBG_FUNC_START);
6251 
6252 	thread_t thread = processor->active_thread;
6253 
6254 	assert(thread == current_thread());
6255 
6256 	/*
6257 	 * Pairs with task_restartable_ranges_synchronize
6258 	 */
6259 	thread_lock(thread);
6260 
6261 	thread_reset_pcs_ack_IPI(thread);
6262 
6263 	/*
6264 	 * Propagate thread ast to processor.
6265 	 * (handles IPI in response to setting AST flag)
6266 	 */
6267 	ast_propagate(thread);
6268 
6269 	/*
6270 	 * Stash the old urgency and perfctl values to find out if
6271 	 * csw_check updates them.
6272 	 */
6273 	thread_urgency_t old_urgency = processor->current_urgency;
6274 	perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;
6275 
6276 	ast_t preempt;
6277 
6278 	if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
6279 		ast_on(preempt);
6280 	}
6281 
6282 	if (old_urgency != processor->current_urgency) {
6283 		/*
6284 		 * Urgency updates happen with the thread lock held (ugh).
6285 		 * TODO: This doesn't notice QoS changes...
6286 		 */
6287 		uint64_t urgency_param1, urgency_param2;
6288 
6289 		thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
6290 		thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
6291 	}
6292 
6293 	thread_unlock(thread);
6294 
6295 	if (old_perfctl_class != processor->current_perfctl_class) {
6296 		/*
6297 		 * We updated the perfctl class of this thread from another core.
6298 		 * Let CLPC know that the currently running thread has a new
6299 		 * class.
6300 		 */
6301 
6302 		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
6303 		    mach_approximate_time(), 0, thread);
6304 	}
6305 
6306 	SCHED_DEBUG_AST_CHECK_KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED,
6307 	    MACH_SCHED_AST_CHECK) | DBG_FUNC_END, preempt);
6308 }
6309 
6310 
6311 void
6312 thread_preempt_expire(
6313 	timer_call_param_t      p0,
6314 	__unused timer_call_param_t      p1)
6315 {
6316 	processor_t processor = p0;
6317 
6318 	assert(processor == current_processor());
6319 	assert(p1 == NULL);
6320 
6321 	thread_t thread = current_thread();
6322 
6323 	/*
6324 	 * This is set and cleared by the current core, so we will
6325 	 * never see a race with running timer expiration
6326 	 */
6327 	assert(processor->pending_nonurgent_preemption);
6328 
6329 	clear_pending_nonurgent_preemption(processor);
6330 
6331 	thread_lock(thread);
6332 
6333 	/*
6334 	 * Check again to see if it's still worth a
6335 	 * context switch, but this time force enable kernel preemption
6336 	 */
6337 
6338 	ast_t preempt = csw_check(thread, processor, AST_URGENT);
6339 
6340 	if (preempt) {
6341 		ast_on(preempt);
6342 	}
6343 
6344 	thread_unlock(thread);
6345 
6346 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_PREEMPT_TIMER_ACTIVE), preempt);
6347 }
6348 
6349 
6350 /*
6351  *	set_sched_pri:
6352  *
6353  *	Set the scheduled priority of the specified thread.
6354  *
6355  *	This may cause the thread to change queues.
6356  *
6357  *	Thread must be locked.
6358  */
6359 void
6360 set_sched_pri(
6361 	thread_t        thread,
6362 	int16_t         new_priority,
6363 	set_sched_pri_options_t options)
6364 {
6365 	bool is_current_thread = (thread == current_thread());
6366 	bool removed_from_runq = false;
6367 	bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);
6368 
6369 	int16_t old_priority = thread->sched_pri;
6370 
6371 	/* If we're already at this priority, no need to mess with the runqueue */
6372 	if (new_priority == old_priority) {
6373 #if CONFIG_SCHED_CLUTCH
6374 		/* For the first thread in the system, the priority is correct but
6375 		 * th_sched_bucket is still TH_BUCKET_RUN. Since the clutch
6376 		 * scheduler relies on the bucket being set for all threads, update
6377 		 * its bucket here.
6378 		 */
6379 		if (thread->th_sched_bucket == TH_BUCKET_RUN) {
6380 			assert(thread == vm_pageout_scan_thread);
6381 			SCHED(update_thread_bucket)(thread);
6382 		}
6383 #endif /* CONFIG_SCHED_CLUTCH */
6384 
6385 		return;
6386 	}
6387 
6388 	if (is_current_thread) {
6389 		assert(thread->state & TH_RUN);
6390 		thread_assert_runq_null(thread);
6391 	} else {
6392 		removed_from_runq = thread_run_queue_remove(thread);
6393 	}
6394 
6395 	thread->sched_pri = new_priority;
6396 
6397 #if CONFIG_SCHED_CLUTCH
6398 	/*
6399 	 * Since for the clutch scheduler, the thread's bucket determines its runq
6400 	 * in the hierarchy it is important to update the bucket when the thread
6401 	 * lock is held and the thread has been removed from the runq hierarchy.
6402 	 */
6403 	SCHED(update_thread_bucket)(thread);
6404 
6405 #endif /* CONFIG_SCHED_CLUTCH */
6406 
6407 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
6408 	    (uintptr_t)thread_tid(thread),
6409 	    thread->base_pri,
6410 	    thread->sched_pri,
6411 	    thread->sched_usage,
6412 	    0);
6413 
6414 	if (removed_from_runq) {
6415 		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
6416 	} else if (is_current_thread) {
6417 		processor_t processor = thread->last_processor;
6418 		assert(processor == current_processor());
6419 
6420 		thread_urgency_t old_urgency = processor->current_urgency;
6421 
6422 		/*
6423 		 * When dropping in priority, check if the thread no longer belongs on core.
6424 		 * If a thread raises its own priority, don't aggressively rebalance it.
6425 		 * <rdar://problem/31699165>
6426 		 *
6427 		 * csw_check does a processor_state_update_from_thread, but
6428 		 * we should do our own if we're being lazy.
6429 		 */
6430 		if (!lazy_update && new_priority < old_priority) {
6431 			ast_t preempt;
6432 
6433 			if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
6434 				ast_on(preempt);
6435 			}
6436 		} else {
6437 			processor_state_update_from_thread(processor, thread, false);
6438 		}
6439 
6440 		/*
6441 		 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
6442 		 * class alterations from user space to occur relatively infrequently, hence
6443 		 * those are lazily handled. QoS classes have distinct priority bands, and QoS
6444 		 * inheritance is expected to involve priority changes.
6445 		 */
6446 		if (processor->current_urgency != old_urgency) {
6447 			uint64_t urgency_param1, urgency_param2;
6448 
6449 			thread_urgency_t new_urgency = thread_get_urgency(thread,
6450 			    &urgency_param1, &urgency_param2);
6451 
6452 			thread_tell_urgency(new_urgency, urgency_param1,
6453 			    urgency_param2, 0, thread);
6454 		}
6455 
6456 		/* TODO: only call this if current_perfctl_class changed */
6457 		uint64_t ctime = mach_approximate_time();
6458 		machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
6459 	} else if (thread->state & TH_RUN) {
6460 		processor_t processor = thread->last_processor;
6461 
6462 		if (!lazy_update &&
6463 		    processor != PROCESSOR_NULL &&
6464 		    processor != current_processor() &&
6465 		    processor->active_thread == thread) {
6466 			cause_ast_check(processor);
6467 		}
6468 	}
6469 }
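/*
 * Editor's illustrative sketch (not part of the original source): a minimal
 * caller pattern for set_sched_pri(), honoring the "thread must be locked"
 * requirement in the block comment above. BASEPRI_DEFAULT and SETPRI_DEFAULT
 * are placeholder choices for the new priority and options.
 */
#if 0
	spl_t s = splsched();
	thread_lock(thread);
	set_sched_pri(thread, BASEPRI_DEFAULT, SETPRI_DEFAULT);
	thread_unlock(thread);
	splx(s);
#endif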
6470 
6471 /*
6472  * thread_run_queue_remove_for_handoff
6473  *
6474  * Pull a thread or its (recursive) push target out of the runqueue
6475  * so that it is ready for thread_run()
6476  *
6477  * Called at splsched
6478  *
6479  * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
6480  * This may be different than the thread that was passed in.
6481  */
6482 thread_t
6483 thread_run_queue_remove_for_handoff(thread_t thread)
6484 {
6485 	thread_t pulled_thread = THREAD_NULL;
6486 
6487 	thread_lock(thread);
6488 
6489 	/*
6490 	 * Check that the thread is not bound to a different processor,
6491 	 * that the NO_SMT flag is not set on the thread, that the cluster type of
6492 	 * the processor matches the thread's bound cluster if the thread is pinned
6493 	 * to a particular cluster, and that realtime is not involved.
6494 	 *
6495 	 * Next, pull it off its run queue.  If it doesn't come, it's not eligible.
6496 	 */
6497 	processor_t processor = current_processor();
6498 	if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
6499 	    && (!thread_no_smt(thread))
6500 	    && (processor->current_pri < BASEPRI_RTQUEUES)
6501 	    && (thread->sched_pri < BASEPRI_RTQUEUES)
6502 #if __AMP__
6503 	    && ((thread->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) ||
6504 	    processor->processor_set->pset_id == thread->th_bound_cluster_id)
6505 #endif /* __AMP__ */
6506 	    ) {
6507 		if (thread_run_queue_remove(thread)) {
6508 			pulled_thread = thread;
6509 		}
6510 	}
6511 
6512 	thread_unlock(thread);
6513 
6514 	return pulled_thread;
6515 }
6516 
6517 /*
6518  * thread_prepare_for_handoff
6519  *
6520  * Make the thread ready for handoff.
6521  * If the thread was runnable then pull it off the runq, if the thread could
6522  * not be pulled, return NULL.
6523  *
6524  * If the thread was woken up from wait for handoff, make sure it is not bound to
6525  * a different processor.
6526  *
6527  * Called at splsched
6528  *
6529  * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
6530  * This may be different than the thread that was passed in.
6531  */
6532 thread_t
6533 thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option)
6534 {
6535 	thread_t pulled_thread = THREAD_NULL;
6536 
6537 	if (option & THREAD_HANDOFF_SETRUN_NEEDED) {
6538 		processor_t processor = current_processor();
6539 		thread_lock(thread);
6540 
6541 		/*
6542 		 * Check that the thread is not bound to a different processor,
6543 		 * that the NO_SMT flag is not set on the thread, and that the cluster
6544 		 * type of the processor matches the thread's bound cluster if the thread
6545 		 * is pinned to a particular cluster. Call thread_setrun() instead if the
6546 		 * above conditions are not satisfied.
6547 		 */
6548 		if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
6549 		    && (!thread_no_smt(thread))
6550 #if __AMP__
6551 		    && ((thread->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) ||
6552 		    processor->processor_set->pset_id == thread->th_bound_cluster_id)
6553 #endif /* __AMP__ */
6554 		    ) {
6555 			pulled_thread = thread;
6556 		} else {
6557 			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
6558 		}
6559 		thread_unlock(thread);
6560 	} else {
6561 		pulled_thread = thread_run_queue_remove_for_handoff(thread);
6562 	}
6563 
6564 	return pulled_thread;
6565 }
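/*
 * Editor's illustrative sketch (not part of the original source): the
 * intended handoff flow. thread_prepare_for_handoff() either pulls the
 * target off its runq (or validates the woken thread) or falls back to
 * thread_setrun(); on success the caller hands off the processor directly,
 * as the idle path later in this file does with thread_run(). Control flow
 * and the continuation argument are simplified placeholders.
 */
#if 0
	thread_t pulled = thread_prepare_for_handoff(target, option);
	if (pulled != THREAD_NULL) {
		thread_run(current_thread(), continuation, NULL, pulled);
		/* NOTREACHED if the handoff succeeds */
	}
#endif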
6566 
6567 /*
6568  *	thread_run_queue_remove:
6569  *
6570  *	Remove a thread from its current run queue and
6571  *	return TRUE if successful.
6572  *
6573  *	Thread must be locked.
6574  *
6575  *	If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
6576  *	run queues because the caller locked the thread.  Otherwise
6577  *	the thread is on a run queue, but could be chosen for dispatch
6578  *	and removed by another processor under a different lock, which
6579  *	will set thread->runq to PROCESSOR_NULL.
6580  *
6581  *	Hence the thread select path must not rely on anything that could
6582  *	be changed under the thread lock after calling this function,
6583  *	most importantly thread->sched_pri.
6584  */
6585 boolean_t
6586 thread_run_queue_remove(
6587 	thread_t        thread)
6588 {
6589 	boolean_t removed = FALSE;
6590 
6591 	if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
6592 		/* Thread isn't runnable */
6593 		thread_assert_runq_null(thread);
6594 		return FALSE;
6595 	}
6596 
6597 	processor_t processor = thread_get_runq(thread);
6598 	if (processor == PROCESSOR_NULL) {
6599 		/*
6600 		 * The thread is either not on the runq,
6601 		 * or is in the midst of being removed from the runq.
6602 		 *
6603 		 * runq is set to NULL under the pset lock, not the thread
6604 		 * lock, so the thread may still be in the process of being dequeued
6605 		 * from the runq. The dequeuing processor will wait in thread_invoke()
6606 		 * for the thread lock to be dropped.
6607 		 */
6608 
6609 		return FALSE;
6610 	}
6611 
6612 	if (thread->sched_pri < BASEPRI_RTQUEUES) {
6613 		return SCHED(processor_queue_remove)(processor, thread);
6614 	}
6615 
6616 	processor_set_t pset = processor->processor_set;
6617 
6618 	pset_lock(pset);
6619 
6620 	/*
6621 	 * Must re-read the thread runq after acquiring the pset lock, in
6622 	 * case another core swooped in before us to dequeue the thread.
6623 	 */
6624 	if (thread_get_runq_locked(thread) != PROCESSOR_NULL) {
6625 		/*
6626 		 *	Thread is on the RT run queue and we have a lock on
6627 		 *	that run queue.
6628 		 */
6629 		rt_runq_remove(SCHED(rt_runq)(pset), thread);
6630 		pset_update_rt_stealable_state(pset);
6631 
6632 		removed = TRUE;
6633 	}
6634 
6635 	pset_unlock(pset);
6636 
6637 	return removed;
6638 }
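/*
 * Editor's illustrative sketch (not part of the original source): the
 * remove/reinsert pairing used by callers such as set_sched_pri() above.
 * The thread stays locked at splsched across both calls, so it cannot
 * re-enter a run queue in between.
 */
#if 0
	if (thread_run_queue_remove(thread)) {
		/* ... adjust scheduling state while the thread is off its runq ... */
		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
	}
#endif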
6639 
6640 /*
6641  * Put the thread back where it goes after a thread_run_queue_remove
6642  *
6643  * Thread must have been removed under the same thread lock hold
6644  *
6645  * thread locked, at splsched
6646  */
6647 void
6648 thread_run_queue_reinsert(thread_t thread, sched_options_t options)
6649 {
6650 	thread_assert_runq_null(thread);
6651 	assert(thread->state & (TH_RUN));
6652 
6653 	thread_setrun(thread, options);
6654 }
6655 
6656 void
6657 sys_override_cpu_throttle(boolean_t enable_override)
6658 {
6659 	if (enable_override) {
6660 		cpu_throttle_enabled = 0;
6661 	} else {
6662 		cpu_throttle_enabled = 1;
6663 	}
6664 }
6665 
6666 thread_urgency_t
6667 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
6668 {
6669 	uint64_t urgency_param1 = 0, urgency_param2 = 0;
6670 	task_t task = get_threadtask_early(thread);
6671 
6672 	thread_urgency_t urgency;
6673 
6674 	if (thread == NULL || task == TASK_NULL || (thread->state & TH_IDLE)) {
6675 		urgency_param1 = 0;
6676 		urgency_param2 = 0;
6677 
6678 		urgency = THREAD_URGENCY_NONE;
6679 	} else if (thread->sched_mode == TH_MODE_REALTIME) {
6680 		urgency_param1 = thread->realtime.period;
6681 		urgency_param2 = thread->realtime.deadline;
6682 
6683 		urgency = THREAD_URGENCY_REAL_TIME;
6684 	} else if (cpu_throttle_enabled &&
6685 	    (thread->sched_pri <= MAXPRI_THROTTLE) &&
6686 	    (thread->base_pri <= MAXPRI_THROTTLE)) {
6687 		/*
6688 		 * Threads that are running at low priority but are not
6689 		 * tagged with a specific QoS are separated out from
6690 		 * the "background" urgency. Performance management
6691 		 * subsystem can decide to either treat these threads
6692 		 * as normal threads or look at other signals like thermal
6693 		 * levels for optimal power/perf tradeoffs for a platform.
6694 		 */
6695 		boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
6696 		boolean_t task_is_suppressed = (proc_get_effective_task_policy(task, TASK_POLICY_SUP_ACTIVE) == 0x1);
6697 
6698 		/*
6699 		 * Background urgency is applied when the thread priority is
6700 		 * MAXPRI_THROTTLE or lower, the thread is not promoted,
6701 		 * and the thread has a QoS specified.
6702 		 */
6703 		urgency_param1 = thread->sched_pri;
6704 		urgency_param2 = thread->base_pri;
6705 
6706 		if (thread_lacks_qos && !task_is_suppressed) {
6707 			urgency = THREAD_URGENCY_LOWPRI;
6708 		} else {
6709 			urgency = THREAD_URGENCY_BACKGROUND;
6710 		}
6711 	} else {
6712 		/* For otherwise unclassified threads, report throughput QoS parameters */
6713 		urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
6714 		urgency_param2 = proc_get_effective_task_policy(task, TASK_POLICY_THROUGH_QOS);
6715 		urgency = THREAD_URGENCY_NORMAL;
6716 	}
6717 
6718 	if (arg1 != NULL) {
6719 		*arg1 = urgency_param1;
6720 	}
6721 	if (arg2 != NULL) {
6722 		*arg2 = urgency_param2;
6723 	}
6724 
6725 	return urgency;
6726 }
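/*
 * Editor's illustrative sketch (not part of the original source): how this
 * file consumes thread_get_urgency() when csw_check() changes the
 * processor's current urgency (see the urgency-change paths earlier in this
 * file, e.g. in set_sched_pri()).
 */
#if 0
	uint64_t urgency_param1, urgency_param2;
	thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
	thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
#endif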
6727 
6728 perfcontrol_class_t
6729 thread_get_perfcontrol_class(thread_t thread)
6730 {
6731 	/* Special case handling */
6732 	if (thread->state & TH_IDLE) {
6733 		return PERFCONTROL_CLASS_IDLE;
6734 	}
6735 
6736 	if (thread->sched_mode == TH_MODE_REALTIME) {
6737 		return PERFCONTROL_CLASS_REALTIME;
6738 	}
6739 
6740 	/* perfcontrol_class based on base_pri */
6741 	if (thread->base_pri <= MAXPRI_THROTTLE) {
6742 		return PERFCONTROL_CLASS_BACKGROUND;
6743 	} else if (thread->base_pri <= BASEPRI_UTILITY) {
6744 		return PERFCONTROL_CLASS_UTILITY;
6745 	} else if (thread->base_pri <= BASEPRI_DEFAULT) {
6746 		return PERFCONTROL_CLASS_NONUI;
6747 	} else if (thread->base_pri <= BASEPRI_USER_INITIATED) {
6748 		return PERFCONTROL_CLASS_USER_INITIATED;
6749 	} else if (thread->base_pri <= BASEPRI_FOREGROUND) {
6750 		return PERFCONTROL_CLASS_UI;
6751 	} else {
6752 		if (get_threadtask(thread) == kernel_task) {
6753 			/*
6754 			 * Classify Above UI kernel threads as PERFCONTROL_CLASS_KERNEL.
6755 			 * All other lower priority kernel threads should be treated
6756 			 * as regular threads for performance control purposes.
6757 			 */
6758 			return PERFCONTROL_CLASS_KERNEL;
6759 		}
6760 		return PERFCONTROL_CLASS_ABOVEUI;
6761 	}
6762 }
6763 
6764 /*
6765  *	This is the processor idle loop, which just looks for other threads
6766  *	to execute.  Processor idle threads invoke this without supplying a
6767  *	current thread, to idle without an asserted wait state.
6768  *
6769  *	Returns the next thread to execute if dispatched directly.
6770  */
6771 
6772 #if 0
6773 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
6774 #else
6775 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
6776 #endif
6777 
6778 #if (DEVELOPMENT || DEBUG)
6779 int sched_idle_delay_cpuid = -1;
6780 #endif
6781 
6782 thread_t
6783 processor_idle(
6784 	thread_t                        thread,
6785 	processor_t                     processor)
6786 {
6787 	processor_set_t         pset = processor->processor_set;
6788 	struct recount_snap snap = { 0 };
6789 
6790 	(void)splsched();
6791 
6792 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6793 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
6794 	    (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
6795 
6796 	SCHED_STATS_INC(idle_transitions);
6797 	assert(processor->running_timers_active == false);
6798 
6799 	recount_snapshot(&snap);
6800 	recount_processor_idle(&processor->pr_recount, &snap);
6801 
6802 	while (1) {
6803 		/*
6804 		 * Ensure that updates to my processor and pset state,
6805 		 * made by the IPI source processor before sending the IPI,
6806 		 * are visible on this processor now (even though we don't
6807 		 * take the pset lock yet).
6808 		 */
6809 		atomic_thread_fence(memory_order_acquire);
6810 
6811 		if (processor->state != PROCESSOR_IDLE) {
6812 			break;
6813 		}
6814 		if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
6815 			break;
6816 		}
6817 #if defined(CONFIG_SCHED_DEFERRED_AST)
6818 		if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
6819 			break;
6820 		}
6821 #endif
6822 		if (bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id)) {
6823 			break;
6824 		}
6825 
6826 		if (processor->is_recommended && (processor->processor_primary == processor)) {
6827 			if (rt_runq_count(pset)) {
6828 				break;
6829 			}
6830 		} else {
6831 			if (SCHED(processor_bound_count)(processor)) {
6832 				break;
6833 			}
6834 		}
6835 
6836 		IDLE_KERNEL_DEBUG_CONSTANT(
6837 			MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
6838 
6839 		machine_track_platform_idle(TRUE);
6840 
6841 		machine_idle();
6842 		/* returns with interrupts enabled */
6843 
6844 		machine_track_platform_idle(FALSE);
6845 
6846 #if (DEVELOPMENT || DEBUG)
6847 		if (processor->cpu_id == sched_idle_delay_cpuid) {
6848 			delay(500);
6849 		}
6850 #endif
6851 
6852 		(void)splsched();
6853 
6854 		atomic_thread_fence(memory_order_acquire);
6855 
6856 		IDLE_KERNEL_DEBUG_CONSTANT(
6857 			MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
6858 
6859 		/*
6860 		 * Check if we should call sched_timeshare_consider_maintenance() here.
6861 		 * The CPU was woken out of idle due to an interrupt and we should do the
6862 		 * call only if the processor is still idle. If the processor is non-idle,
6863 		 * the threads running on the processor would do the call as part of
6864 		 * context swithing.
6865 		 */
6866 		if (processor->state == PROCESSOR_IDLE) {
6867 			sched_timeshare_consider_maintenance(mach_absolute_time(), true);
6868 		}
6869 
6870 		if (!SCHED(processor_queue_empty)(processor)) {
6871 			/* Secondary SMT processors respond to directed wakeups
6872 			 * exclusively. Some platforms induce 'spurious' SMT wakeups.
6873 			 */
6874 			if (processor->processor_primary == processor) {
6875 				break;
6876 			}
6877 		}
6878 	}
6879 
6880 	recount_snapshot(&snap);
6881 	recount_processor_run(&processor->pr_recount, &snap);
6882 	smr_cpu_join(processor, snap.rsn_time_mach);
6883 
6884 	ast_t reason = AST_NONE;
6885 
6886 	/* We're handling all scheduling AST's */
6887 	ast_off(AST_SCHEDULING);
6888 
6889 	/*
6890 	 * thread_select will move the processor from dispatching to running,
6891 	 * or put it in idle if there's nothing to do.
6892 	 */
6893 	thread_t cur_thread = current_thread();
6894 
6895 	thread_lock(cur_thread);
6896 	thread_t new_thread = thread_select(cur_thread, processor, &reason);
6897 	thread_unlock(cur_thread);
6898 
6899 	assert(processor->running_timers_active == false);
6900 
6901 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6902 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
6903 	    (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);
6904 
6905 	return new_thread;
6906 }
6907 
6908 /*
6909  *	Each processor has a dedicated thread which
6910  *	executes the idle loop when there is no suitable
6911  *	previous context.
6912  *
6913  *	This continuation is entered with interrupts disabled.
6914  */
6915 void
6916 idle_thread(__assert_only void* parameter,
6917     __unused wait_result_t result)
6918 {
6919 	assert(ml_get_interrupts_enabled() == FALSE);
6920 	assert(parameter == NULL);
6921 
6922 	processor_t processor = current_processor();
6923 
6924 	smr_cpu_leave(processor, processor->last_dispatch);
6925 
6926 	/*
6927 	 * Ensure that anything running in idle context triggers
6928 	 * preemption-disabled checks.
6929 	 */
6930 	disable_preemption_without_measurements();
6931 
6932 	/*
6933 	 * Enable interrupts temporarily to handle any pending interrupts
6934 	 * or IPIs before deciding to sleep
6935 	 */
6936 	spllo();
6937 
6938 	thread_t new_thread = processor_idle(THREAD_NULL, processor);
6939 	/* returns with interrupts disabled */
6940 
6941 	enable_preemption();
6942 
6943 	if (new_thread != THREAD_NULL) {
6944 		thread_run(processor->idle_thread,
6945 		    idle_thread, NULL, new_thread);
6946 		/*NOTREACHED*/
6947 	}
6948 
6949 	thread_block(idle_thread);
6950 	/*NOTREACHED*/
6951 }
6952 
6953 void
6954 idle_thread_create(
6955 	processor_t             processor,
6956 	thread_continue_t       continuation)
6957 {
6958 	kern_return_t   result;
6959 	thread_t                thread;
6960 	spl_t                   s;
6961 	char                    name[MAXTHREADNAMESIZE];
6962 
6963 	result = kernel_thread_create(continuation, NULL, MAXPRI_KERNEL, &thread);
6964 	if (result != KERN_SUCCESS) {
6965 		panic("idle_thread_create failed: %d", result);
6966 	}
6967 
6968 	snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
6969 	thread_set_thread_name(thread, name);
6970 
6971 	s = splsched();
6972 	thread_lock(thread);
6973 	thread->bound_processor = processor;
6974 	thread->chosen_processor = processor;
6975 	processor->idle_thread = thread;
6976 	thread->sched_pri = thread->base_pri = IDLEPRI;
6977 	thread->state = (TH_RUN | TH_IDLE);
6978 	thread->options |= TH_OPT_IDLE_THREAD;
6979 	thread->last_made_runnable_time = thread->last_basepri_change_time = mach_absolute_time();
6980 	thread_unlock(thread);
6981 	splx(s);
6982 
6983 	thread_deallocate(thread);
6984 }
6985 
6986 /*
6987  * sched_startup:
6988  *
6989  * Kicks off scheduler services.
6990  *
6991  * Called at splsched.
6992  */
6993 void
6994 sched_startup(void)
6995 {
6996 	kern_return_t   result;
6997 	thread_t                thread;
6998 
6999 	simple_lock_init(&sched_vm_group_list_lock, 0);
7000 
7001 	result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
7002 	    NULL, MAXPRI_KERNEL, &thread);
7003 	if (result != KERN_SUCCESS) {
7004 		panic("sched_startup");
7005 	}
7006 
7007 	thread_deallocate(thread);
7008 
7009 	assert_thread_magic(thread);
7010 
7011 	/*
7012 	 * Yield to the sched_init_thread once, to
7013 	 * initialize our own thread after being switched
7014 	 * back to.
7015 	 *
7016 	 * The current thread is the only other thread
7017 	 * active at this point.
7018 	 */
7019 	thread_block(THREAD_CONTINUE_NULL);
7020 
7021 	assert_thread_magic(thread);
7022 }
7023 
7024 #if __arm64__
7025 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
7026 #endif /* __arm64__ */
7027 
7028 
7029 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
7030 
7031 static volatile uint64_t                sched_maintenance_deadline;
7032 static uint64_t                         sched_tick_last_abstime;
7033 static uint64_t                         sched_tick_delta;
7034 uint64_t                                sched_tick_max_delta;
7035 
7036 
7037 /*
7038  *	sched_init_thread:
7039  *
7040  *	Perform periodic bookkeeping functions about ten
7041  *	times per second.
7042  */
7043 void
7044 sched_timeshare_maintenance_continue(void)
7045 {
7046 	uint64_t        sched_tick_ctime, late_time;
7047 
7048 	struct sched_update_scan_context scan_context = {
7049 		.earliest_bg_make_runnable_time = UINT64_MAX,
7050 		.earliest_normal_make_runnable_time = UINT64_MAX,
7051 		.earliest_rt_make_runnable_time = UINT64_MAX
7052 	};
7053 
7054 	sched_tick_ctime = mach_absolute_time();
7055 
7056 	if (__improbable(sched_tick_last_abstime == 0)) {
7057 		sched_tick_last_abstime = sched_tick_ctime;
7058 		late_time = 0;
7059 		sched_tick_delta = 1;
7060 	} else {
7061 		late_time = sched_tick_ctime - sched_tick_last_abstime;
7062 		sched_tick_delta = late_time / sched_tick_interval;
7063 		/* Ensure a delta of at least 1, since the interval could be slightly
7064 		 * smaller than the sched_tick_interval due to dispatch
7065 		 * latencies.
7066 		 */
7067 		sched_tick_delta = MAX(sched_tick_delta, 1);
7068 
7069 		/* In the event interrupt latencies or platform
7070 		 * idle events that advanced the timebase resulted
7071 		 * in periods where no threads were dispatched,
7072 		 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
7073 		 * iterations.
7074 		 */
7075 		sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
7076 
7077 		sched_tick_last_abstime = sched_tick_ctime;
7078 		sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
7079 	}
7080 
7081 	scan_context.sched_tick_last_abstime = sched_tick_last_abstime;
7082 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
7083 	    sched_tick_delta, late_time, 0, 0, 0);
7084 
7085 	/* Add a number of pseudo-ticks corresponding to the elapsed interval.
7086 	 * This could be greater than 1 if there were substantial intervals
7087 	 * during which all processors were idle, which is rare in practice.
7088 	 */
7089 
7090 	sched_tick += sched_tick_delta;
7091 
7092 	update_vm_info();
7093 
7094 	/*
7095 	 *  Compute various averages.
7096 	 */
7097 	compute_averages(sched_tick_delta);
7098 
7099 	/*
7100 	 *  Scan the run queues for threads which
7101 	 *  may need to be updated, and find the earliest runnable thread on the runqueue
7102 	 *  to report its latency.
7103 	 */
7104 	SCHED(thread_update_scan)(&scan_context);
7105 
7106 	SCHED(rt_runq_scan)(&scan_context);
7107 
7108 	uint64_t ctime = mach_absolute_time();
7109 
7110 	uint64_t bg_max_latency       = (ctime > scan_context.earliest_bg_make_runnable_time) ?
7111 	    ctime - scan_context.earliest_bg_make_runnable_time : 0;
7112 
7113 	uint64_t default_max_latency  = (ctime > scan_context.earliest_normal_make_runnable_time) ?
7114 	    ctime - scan_context.earliest_normal_make_runnable_time : 0;
7115 
7116 	uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
7117 	    ctime - scan_context.earliest_rt_make_runnable_time : 0;
7118 
7119 	machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
7120 
7121 	/*
7122 	 * Check to see if the special sched VM group needs attention.
7123 	 */
7124 	sched_vm_group_maintenance();
7125 
7126 #if __arm64__
7127 	/* Check to see if the recommended cores failsafe is active */
7128 	sched_recommended_cores_maintenance();
7129 #endif /* __arm64__ */
7130 
7131 
7132 #if DEBUG || DEVELOPMENT
7133 #if __x86_64__
7134 #include <i386/misc_protos.h>
7135 	/* Check for long-duration interrupts */
7136 	mp_interrupt_watchdog();
7137 #endif /* __x86_64__ */
7138 #endif /* DEBUG || DEVELOPMENT */
7139 
7140 	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
7141 	    sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
7142 	    sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);
7143 
7144 	assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
7145 	thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
7146 	/*NOTREACHED*/
7147 }
7148 
7149 static uint64_t sched_maintenance_wakeups;
7150 
7151 /*
7152  * Determine if the set of routines formerly driven by a maintenance timer
7153  * must be invoked, based on a deadline comparison. Signals the scheduler
7154  * maintenance thread on deadline expiration. Must be invoked at an interval
7155  * lower than the "sched_tick_interval", currently accomplished by
7156  * invocation via the quantum expiration timer and at context switch time.
7157  * Performance matters: this routine reuses a timestamp approximating the
7158  * current absolute time received from the caller, and should perform
7159  * no more than a comparison against the deadline in the common case.
7160  */
7161 void
7162 sched_timeshare_consider_maintenance(uint64_t ctime, bool safe_point)
7163 {
7164 	uint64_t deadline = sched_maintenance_deadline;
7165 
7166 	if (__improbable(ctime >= deadline)) {
7167 		if (__improbable(current_thread() == sched_maintenance_thread)) {
7168 			return;
7169 		}
7170 		OSMemoryBarrier();
7171 
7172 		uint64_t ndeadline = ctime + sched_tick_interval;
7173 
7174 		if (__probable(os_atomic_cmpxchg(&sched_maintenance_deadline, deadline, ndeadline, seq_cst))) {
7175 			thread_wakeup((event_t)sched_timeshare_maintenance_continue);
7176 			sched_maintenance_wakeups++;
7177 			smr_maintenance(ctime);
7178 		}
7179 	}
7180 
7181 	smr_cpu_tick(ctime, safe_point);
7182 
7183 #if !CONFIG_SCHED_CLUTCH
7184 	/*
7185 	 * Only non-clutch schedulers use the global load calculation EWMA algorithm. For the
7186 	 * clutch scheduler, the load is maintained at the thread group and bucket level.
7187 	 */
7188 	uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed);
7189 
7190 	if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
7191 		uint64_t new_deadline = 0;
7192 		if (os_atomic_cmpxchg(&sched_load_compute_deadline, load_compute_deadline, new_deadline, relaxed)) {
7193 			compute_sched_load();
7194 			new_deadline = ctime + sched_load_compute_interval_abs;
7195 			os_atomic_store_wide(&sched_load_compute_deadline, new_deadline, relaxed);
7196 		}
7197 	}
7198 #endif /* CONFIG_SCHED_CLUTCH */
7199 
7200 #if __arm64__
7201 	uint64_t perf_deadline = os_atomic_load(&sched_perfcontrol_callback_deadline, relaxed);
7202 
7203 	if (__improbable(perf_deadline && ctime >= perf_deadline)) {
7204 		/* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
7205 		if (os_atomic_cmpxchg(&sched_perfcontrol_callback_deadline, perf_deadline, 0, relaxed)) {
7206 			machine_perfcontrol_deadline_passed(perf_deadline);
7207 		}
7208 	}
7209 #endif /* __arm64__ */
7210 }
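/*
 * Editor's illustrative sketch (not part of the original source): a typical
 * call site as described in the block comment above -- the caller passes a
 * timestamp it already has, so the common case is a single deadline
 * comparison. The 'false' safe_point value is a placeholder.
 */
#if 0
	uint64_t ctime = mach_absolute_time();
	sched_timeshare_consider_maintenance(ctime, false);
#endif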
7211 
7212 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
7213 
7214 void
7215 sched_init_thread(void)
7216 {
7217 	thread_block(THREAD_CONTINUE_NULL);
7218 
7219 	thread_t thread = current_thread();
7220 
7221 	thread_set_thread_name(thread, "sched_maintenance_thread");
7222 
7223 	sched_maintenance_thread = thread;
7224 
7225 	SCHED(maintenance_continuation)();
7226 
7227 	/*NOTREACHED*/
7228 }
7229 
7230 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
7231 
7232 /*
7233  *	thread_update_scan / runq_scan:
7234  *
7235  *	Scan the run queues to account for timesharing threads
7236  *	which need to be updated.
7237  *
7238  *	Scanner runs in two passes.  Pass one squirrels likely
7239  *	threads away in an array, pass two does the update.
7240  *
7241  *	This is necessary because the run queue is locked for
7242  *	the candidate scan, but the thread is locked for the update.
7243  *
7244  *	Array should be sized to make forward progress, without
7245  *	disabling preemption for long periods.
7246  */
7247 
7248 #define THREAD_UPDATE_SIZE              128
7249 
7250 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
7251 static uint32_t thread_update_count = 0;
7252 
7253 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
7254 boolean_t
7255 thread_update_add_thread(thread_t thread)
7256 {
7257 	if (thread_update_count == THREAD_UPDATE_SIZE) {
7258 		return FALSE;
7259 	}
7260 
7261 	thread_update_array[thread_update_count++] = thread;
7262 	thread_reference(thread);
7263 	return TRUE;
7264 }
7265 
7266 void
7267 thread_update_process_threads(void)
7268 {
7269 	assert(thread_update_count <= THREAD_UPDATE_SIZE);
7270 
7271 	for (uint32_t i = 0; i < thread_update_count; i++) {
7272 		thread_t thread = thread_update_array[i];
7273 		assert_thread_magic(thread);
7274 		thread_update_array[i] = THREAD_NULL;
7275 
7276 		spl_t s = splsched();
7277 		thread_lock(thread);
7278 		if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
7279 			SCHED(update_priority)(thread);
7280 		}
7281 		thread_unlock(thread);
7282 		splx(s);
7283 
7284 		thread_deallocate(thread);
7285 	}
7286 
7287 	thread_update_count = 0;
7288 }
7289 
7290 static boolean_t
7291 runq_scan_thread(
7292 	thread_t thread,
7293 	sched_update_scan_context_t scan_context)
7294 {
7295 	assert_thread_magic(thread);
7296 
7297 	if (thread->sched_stamp != sched_tick &&
7298 	    thread->sched_mode == TH_MODE_TIMESHARE) {
7299 		if (thread_update_add_thread(thread) == FALSE) {
7300 			return TRUE;
7301 		}
7302 	}
7303 
7304 	if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
7305 		if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
7306 			scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
7307 		}
7308 	} else {
7309 		if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
7310 			scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
7311 		}
7312 	}
7313 
7314 	return FALSE;
7315 }
7316 
7317 /*
7318  *	Scan a runq for candidate threads.
7319  *
7320  *	Returns TRUE if retry is needed.
7321  */
7322 boolean_t
7323 runq_scan(
7324 	run_queue_t                   runq,
7325 	sched_update_scan_context_t   scan_context)
7326 {
7327 	int count       = runq->count;
7328 	int queue_index;
7329 
7330 	assert(count >= 0);
7331 
7332 	if (count == 0) {
7333 		return FALSE;
7334 	}
7335 
7336 	for (queue_index = bitmap_first(runq->bitmap, NRQS);
7337 	    queue_index >= 0;
7338 	    queue_index = bitmap_next(runq->bitmap, queue_index)) {
7339 		thread_t thread;
7340 		circle_queue_t queue = &runq->queues[queue_index];
7341 
7342 		cqe_foreach_element(thread, queue, runq_links) {
7343 			assert(count > 0);
7344 			if (runq_scan_thread(thread, scan_context) == TRUE) {
7345 				return TRUE;
7346 			}
7347 			count--;
7348 		}
7349 	}
7350 
7351 	return FALSE;
7352 }
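/*
 * Editor's illustrative sketch (not part of the original source): the shape
 * of a per-scheduler thread_update_scan() driver for the two-pass scheme
 * described above. The real drivers also take the appropriate runq/pset
 * locks around pass one; 'runq' and 'scan_context' are placeholders.
 */
#if 0
	boolean_t restart_needed;
	do {
		/* Pass one: squirrel candidate threads into thread_update_array. */
		restart_needed = runq_scan(runq, scan_context);
		/* Pass two: update each collected thread under its thread lock. */
		thread_update_process_threads();
	} while (restart_needed);
#endif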
7353 
7354 #if CONFIG_SCHED_CLUTCH
7355 
7356 boolean_t
7357 sched_clutch_timeshare_scan(
7358 	queue_t thread_queue,
7359 	uint16_t thread_count,
7360 	sched_update_scan_context_t scan_context)
7361 {
7362 	if (thread_count == 0) {
7363 		return FALSE;
7364 	}
7365 
7366 	thread_t thread;
7367 	qe_foreach_element_safe(thread, thread_queue, th_clutch_timeshare_link) {
7368 		if (runq_scan_thread(thread, scan_context) == TRUE) {
7369 			return TRUE;
7370 		}
7371 		thread_count--;
7372 	}
7373 
7374 	assert(thread_count == 0);
7375 	return FALSE;
7376 }
7377 
7378 
7379 #endif /* CONFIG_SCHED_CLUTCH */
7380 
7381 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
7382 
7383 bool
7384 thread_is_eager_preempt(thread_t thread)
7385 {
7386 	return thread->sched_flags & TH_SFLAG_EAGERPREEMPT;
7387 }
7388 
7389 void
7390 thread_set_eager_preempt(thread_t thread)
7391 {
7392 	spl_t s = splsched();
7393 	thread_lock(thread);
7394 
7395 	assert(!thread_is_eager_preempt(thread));
7396 
7397 	thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
7398 
7399 	if (thread == current_thread()) {
7400 		/* csw_check updates current_is_eagerpreempt on the processor */
7401 		ast_t ast = csw_check(thread, current_processor(), AST_NONE);
7402 
7403 		thread_unlock(thread);
7404 
7405 		if (ast != AST_NONE) {
7406 			thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
7407 		}
7408 	} else {
7409 		processor_t last_processor = thread->last_processor;
7410 
7411 		if (last_processor != PROCESSOR_NULL &&
7412 		    last_processor->state == PROCESSOR_RUNNING &&
7413 		    last_processor->active_thread == thread) {
7414 			cause_ast_check(last_processor);
7415 		}
7416 
7417 		thread_unlock(thread);
7418 	}
7419 
7420 	splx(s);
7421 }
7422 
7423 void
7424 thread_clear_eager_preempt(thread_t thread)
7425 {
7426 	spl_t s = splsched();
7427 	thread_lock(thread);
7428 
7429 	assert(thread_is_eager_preempt(thread));
7430 
7431 	thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
7432 
7433 	if (thread == current_thread()) {
7434 		current_processor()->current_is_eagerpreempt = false;
7435 	}
7436 
7437 	thread_unlock(thread);
7438 	splx(s);
7439 }
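/*
 * Editor's illustrative sketch (not part of the original source): eager
 * preemption is a bracketed hint -- set it around a window in which the
 * thread should be preempted aggressively, then clear it.
 */
#if 0
	thread_set_eager_preempt(thread);
	/* ... window during which the thread should yield eagerly ... */
	thread_clear_eager_preempt(thread);
#endif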
7440 
7441 /*
7442  * Scheduling statistics
7443  */
7444 void
7445 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
7446 {
7447 	struct sched_statistics *stats;
7448 	boolean_t to_realtime = FALSE;
7449 
7450 	stats = PERCPU_GET_RELATIVE(sched_stats, processor, processor);
7451 	stats->csw_count++;
7452 
7453 	if (otherpri >= BASEPRI_REALTIME) {
7454 		stats->rt_sched_count++;
7455 		to_realtime = TRUE;
7456 	}
7457 
7458 	if ((reasons & AST_PREEMPT) != 0) {
7459 		stats->preempt_count++;
7460 
7461 		if (selfpri >= BASEPRI_REALTIME) {
7462 			stats->preempted_rt_count++;
7463 		}
7464 
7465 		if (to_realtime) {
7466 			stats->preempted_by_rt_count++;
7467 		}
7468 	}
7469 }
7470 
7471 void
7472 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
7473 {
7474 	uint64_t timestamp = mach_absolute_time();
7475 
7476 	stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
7477 	stats->last_change_timestamp = timestamp;
7478 }
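/*
 * Editor's illustrative sketch (not part of the original source): count_sum
 * accumulates (run queue depth x time), so a hypothetical consumer can
 * derive the average depth over a sampling window by differencing two
 * snapshots. All variable names below are placeholders.
 */
#if 0
	uint64_t elapsed = snap_end_time - snap_start_time;
	uint64_t avg_depth = (elapsed != 0) ?
	    ((snap_end_count_sum - snap_start_count_sum) / elapsed) : 0;
#endif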
7479 
7480 /*
7481  *     For calls from assembly code
7482  */
7483 #undef thread_wakeup
7484 void
7485 thread_wakeup(
7486 	event_t         x);
7487 
7488 void
7489 thread_wakeup(
7490 	event_t         x)
7491 {
7492 	thread_wakeup_with_result(x, THREAD_AWAKENED);
7493 }
7494 
7495 boolean_t
7496 preemption_enabled(void)
7497 {
7498 	return get_preemption_level() == 0 && ml_get_interrupts_enabled();
7499 }
7500 
7501 static void
7502 sched_timer_deadline_tracking_init(void)
7503 {
7504 	nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
7505 	nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
7506 }
7507 
7508 /*
7509  * Check that all CPUs are successfully powered up in places where that's expected.
7510  */
7511 static void
7512 check_all_cpus_are_done_starting(processor_start_kind_t start_kind)
7513 {
7514 	/*
7515 	 * `processor_count` may include registered CPUs above the cpus= or cpumask= limit.
7516 	 * Use machine_info.logical_cpu_max for the CPU IDs that matter.
7517 	 */
7518 	for (int cpu_id = 0; cpu_id < machine_info.logical_cpu_max; cpu_id++) {
7519 		processor_t processor = processor_array[cpu_id];
7520 		processor_wait_for_start(processor, start_kind);
7521 	}
7522 }
7523 
7524 /*
7525  * Find some available online CPU that threads can be enqueued on
7526  *
7527  * Called with the sched_available_cores_lock held
7528  */
7529 static int
7530 sched_last_resort_cpu(void)
7531 {
7532 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
7533 
7534 	int last_resort_cpu = lsb_first(pcs.pcs_effective.pcs_online_cores);
7535 
7536 	if (last_resort_cpu == -1) {
7537 		panic("no last resort cpu found!");
7538 	}
7539 
7540 	return last_resort_cpu;
7541 }
7542 
7543 
7544 static void
7545 assert_no_processors_in_transition_locked()
7546 {
7547 	assert(pcs.pcs_in_kernel_sleep == false);
7548 
7549 	/* All processors must be either running or offline */
7550 	assert(pcs.pcs_managed_cores ==
7551 	    (processor_offline_state_map[PROCESSOR_OFFLINE_RUNNING] |
7552 	    processor_offline_state_map[PROCESSOR_OFFLINE_FULLY_OFFLINE]));
7553 
7554 	/* All state transitions must be quiesced at this point */
7555 	assert(pcs.pcs_effective.pcs_online_cores ==
7556 	    processor_offline_state_map[PROCESSOR_OFFLINE_RUNNING]);
7557 }
7558 
7559 static struct powered_cores_state
7560 sched_compute_requested_powered_cores()
7561 {
7562 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
7563 
7564 	struct powered_cores_state output = {
7565 		.pcs_online_cores = pcs.pcs_managed_cores,
7566 		.pcs_powerdown_recommended_cores = pcs.pcs_managed_cores,
7567 		.pcs_tempdown_cores = 0,
7568 	};
7569 
7570 	if (!pcs.pcs_init_completed) {
7571 		return output;
7572 	}
7573 
7574 	/*
7575 	 * if we unify this with derecommendation, note that only sleep should stop derecommendation,
7576 	 * not dtrace et al
7577 	 */
7578 	if (pcs.pcs_powerdown_suspend_count) {
7579 		return output;
7580 	} else {
7581 		/*
7582 		 * The cores that power clients like ANE require, or that
7583 		 * the kernel cannot offline.
7584 		 */
7585 		cpumap_t system_required_powered_cores = pcs.pcs_required_online_pmgr |
7586 		    pcs.pcs_required_online_system;
7587 
7588 		cpumap_t online_cores_goal;
7589 
7590 		if (pcs.pcs_user_online_core_control) {
7591 			/* This is our new goal state for powered cores */
7592 			output.pcs_powerdown_recommended_cores = pcs.pcs_requested_online_user;
7593 			online_cores_goal = pcs.pcs_requested_online_user | system_required_powered_cores;
7594 		} else {
7595 			/* Remove the cores CLPC wants to power down */
7596 			cpumap_t clpc_wanted_powered_cores = pcs.pcs_managed_cores;
7597 			clpc_wanted_powered_cores &= pcs.pcs_requested_online_clpc_user;
7598 			clpc_wanted_powered_cores &= pcs.pcs_requested_online_clpc_system;
7599 
7600 			output.pcs_powerdown_recommended_cores = clpc_wanted_powered_cores;
7601 			online_cores_goal = clpc_wanted_powered_cores | system_required_powered_cores;
7602 
7603 			/* Any managed cores that are not in the CLPC-wanted powered set become temporarily down */
7604 			output.pcs_tempdown_cores = (pcs.pcs_managed_cores & ~clpc_wanted_powered_cores);
7605 
7606 			/* Future: Treat CLPC user/system separately. */
7607 		}
7608 
7609 		if (online_cores_goal == 0) {
7610 			/*
7611 			 * If we're somehow trying to disable all CPUs,
7612 			 * force online the lowest numbered CPU.
7613 			 */
7614 			online_cores_goal = BIT(lsb_first(pcs.pcs_managed_cores));
7615 		}
7616 
7617 #if RHODES_CLUSTER_POWERDOWN_WORKAROUND
7618 		/*
7619 		 * Because warm CPU boot from WFI is not currently implemented,
7620 		 * we cannot power down only one CPU in a cluster, so we force up
7621 		 * all the CPUs in the cluster if any one CPU is up in the cluster.
7622 		 * Once all CPUs are disabled, then the whole cluster goes down at once.
7623 		 */
7624 
7625 		cpumap_t workaround_online_cores = 0;
7626 
7627 		const ml_topology_info_t* topology = ml_get_topology_info();
7628 		for (unsigned int i = 0; i < topology->num_clusters; i++) {
7629 			ml_topology_cluster_t* cluster = &topology->clusters[i];
7630 			if ((cluster->cpu_mask & online_cores_goal) != 0) {
7631 				workaround_online_cores |= cluster->cpu_mask;
7632 			}
7633 		}
7634 
7635 		online_cores_goal = workaround_online_cores;
7636 #endif /* RHODES_CLUSTER_POWERDOWN_WORKAROUND */
7637 
7638 		output.pcs_online_cores = online_cores_goal;
7639 	}
7640 
7641 	return output;
7642 }
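/*
 * Editor's illustrative sketch (not part of the original source): the cpumap
 * composition performed above for the CLPC-driven case (i.e. when userspace
 * online-core control is not active), written as straight-line math.
 */
#if 0
	cpumap_t wanted   = pcs.pcs_managed_cores &
	    pcs.pcs_requested_online_clpc_user &
	    pcs.pcs_requested_online_clpc_system;
	cpumap_t required = pcs.pcs_required_online_pmgr | pcs.pcs_required_online_system;
	cpumap_t online   = wanted | required;                /* pcs_online_cores goal */
	cpumap_t tempdown = pcs.pcs_managed_cores & ~wanted;  /* pcs_tempdown_cores */
#endif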
7643 
7644 static bool
7645 sched_needs_update_requested_powered_cores()
7646 {
7647 	if (!pcs.pcs_init_completed) {
7648 		return false;
7649 	}
7650 
7651 	struct powered_cores_state requested = sched_compute_requested_powered_cores();
7652 
7653 	struct powered_cores_state effective = pcs.pcs_effective;
7654 
7655 	if (requested.pcs_powerdown_recommended_cores != effective.pcs_powerdown_recommended_cores ||
7656 	    requested.pcs_online_cores != effective.pcs_online_cores ||
7657 	    requested.pcs_tempdown_cores != effective.pcs_tempdown_cores) {
7658 		return true;
7659 	} else {
7660 		return false;
7661 	}
7662 }
7663 
7664 kern_return_t
7665 sched_processor_exit_user(processor_t processor)
7666 {
7667 	assert(processor);
7668 
7669 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
7670 	assert(preemption_enabled());
7671 
7672 	kern_return_t result;
7673 
7674 	spl_t s = splsched();
7675 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7676 
7677 	if (!enable_processor_exit) {
7678 		/* This API is not supported on this device. */
7679 		result = KERN_NOT_SUPPORTED;
7680 		goto unlock;
7681 	}
7682 
7683 	if (bit_test(pcs.pcs_required_online_system, processor->cpu_id)) {
7684 		/* This CPU can never change state outside of sleep. */
7685 		result = KERN_NOT_SUPPORTED;
7686 		goto unlock;
7687 	}
7688 
7689 	/*
7690 	 * Future: Instead of failing, simulate the processor
7691 	 * being shut down via derecommendation and decrementing active count.
7692 	 */
7693 	if (bit_test(pcs.pcs_required_online_pmgr, processor->cpu_id)) {
7694 		/* PMGR won't let us power down this CPU right now. */
7695 		result = KERN_FAILURE;
7696 		goto unlock;
7697 	}
7698 
7699 	if (pcs.pcs_powerdown_suspend_count) {
7700 		/* A tool that disables CPU powerdown is active. */
7701 		result = KERN_FAILURE;
7702 		goto unlock;
7703 	}
7704 
7705 	if (!bit_test(pcs.pcs_requested_online_user, processor->cpu_id)) {
7706 		/* The CPU is already powered off by userspace. */
7707 		result = KERN_NODE_DOWN;
7708 		goto unlock;
7709 	}
7710 
7711 	if ((pcs.pcs_recommended_cores & pcs.pcs_effective.pcs_online_cores) == BIT(processor->cpu_id)) {
7712 		/* This is the last available core, can't shut it down. */
7713 		result = KERN_RESOURCE_SHORTAGE;
7714 		goto unlock;
7715 	}
7716 
7717 	result = KERN_SUCCESS;
7718 
7719 	if (!pcs.pcs_user_online_core_control) {
7720 		pcs.pcs_user_online_core_control = true;
7721 	}
7722 
7723 	bit_clear(pcs.pcs_requested_online_user, processor->cpu_id);
7724 
7725 	if (sched_needs_update_requested_powered_cores()) {
7726 		sched_update_powered_cores_drops_lock(REASON_USER, s);
7727 	}
7728 
7729 unlock:
7730 	simple_unlock(&sched_available_cores_lock);
7731 	splx(s);
7732 
7733 	return result;
7734 }
7735 
7736 kern_return_t
7737 sched_processor_start_user(processor_t processor)
7738 {
7739 	assert(processor);
7740 
7741 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
7742 	assert(preemption_enabled());
7743 
7744 	kern_return_t result;
7745 
7746 	spl_t s = splsched();
7747 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7748 
7749 	if (!enable_processor_exit) {
7750 		result = KERN_NOT_SUPPORTED;
7751 		goto unlock;
7752 	}
7753 
7754 	if (bit_test(pcs.pcs_required_online_system, processor->cpu_id)) {
7755 		result = KERN_NOT_SUPPORTED;
7756 		goto unlock;
7757 	}
7758 
7759 	/* Not allowed to start an SMT processor while SMT is disabled */
7760 	if ((sched_enable_smt == 0) && (processor->processor_primary != processor)) {
7761 		result = KERN_FAILURE;
7762 		goto unlock;
7763 	}
7764 
7765 	if (pcs.pcs_powerdown_suspend_count) {
7766 		result = KERN_FAILURE;
7767 		goto unlock;
7768 	}
7769 
7770 	if (bit_test(pcs.pcs_requested_online_user, processor->cpu_id)) {
7771 		result = KERN_FAILURE;
7772 		goto unlock;
7773 	}
7774 
7775 	result = KERN_SUCCESS;
7776 
7777 	bit_set(pcs.pcs_requested_online_user, processor->cpu_id);
7778 
7779 	/*
7780 	 * Once the user puts all CPUs back online,
7781 	 * we can resume automatic cluster power down.
7782 	 */
7783 	if (pcs.pcs_requested_online_user == pcs.pcs_managed_cores) {
7784 		pcs.pcs_user_online_core_control = false;
7785 	}
7786 
7787 	if (sched_needs_update_requested_powered_cores()) {
7788 		sched_update_powered_cores_drops_lock(REASON_USER, s);
7789 	}
7790 
7791 unlock:
7792 	simple_unlock(&sched_available_cores_lock);
7793 	splx(s);
7794 
7795 	return result;
7796 }
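/*
 * Editor's illustrative sketch (not part of the original source): the
 * user-driven power-off/power-on pairing these two entry points implement.
 * Per the asserts above, callers hold cluster_powerdown_lock with
 * preemption enabled.
 */
#if 0
	lck_mtx_lock(&cluster_powerdown_lock);
	kern_return_t kr = sched_processor_exit_user(processor);   /* power the CPU down */
	if (kr == KERN_SUCCESS) {
		kr = sched_processor_start_user(processor);        /* and bring it back up */
	}
	lck_mtx_unlock(&cluster_powerdown_lock);
#endif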
7797 
7798 sched_cond_atomic_t sched_update_powered_cores_wakeup;
7799 thread_t sched_update_powered_cores_thread;
7800 
7801 
7802 static void OS_NORETURN sched_update_powered_cores_continue(void *param __unused, wait_result_t wr __unused);
7803 
7804 /*
7805  * After all processors have been ml_processor_register'ed and processor_boot'ed
7806  * the scheduler can finalize its data structures and allow CPU power state changes.
7807  *
7808  * Enforce that this only happens *once*. More than once is definitely not OK. rdar://121270513
7809  */
7810 void
7811 sched_cpu_init_completed(void)
7812 {
7813 	static bool sched_cpu_init_completed_called = false;
7814 
7815 	if (!os_atomic_cmpxchg(&sched_cpu_init_completed_called, false, true, relaxed)) {
7816 		panic("sched_cpu_init_completed called twice! %d", sched_cpu_init_completed_called);
7817 	}
7818 
7819 	if (SCHED(cpu_init_completed) != NULL) {
7820 		SCHED(cpu_init_completed)();
7821 	}
7822 
7823 	/* Wait for any cpu that is still starting, and enforce that they eventually complete. */
7824 	check_all_cpus_are_done_starting(PROCESSOR_FIRST_BOOT);
7825 
7826 	lck_mtx_lock(&cluster_powerdown_lock);
7827 
7828 	assert(sched_update_powered_cores_thread == THREAD_NULL);
7829 
7830 	sched_cond_init(&sched_update_powered_cores_wakeup);
7831 
7832 	kern_return_t result = kernel_thread_start_priority(
7833 		sched_update_powered_cores_continue,
7834 		NULL, MAXPRI_KERNEL, &sched_update_powered_cores_thread);
7835 	if (result != KERN_SUCCESS) {
7836 		panic("failed to create sched_update_powered_cores thread");
7837 	}
7838 
7839 	thread_set_thread_name(sched_update_powered_cores_thread,
7840 	    "sched_update_powered_cores");
7841 
7842 	spl_t s = splsched();
7843 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7844 
7845 	assert(pcs.pcs_init_completed == false);
7846 
7847 	pcs.pcs_managed_cores = pcs.pcs_effective.pcs_online_cores;
7848 
7849 	assert(__builtin_popcountll(pcs.pcs_managed_cores) == machine_info.logical_cpu_max);
7850 
7851 	/* If CLPC tries to cluster power down before this point, it's ignored. */
7852 	pcs.pcs_requested_online_user = pcs.pcs_managed_cores;
7853 	pcs.pcs_requested_online_clpc_system = pcs.pcs_managed_cores;
7854 	pcs.pcs_requested_online_clpc_user = pcs.pcs_managed_cores;
7855 
7856 	cpumap_t system_required_cores = 0;
7857 
7858 	/*
7859 	 * Ask the platform layer which CPUs are allowed to
7860 	 * be powered off outside of system sleep.
7861 	 */
7862 	for (int cpu_id = 0; cpu_id < machine_info.logical_cpu_max; cpu_id++) {
7863 		if (!ml_cpu_can_exit(cpu_id)) {
7864 			bit_set(system_required_cores, cpu_id);
7865 		}
7866 	}
7867 
7868 	pcs.pcs_required_online_system = system_required_cores;
7869 	pcs.pcs_effective.pcs_powerdown_recommended_cores = pcs.pcs_managed_cores;
7870 
7871 	pcs.pcs_requested = sched_compute_requested_powered_cores();
7872 
7873 	assert(pcs.pcs_requested.pcs_powerdown_recommended_cores == pcs.pcs_managed_cores);
7874 	assert(pcs.pcs_requested.pcs_online_cores == pcs.pcs_managed_cores);
7875 	assert(pcs.pcs_requested.pcs_tempdown_cores == 0);
7876 
7877 	assert(pcs.pcs_effective.pcs_powerdown_recommended_cores == pcs.pcs_managed_cores);
7878 	assert(pcs.pcs_effective.pcs_online_cores == pcs.pcs_managed_cores);
7879 	assert(pcs.pcs_effective.pcs_tempdown_cores == 0);
7880 
7881 	pcs.pcs_init_completed = true;
7882 
7883 	simple_unlock(&sched_available_cores_lock);
7884 	splx(s);
7885 
7886 	lck_mtx_unlock(&cluster_powerdown_lock);
7887 
7888 	/* Release the +1 pcs_powerdown_suspend_count that we booted up with. */
7889 	resume_cluster_powerdown();
7890 }
7891 
7892 bool
7893 sched_is_in_sleep(void)
7894 {
7895 	return pcs.pcs_in_kernel_sleep || pcs.pcs_wants_kernel_sleep;
7896 }
7897 
7898 bool
7899 sched_is_cpu_init_completed(void)
7900 {
7901 	return pcs.pcs_init_completed;
7902 }
7903 
7904 processor_reason_t last_sched_update_powered_cores_continue_reason;
7905 
7906 static void OS_NORETURN
7907 sched_update_powered_cores_continue(void *param __unused, wait_result_t wr __unused)
7908 {
7909 	sched_cond_ack(&sched_update_powered_cores_wakeup);
7910 
7911 	while (true) {
7912 		lck_mtx_lock(&cluster_powerdown_lock);
7913 
7914 		spl_t s = splsched();
7915 		simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7916 
7917 		bool needs_update = sched_needs_update_requested_powered_cores();
7918 
7919 		if (needs_update) {
7920 			/* This thread shouldn't need to make changes while powerdown is suspended */
7921 			assert(pcs.pcs_powerdown_suspend_count == 0);
7922 
7923 			processor_reason_t reason = last_sched_update_powered_cores_continue_reason;
7924 
7925 			sched_update_powered_cores_drops_lock(reason, s);
7926 		}
7927 
7928 		simple_unlock(&sched_available_cores_lock);
7929 		splx(s);
7930 
7931 		lck_mtx_unlock(&cluster_powerdown_lock);
7932 
7933 		/* If we did an update, we dropped the lock, so check again. */
7934 
7935 		if (!needs_update) {
7936 			sched_cond_wait(&sched_update_powered_cores_wakeup, THREAD_UNINT,
7937 			    sched_update_powered_cores_continue);
7938 			/* The condition was signaled since we last blocked, check again. */
7939 		}
7940 	}
7941 }
7942 
7943 __options_decl(sched_powered_cores_flags_t, uint32_t, {
7944 	ASSERT_IN_SLEEP                 = 0x10000000,
7945 	ASSERT_POWERDOWN_SUSPENDED      = 0x20000000,
7946 	POWERED_CORES_OPTIONS_MASK      = ASSERT_IN_SLEEP | ASSERT_POWERDOWN_SUSPENDED,
7947 });
7948 
7949 /*
7950  * This is KPI with CLPC.
7951  */
7952 void
7953 sched_perfcontrol_update_powered_cores(
7954 	uint64_t requested_powered_cores,
7955 	processor_reason_t reason,
7956 	__unused uint32_t flags)
7957 {
7958 	assert((reason == REASON_CLPC_SYSTEM) || (reason == REASON_CLPC_USER));
7959 
7960 #if DEVELOPMENT || DEBUG
7961 	if (flags & (ASSERT_IN_SLEEP | ASSERT_POWERDOWN_SUSPENDED)) {
7962 		if (flags & ASSERT_POWERDOWN_SUSPENDED) {
7963 			assert(pcs.pcs_powerdown_suspend_count > 0);
7964 		}
7965 		if (flags & ASSERT_IN_SLEEP) {
7966 			assert(pcs.pcs_sleep_override_recommended == true);
7967 		}
7968 		return;
7969 	}
7970 #endif
7971 
7972 	spl_t s = splsched();
7973 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
7974 
7975 	cpumap_t requested_cores = requested_powered_cores & pcs.pcs_managed_cores;
7976 
7977 	if (reason == REASON_CLPC_SYSTEM) {
7978 		pcs.pcs_requested_online_clpc_system = requested_cores;
7979 	} else if (reason == REASON_CLPC_USER) {
7980 		pcs.pcs_requested_online_clpc_user = requested_cores;
7981 	}
7982 
7983 	bool needs_update = sched_needs_update_requested_powered_cores();
7984 
7985 	if (needs_update) {
7986 		last_sched_update_powered_cores_continue_reason = reason;
7987 	}
7988 
7989 	simple_unlock(&sched_available_cores_lock);
7990 	splx(s);
7991 
7992 	if (needs_update) {
7993 		sched_cond_signal(&sched_update_powered_cores_wakeup,
7994 		    sched_update_powered_cores_thread);
7995 	}
7996 }
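/*
 * Editor's illustrative sketch (not part of the original source): a
 * CLPC-style call into this KPI requesting that only CPUs 0-3 stay powered.
 * The mask is intersected with pcs_managed_cores above, so stray bits are
 * ignored; the 0x0F mask is a placeholder.
 */
#if 0
	sched_perfcontrol_update_powered_cores(0x0F, REASON_CLPC_SYSTEM, 0);
#endif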
7997 
7998 /*
7999  * This doesn't just suspend cluster powerdown.
8000  * It also powers up all the cores and leaves them up,
8001  * even if some user wanted them down.
8002  * This is important because dtrace, monotonic, and others can't handle any
8003  * powered down cores, not just cluster powerdown.
8004  */
8005 static void
8006 suspend_cluster_powerdown_locked(bool for_sleep)
8007 {
8008 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8009 	kprintf("%s>calling sched_update_powered_cores to suspend powerdown\n", __func__);
8010 
8011 	spl_t s = splsched();
8012 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8013 
8014 	assert(pcs.pcs_powerdown_suspend_count >= 0);
8015 
8016 	if (for_sleep) {
8017 		assert(!pcs.pcs_wants_kernel_sleep);
8018 		assert(!pcs.pcs_in_kernel_sleep);
8019 		pcs.pcs_wants_kernel_sleep = true;
8020 	}
8021 
8022 	pcs.pcs_powerdown_suspend_count++;
8023 
8024 	if (sched_needs_update_requested_powered_cores()) {
8025 		sched_update_powered_cores_drops_lock(REASON_SYSTEM, s);
8026 	}
8027 
8028 	if (for_sleep) {
8029 		assert(pcs.pcs_wants_kernel_sleep);
8030 		assert(!pcs.pcs_in_kernel_sleep);
8031 		pcs.pcs_in_kernel_sleep = true;
8032 
8033 		assert(sched_needs_update_requested_powered_cores() == false);
8034 	}
8035 
8036 	simple_unlock(&sched_available_cores_lock);
8037 	splx(s);
8038 
8039 	if (pcs.pcs_init_completed) {
8040 		/* At this point, no cpu should be still starting. Let's enforce that. */
8041 		check_all_cpus_are_done_starting(for_sleep ?
8042 		    PROCESSOR_BEFORE_ENTERING_SLEEP : PROCESSOR_CLUSTER_POWERDOWN_SUSPEND);
8043 	}
8044 }
8045 
8046 static void
8047 resume_cluster_powerdown_locked(bool for_sleep)
8048 {
8049 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8050 
8051 	if (pcs.pcs_init_completed) {
8052 		/* At this point, no cpu should be still starting. Let's enforce that. */
8053 		check_all_cpus_are_done_starting(for_sleep ?
8054 		    PROCESSOR_WAKE_FROM_SLEEP : PROCESSOR_CLUSTER_POWERDOWN_RESUME);
8055 	}
8056 
8057 	kprintf("%s>calling sched_update_powered_cores to resume powerdown\n", __func__);
8058 
8059 	spl_t s = splsched();
8060 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8061 
8062 	if (pcs.pcs_powerdown_suspend_count <= 0) {
8063 		panic("resume_cluster_powerdown() called with pcs.pcs_powerdown_suspend_count=%d\n", pcs.pcs_powerdown_suspend_count);
8064 	}
8065 
8066 	if (for_sleep) {
8067 		assert(pcs.pcs_wants_kernel_sleep);
8068 		assert(pcs.pcs_in_kernel_sleep);
8069 		pcs.pcs_wants_kernel_sleep = false;
8070 	}
8071 
8072 	pcs.pcs_powerdown_suspend_count--;
8073 
8074 	if (pcs.pcs_powerdown_suspend_count == 0) {
8075 		/* Returning to client controlled powerdown mode */
8076 		assert(pcs.pcs_init_completed);
8077 
8078 		/* To match previous behavior, clear the user state */
8079 		pcs.pcs_requested_online_user = pcs.pcs_managed_cores;
8080 		pcs.pcs_user_online_core_control = false;
8081 
8082 		/* To match previous behavior, clear the requested CLPC state. */
8083 		pcs.pcs_requested_online_clpc_user = pcs.pcs_managed_cores;
8084 		pcs.pcs_requested_online_clpc_system = pcs.pcs_managed_cores;
8085 	}
8086 
8087 	if (sched_needs_update_requested_powered_cores()) {
8088 		sched_update_powered_cores_drops_lock(REASON_SYSTEM, s);
8089 	}
8090 
8091 	if (for_sleep) {
8092 		assert(!pcs.pcs_wants_kernel_sleep);
8093 		assert(pcs.pcs_in_kernel_sleep);
8094 		pcs.pcs_in_kernel_sleep = false;
8095 
8096 		assert(sched_needs_update_requested_powered_cores() == false);
8097 	}
8098 
8099 	simple_unlock(&sched_available_cores_lock);
8100 	splx(s);
8101 }
8102 
8103 static uint64_t
8104 die_and_cluster_to_cpu_mask(
8105 	__unused unsigned int die_id,
8106 	__unused unsigned int die_cluster_id)
8107 {
8108 #if __arm__ || __arm64__
8109 	const ml_topology_info_t* topology = ml_get_topology_info();
8110 	unsigned int num_clusters = topology->num_clusters;
8111 	for (unsigned int i = 0; i < num_clusters; i++) {
8112 		ml_topology_cluster_t* cluster = &topology->clusters[i];
8113 		if ((cluster->die_id == die_id) &&
8114 		    (cluster->die_cluster_id == die_cluster_id)) {
8115 			return cluster->cpu_mask;
8116 		}
8117 	}
8118 #endif
8119 	return 0ull;
8120 }
8121 
8122 /*
8123  * Take an assertion that ensures all CPUs in the cluster are powered up until
8124  * the assertion is released.
8125  * A system suspend will still power down the CPUs.
8126  * This call will stall if system suspend is in progress.
8127  *
8128  * Future ER: Could this just power up the cluster, and leave enabling the
8129  * processors to be asynchronous, or deferred?
8130  *
8131  * Enabling the rail is synchronous, it must be powered up before returning.
8132  */
8133 void
8134 sched_enable_acc_rail(unsigned int die_id, unsigned int die_cluster_id)
8135 {
8136 	uint64_t core_mask = die_and_cluster_to_cpu_mask(die_id, die_cluster_id);
8137 
8138 	lck_mtx_lock(&cluster_powerdown_lock);
8139 
8140 	/*
8141 	 * Note: if pcs.pcs_init_completed is false, because the
8142 	 * CPUs have not booted yet, then we assume that all
8143 	 * clusters are already powered up at boot (see IOCPUInitialize)
8144 	 * so we don't have to wait for cpu boot to complete.
8145 	 * We'll still save the requested assertion and enforce it after
8146 	 * boot completes.
8147 	 */
8148 
8149 	spl_t s = splsched();
8150 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8151 
8152 	if (pcs.pcs_init_completed) {
8153 		assert3u(pcs.pcs_managed_cores & core_mask, ==, core_mask);
8154 	}
8155 
8156 	/* Can't enable something that is already enabled */
8157 	assert((pcs.pcs_required_online_pmgr & core_mask) == 0);
8158 
8159 	pcs.pcs_required_online_pmgr |= core_mask;
8160 
8161 	if (sched_needs_update_requested_powered_cores()) {
8162 		sched_update_powered_cores_drops_lock(REASON_PMGR_SYSTEM, s);
8163 	}
8164 
8165 	simple_unlock(&sched_available_cores_lock);
8166 	splx(s);
8167 
8168 	lck_mtx_unlock(&cluster_powerdown_lock);
8169 }
8170 
8171 /*
8172  * Release the assertion ensuring the cluster is powered up.
8173  * This operation is asynchronous, so PMGR doesn't need to wait until it takes
8174  * effect. If the enable comes in before it takes effect, it'll either
8175  * wait on the lock, or the async thread will discover it needs no update.
8176  */
8177 void
8178 sched_disable_acc_rail(unsigned int die_id, unsigned int die_cluster_id)
8179 {
8180 	uint64_t core_mask = die_and_cluster_to_cpu_mask(die_id, die_cluster_id);
8181 
8182 	spl_t s = splsched();
8183 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8184 
8185 	/* Can't disable something that is already disabled */
8186 	assert((pcs.pcs_required_online_pmgr & core_mask) == core_mask);
8187 
8188 	if (pcs.pcs_init_completed) {
8189 		assert3u(pcs.pcs_managed_cores & core_mask, ==, core_mask);
8190 	}
8191 
8192 	pcs.pcs_required_online_pmgr &= ~core_mask;
8193 
8194 	bool needs_update = sched_needs_update_requested_powered_cores();
8195 
8196 	if (needs_update) {
8197 		last_sched_update_powered_cores_continue_reason = REASON_PMGR_SYSTEM;
8198 	}
8199 
8200 	simple_unlock(&sched_available_cores_lock);
8201 	splx(s);
8202 
8203 	if (needs_update) {
8204 		sched_cond_signal(&sched_update_powered_cores_wakeup,
8205 		    sched_update_powered_cores_thread);
8206 	}
8207 }
8208 
8209 void
8210 suspend_cluster_powerdown(void)
8211 {
8212 	lck_mtx_lock(&cluster_powerdown_lock);
8213 	suspend_cluster_powerdown_locked(false);
8214 	lck_mtx_unlock(&cluster_powerdown_lock);
8215 }
8216 
8217 void
8218 resume_cluster_powerdown(void)
8219 {
8220 	lck_mtx_lock(&cluster_powerdown_lock);
8221 	resume_cluster_powerdown_locked(false);
8222 	lck_mtx_unlock(&cluster_powerdown_lock);
8223 
8224 	if (sched_enable_smt == 0) {
8225 		enable_smt_processors(false);
8226 	}
8227 }
8228 
8229 
8230 LCK_MTX_DECLARE(user_cluster_powerdown_lock, &cluster_powerdown_grp);
8231 static bool user_suspended_cluster_powerdown = false;
8232 
8233 kern_return_t
8234 suspend_cluster_powerdown_from_user(void)
8235 {
8236 	kern_return_t ret = KERN_FAILURE;
8237 
8238 	lck_mtx_lock(&user_cluster_powerdown_lock);
8239 
8240 	if (!user_suspended_cluster_powerdown) {
8241 		suspend_cluster_powerdown();
8242 		user_suspended_cluster_powerdown = true;
8243 		ret = KERN_SUCCESS;
8244 	}
8245 
8246 	lck_mtx_unlock(&user_cluster_powerdown_lock);
8247 
8248 	return ret;
8249 }
8250 
8251 kern_return_t
8252 resume_cluster_powerdown_from_user(void)
8253 {
8254 	kern_return_t ret = KERN_FAILURE;
8255 
8256 	lck_mtx_lock(&user_cluster_powerdown_lock);
8257 
8258 	if (user_suspended_cluster_powerdown) {
8259 		resume_cluster_powerdown();
8260 		user_suspended_cluster_powerdown = false;
8261 		ret = KERN_SUCCESS;
8262 	}
8263 
8264 	lck_mtx_unlock(&user_cluster_powerdown_lock);
8265 
8266 	return ret;
8267 }
8268 
8269 int
8270 get_cluster_powerdown_user_suspended(void)
8271 {
8272 	lck_mtx_lock(&user_cluster_powerdown_lock);
8273 
8274 	int ret = (int)user_suspended_cluster_powerdown;
8275 
8276 	lck_mtx_unlock(&user_cluster_powerdown_lock);
8277 
8278 	return ret;
8279 }
8280 
8281 #if DEVELOPMENT || DEBUG
8282 /* Functions to support the temporary sysctl */
8283 static uint64_t saved_requested_powered_cores = ALL_CORES_POWERED;
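/*
 * Layout of the sysctl value as decoded by sched_set_powered_cores() below
 * (informal summary, not an ABI guarantee): bits 28:0 carry the requested
 * core bitmask, bit 31 selects REASON_CLPC_USER instead of REASON_CLPC_SYSTEM,
 * and POWERED_CORES_OPTIONS_MASK extracts the option flags that are passed
 * through to sched_perfcontrol_update_powered_cores().
 */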
8284 void
8285 sched_set_powered_cores(int requested_powered_cores)
8286 {
8287 	processor_reason_t reason = bit_test(requested_powered_cores, 31) ? REASON_CLPC_USER : REASON_CLPC_SYSTEM;
8288 	sched_powered_cores_flags_t flags = requested_powered_cores & POWERED_CORES_OPTIONS_MASK;
8289 
8290 	saved_requested_powered_cores = requested_powered_cores;
8291 
8292 	requested_powered_cores = bits(requested_powered_cores, 28, 0);
8293 
8294 	sched_perfcontrol_update_powered_cores(requested_powered_cores, reason, flags);
8295 }
8296 int
8297 sched_get_powered_cores(void)
8298 {
8299 	return (int)saved_requested_powered_cores;
8300 }
8301 
8302 uint64_t
8303 sched_sysctl_get_recommended_cores(void)
8304 {
8305 	return pcs.pcs_recommended_cores;
8306 }
8307 #endif
8308 
8309 /*
8310  * Ensure that all cores are powered and recommended before sleep
8311  * Acquires cluster_powerdown_lock and returns with it held.
8312  */
8313 void
8314 sched_override_available_cores_for_sleep(void)
8315 {
8316 	if (!pcs.pcs_init_completed) {
8317 		panic("Attempting to sleep before all CPUs are registered");
8318 	}
8319 
8320 	lck_mtx_lock(&cluster_powerdown_lock);
8321 
8322 	spl_t s = splsched();
8323 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8324 
8325 	assert(pcs.pcs_sleep_override_recommended == false);
8326 
8327 	pcs.pcs_sleep_override_recommended = true;
8328 	sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
8329 
8330 	simple_unlock(&sched_available_cores_lock);
8331 	splx(s);
8332 
8333 	suspend_cluster_powerdown_locked(true);
8334 }
8335 
8336 /*
8337  * Restore the previously recommended cores, but leave all cores powered
8338  * after sleep.
8339  * Called with cluster_powerdown_lock still held, releases the lock.
8340  */
8341 void
8342 sched_restore_available_cores_after_sleep(void)
8343 {
8344 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
8345 
8346 	spl_t s = splsched();
8347 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8348 	assert(pcs.pcs_sleep_override_recommended == true);
8349 
8350 	pcs.pcs_sleep_override_recommended = false;
8351 	sched_update_recommended_cores_locked(REASON_NONE, 0);
8352 
8353 	simple_unlock(&sched_available_cores_lock);
8354 	splx(s);
8355 
8356 	resume_cluster_powerdown_locked(true);
8357 
8358 	lck_mtx_unlock(&cluster_powerdown_lock);
8359 
8360 	if (sched_enable_smt == 0) {
8361 		enable_smt_processors(false);
8362 	}
8363 }
8364 
8365 #if __arm__ || __arm64__
8366 
8367 uint64_t    perfcontrol_failsafe_maintenance_runnable_time;
8368 uint64_t    perfcontrol_failsafe_activation_time;
8369 uint64_t    perfcontrol_failsafe_deactivation_time;
8370 
8371 /* data covering who likely caused it and how long they ran */
8372 #define FAILSAFE_NAME_LEN       33 /* (2*MAXCOMLEN)+1 from size of p_name */
8373 char        perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
8374 int         perfcontrol_failsafe_pid;
8375 uint64_t    perfcontrol_failsafe_tid;
8376 uint64_t    perfcontrol_failsafe_thread_timer_at_start;
8377 uint64_t    perfcontrol_failsafe_thread_timer_last_seen;
8378 uint64_t    perfcontrol_failsafe_recommended_at_trigger;
8379 
8380 /*
8381  * Perf controller calls here to update the recommended core bitmask.
8382  * If the failsafe is active, we don't immediately apply the new value.
8383  * Instead, we store the new request and use it after the failsafe deactivates.
8384  *
8385  * If the failsafe is not active, immediately apply the update.
8386  *
8387  * No scheduler locks are held, no other locks are held that scheduler might depend on,
8388  * interrupts are enabled
8389  *
8390  * Currently the prototype is in osfmk/arm/machine_routines.h
8391  */
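/*
 * Illustrative example (hypothetical values, not a CLPC-provided snippet):
 * on a six-core system, derecommending cpus 4 and 5 on behalf of the system
 * portion of the perf controller would look like
 *
 *	sched_perfcontrol_update_recommended_cores_reason(0x0F, REASON_CLPC_SYSTEM, 0);
 *
 * leaving only cpus 0-3 recommended once any active failsafe clears.
 */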
8392 void
8393 sched_perfcontrol_update_recommended_cores_reason(
8394 	uint64_t                recommended_cores,
8395 	processor_reason_t      reason,
8396 	__unused uint32_t       flags)
8397 {
8398 	assert(preemption_enabled());
8399 
8400 	spl_t s = splsched();
8401 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8402 
8403 	if (reason == REASON_CLPC_SYSTEM) {
8404 		pcs.pcs_requested_recommended_clpc_system = recommended_cores;
8405 	} else {
8406 		assert(reason == REASON_CLPC_USER);
8407 		pcs.pcs_requested_recommended_clpc_user = recommended_cores;
8408 	}
8409 
8410 	pcs.pcs_requested_recommended_clpc = pcs.pcs_requested_recommended_clpc_system &
8411 	    pcs.pcs_requested_recommended_clpc_user;
8412 
8413 	sysctl_sched_recommended_cores = pcs.pcs_requested_recommended_clpc;
8414 
8415 	sched_update_recommended_cores_locked(reason, 0);
8416 
8417 	simple_unlock(&sched_available_cores_lock);
8418 	splx(s);
8419 }
8420 
8421 void
8422 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
8423 {
8424 	sched_perfcontrol_update_recommended_cores_reason(recommended_cores, REASON_CLPC_USER, 0);
8425 }
8426 
8427 /*
8428  * Consider whether we need to activate the recommended cores failsafe
8429  *
8430  * Called from quantum timer interrupt context of a realtime thread
8431  * No scheduler locks are held, interrupts are disabled
8432  */
8433 void
8434 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
8435 {
8436 	/*
8437 	 * Check if a realtime thread is starving the system
8438 	 * and bringing up non-recommended cores would help
8439 	 *
8440 	 * TODO: Is this the correct check for recommended == possible cores?
8441 	 * TODO: Validate that the checks done without the relevant lock are OK.
8442 	 */
8443 
8444 	if (__improbable(pcs.pcs_recommended_clpc_failsafe_active)) {
8445 		/* keep track of how long the responsible thread runs */
8446 		uint64_t cur_th_time = recount_current_thread_time_mach();
8447 
8448 		simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8449 
8450 		if (pcs.pcs_recommended_clpc_failsafe_active &&
8451 		    cur_thread->thread_id == perfcontrol_failsafe_tid) {
8452 			perfcontrol_failsafe_thread_timer_last_seen = cur_th_time;
8453 		}
8454 
8455 		simple_unlock(&sched_available_cores_lock);
8456 
8457 		/* we're already trying to solve the problem, so bail */
8458 		return;
8459 	}
8460 
8461 	/* The failsafe won't help if there are no more processors to enable */
8462 	if (__probable(bit_count(pcs.pcs_requested_recommended_clpc) >= processor_count)) {
8463 		return;
8464 	}
8465 
8466 	uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
8467 
8468 	/* Use the maintenance thread as our canary in the coal mine */
8469 	thread_t m_thread = sched_maintenance_thread;
8470 
8471 	/* If it doesn't look bad, nothing to see here */
8472 	if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
8473 		return;
8474 	}
8475 
8476 	/* It looks bad, take the lock to be sure */
8477 	thread_lock(m_thread);
8478 
8479 	if (thread_get_runq(m_thread) == PROCESSOR_NULL ||
8480 	    (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
8481 	    m_thread->last_made_runnable_time >= too_long_ago) {
8482 		/*
8483 		 * Maintenance thread is either on cpu or blocked, and
8484 		 * therefore wouldn't benefit from more cores
8485 		 */
8486 		thread_unlock(m_thread);
8487 		return;
8488 	}
8489 
8490 	uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
8491 
8492 	thread_unlock(m_thread);
8493 
8494 	/*
8495 	 * There are cores disabled at perfcontrol's recommendation, but the
8496 	 * system is so overloaded that the maintenance thread can't run.
8497 	 * That likely means that perfcontrol can't run either, so it can't fix
8498 	 * the recommendation.  We have to kick in a failsafe to keep from starving.
8499 	 *
8500 	 * When the maintenance thread has been starved for too long,
8501 	 * ignore the recommendation from perfcontrol and light up all the cores.
8502 	 *
8503 	 * TODO: Consider weird states like boot, sleep, or debugger
8504 	 */
8505 
8506 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8507 
8508 	if (pcs.pcs_recommended_clpc_failsafe_active) {
8509 		simple_unlock(&sched_available_cores_lock);
8510 		return;
8511 	}
8512 
8513 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8514 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
8515 	    pcs.pcs_requested_recommended_clpc, maintenance_runnable_time, 0, 0, 0);
8516 
8517 	pcs.pcs_recommended_clpc_failsafe_active = true;
8518 	perfcontrol_failsafe_activation_time = mach_absolute_time();
8519 	perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
8520 	perfcontrol_failsafe_recommended_at_trigger = pcs.pcs_requested_recommended_clpc;
8521 
8522 	/* Capture some data about who screwed up (assuming that the thread on core is at fault) */
8523 	task_t task = get_threadtask(cur_thread);
8524 	perfcontrol_failsafe_pid = task_pid(task);
8525 	strlcpy(perfcontrol_failsafe_name, proc_name_address(get_bsdtask_info(task)), sizeof(perfcontrol_failsafe_name));
8526 
8527 	perfcontrol_failsafe_tid = cur_thread->thread_id;
8528 
8529 	/* Blame the thread for time it has run recently */
8530 	uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
8531 
8532 	uint64_t last_seen = recount_current_thread_time_mach();
8533 
8534 	/* Compute the start time of the bad behavior in terms of the thread's on core time */
8535 	perfcontrol_failsafe_thread_timer_at_start  = last_seen - recent_computation;
8536 	perfcontrol_failsafe_thread_timer_last_seen = last_seen;
8537 
8538 	/* Publish the pcs_recommended_clpc_failsafe_active override to the CPUs */
8539 	sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
8540 
8541 	simple_unlock(&sched_available_cores_lock);
8542 }
8543 
8544 /*
8545  * Now that our bacon has been saved by the failsafe, consider whether to turn it off
8546  *
8547  * Runs in the context of the maintenance thread, no locks held
8548  */
8549 static void
8550 sched_recommended_cores_maintenance(void)
8551 {
8552 	/* Common case - no failsafe, nothing to be done here */
8553 	if (__probable(!pcs.pcs_recommended_clpc_failsafe_active)) {
8554 		return;
8555 	}
8556 
8557 	uint64_t ctime = mach_absolute_time();
8558 
8559 	boolean_t print_diagnostic = FALSE;
8560 	char p_name[FAILSAFE_NAME_LEN] = "";
8561 
8562 	spl_t s = splsched();
8563 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8564 
8565 	/* Check again, under the lock, to avoid races */
8566 	if (!pcs.pcs_recommended_clpc_failsafe_active) {
8567 		goto out;
8568 	}
8569 
8570 	/*
8571 	 * Ensure that the other cores get another few ticks to run some threads
8572 	 * If we don't have this hysteresis, the maintenance thread is the first
8573 	 * to run, and then it immediately kills the other cores
8574 	 */
8575 	if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
8576 		goto out;
8577 	}
8578 
8579 	/* Capture some diagnostic state under the lock so we can print it out later */
8580 
8581 	int      pid = perfcontrol_failsafe_pid;
8582 	uint64_t tid = perfcontrol_failsafe_tid;
8583 
8584 	uint64_t thread_usage       = perfcontrol_failsafe_thread_timer_last_seen -
8585 	    perfcontrol_failsafe_thread_timer_at_start;
8586 	uint64_t rec_cores_before   = perfcontrol_failsafe_recommended_at_trigger;
8587 	uint64_t rec_cores_after    = pcs.pcs_requested_recommended_clpc;
8588 	uint64_t failsafe_duration  = ctime - perfcontrol_failsafe_activation_time;
8589 	strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
8590 
8591 	print_diagnostic = TRUE;
8592 
8593 	/* Deactivate the failsafe and reinstate the requested recommendation settings */
8594 
8595 	perfcontrol_failsafe_deactivation_time = ctime;
8596 	pcs.pcs_recommended_clpc_failsafe_active = false;
8597 
8598 	sched_update_recommended_cores_locked(REASON_SYSTEM, 0);
8599 
8600 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8601 	    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
8602 	    pcs.pcs_requested_recommended_clpc, failsafe_duration, 0, 0, 0);
8603 
8604 out:
8605 	simple_unlock(&sched_available_cores_lock);
8606 	splx(s);
8607 
8608 	if (print_diagnostic) {
8609 		uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
8610 
8611 		absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
8612 		failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
8613 
8614 		absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
8615 		thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
8616 
8617 		printf("recommended core failsafe kicked in for %lld ms "
8618 		    "likely due to %s[%d] thread 0x%llx spending "
8619 		    "%lld ms on cpu at realtime priority - "
8620 		    "new recommendation: 0x%llx -> 0x%llx\n",
8621 		    failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
8622 		    rec_cores_before, rec_cores_after);
8623 	}
8624 }
8625 
8626 #endif /* __arm__ || __arm64__ */
8627 
8628 /*
8629  * This is true before we have jumped into the first thread context
8630  * (kernel_bootstrap_thread) during boot, or while all processors
8631  * have been offlined during system sleep and the scheduler is disabled.
8632  *
8633  * (Note: only ever true on ARM; Intel doesn't actually offline the last CPU)
8634  */
8635 bool
8636 sched_all_cpus_offline(void)
8637 {
8638 	return pcs.pcs_effective.pcs_online_cores == 0;
8639 }
8640 
8641 void
8642 sched_assert_not_last_online_cpu(__assert_only int cpu_id)
8643 {
8644 	assertf(pcs.pcs_effective.pcs_online_cores != BIT(cpu_id),
8645 	    "attempting to shut down the last online CPU!");
8646 }
8647 
8648 /*
8649  * This is the single unified function for changing the published active core counts based on processor mode.
8650  * Each type of flag affects how the others change the counts.
8651  *
8652  * Future: Add support for not decrementing counts in 'temporary derecommended online' mode
8653  * Future: Shutdown for system sleep should be 'temporary' according to the user counts
8654  * so that no client sees a transiently low number of CPUs.
8655  */
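/*
 * Rough bookkeeping sketch (the switch below is authoritative): a processor
 * contributes to processor_avail_count while it is either online or marked
 * shutdown_temporary, and to the *_user variants only while it is also
 * recommended; the primary_* counters track the same conditions restricted
 * to primary processors.
 */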
8656 void
8657 sched_processor_change_mode_locked(processor_t processor, processor_mode_t pcm_mode, bool set)
8658 {
8659 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
8660 	pset_assert_locked(processor->processor_set);
8661 
8662 	switch (pcm_mode) {
8663 	case PCM_RECOMMENDED:
8664 		if (set) {
8665 			assert(!processor->is_recommended);
8666 			assert(!bit_test(pcs.pcs_recommended_cores, processor->cpu_id));
8667 
8668 			processor->is_recommended = true;
8669 			bit_set(pcs.pcs_recommended_cores, processor->cpu_id);
8670 
8671 			if (processor->processor_online) {
8672 				os_atomic_inc(&processor_avail_count_user, relaxed);
8673 				if (processor->processor_primary == processor) {
8674 					os_atomic_inc(&primary_processor_avail_count_user, relaxed);
8675 				}
8676 			}
8677 		} else {
8678 			assert(processor->is_recommended);
8679 			assert(bit_test(pcs.pcs_recommended_cores, processor->cpu_id));
8680 
8681 			processor->is_recommended = false;
8682 			bit_clear(pcs.pcs_recommended_cores, processor->cpu_id);
8683 
8684 			if (processor->processor_online) {
8685 				os_atomic_dec(&processor_avail_count_user, relaxed);
8686 				if (processor->processor_primary == processor) {
8687 					os_atomic_dec(&primary_processor_avail_count_user, relaxed);
8688 				}
8689 			}
8690 		}
8691 		break;
8692 	case PCM_TEMPORARY:
8693 		if (set) {
8694 			assert(!processor->shutdown_temporary);
8695 			assert(!bit_test(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id));
8696 
8697 			processor->shutdown_temporary = true;
8698 			bit_set(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id);
8699 
8700 			if (!processor->processor_online) {
8701 				goto counts_up;
8702 			}
8703 		} else {
8704 			assert(processor->shutdown_temporary);
8705 			assert(bit_test(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id));
8706 
8707 			processor->shutdown_temporary = false;
8708 			bit_clear(pcs.pcs_effective.pcs_tempdown_cores, processor->cpu_id);
8709 
8710 			if (!processor->processor_online) {
8711 				goto counts_down;
8712 			}
8713 		}
8714 		break;
8715 	case PCM_ONLINE:
8716 		if (set) {
8717 			assert(!processor->processor_online);
8718 			assert(!bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
8719 			processor->processor_online = true;
8720 			bit_set(pcs.pcs_effective.pcs_online_cores, processor->cpu_id);
8721 
8722 			if (!processor->shutdown_temporary) {
8723 				goto counts_up;
8724 			}
8725 		} else {
8726 			assert(processor->processor_online);
8727 			assert(bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
8728 			processor->processor_online = false;
8729 			bit_clear(pcs.pcs_effective.pcs_online_cores, processor->cpu_id);
8730 
8731 			if (!processor->shutdown_temporary) {
8732 				goto counts_down;
8733 			}
8734 		}
8735 		break;
8736 	default:
8737 		panic("unknown mode %d", pcm_mode);
8738 	}
8739 
8740 	return;
8741 
8742 counts_up:
8743 	ml_cpu_up_update_counts(processor->cpu_id);
8744 
8745 	os_atomic_inc(&processor_avail_count, relaxed);
8746 
8747 	if (processor->is_recommended) {
8748 		os_atomic_inc(&processor_avail_count_user, relaxed);
8749 	}
8750 	if (processor->processor_primary == processor) {
8751 		os_atomic_inc(&primary_processor_avail_count, relaxed);
8752 		if (processor->is_recommended) {
8753 			os_atomic_inc(&primary_processor_avail_count_user, relaxed);
8754 		}
8755 	}
8756 	commpage_update_active_cpus();
8757 
8758 	return;
8759 
8760 counts_down:
8761 	ml_cpu_down_update_counts(processor->cpu_id);
8762 
8763 	os_atomic_dec(&processor_avail_count, relaxed);
8764 
8765 	if (processor->is_recommended) {
8766 		os_atomic_dec(&processor_avail_count_user, relaxed);
8767 	}
8768 	if (processor->processor_primary == processor) {
8769 		os_atomic_dec(&primary_processor_avail_count, relaxed);
8770 		if (processor->is_recommended) {
8771 			os_atomic_dec(&primary_processor_avail_count_user, relaxed);
8772 		}
8773 	}
8774 	commpage_update_active_cpus();
8775 
8776 	return;
8777 }
8778 
8779 bool
8780 sched_mark_processor_online(processor_t processor, __assert_only processor_reason_t reason)
8781 {
8782 	assert(processor == current_processor());
8783 
8784 	processor_set_t pset = processor->processor_set;
8785 
8786 	spl_t s = splsched();
8787 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8788 	pset_lock(pset);
8789 
8790 	/* Boot CPU coming online for the first time, either at boot or after sleep */
8791 	bool is_first_online_processor = sched_all_cpus_offline();
8792 	if (is_first_online_processor) {
8793 		assert(processor == master_processor);
8794 	}
8795 
8796 	assert((processor != master_processor) || (reason == REASON_SYSTEM) || support_bootcpu_shutdown);
8797 
8798 	sched_processor_change_mode_locked(processor, PCM_ONLINE, true);
8799 
8800 	assert(processor->processor_offline_state == PROCESSOR_OFFLINE_STARTING ||
8801 	    processor->processor_offline_state == PROCESSOR_OFFLINE_STARTED_NOT_RUNNING ||
8802 	    processor->processor_offline_state == PROCESSOR_OFFLINE_FINAL_SYSTEM_SLEEP);
8803 
8804 	processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_STARTED_NOT_WAITED);
8805 
8806 	++pset->online_processor_count;
8807 	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
8808 
8809 	if (processor->is_recommended) {
8810 		SCHED(pset_made_schedulable)(processor, pset, false); /* May relock the pset lock */
8811 	}
8812 	pset_unlock(pset);
8813 
8814 	smr_cpu_up(processor, SMR_CPU_REASON_OFFLINE);
8815 
8816 	simple_unlock(&sched_available_cores_lock);
8817 	splx(s);
8818 
8819 	return is_first_online_processor;
8820 }
8821 
8822 void
8823 sched_mark_processor_offline(processor_t processor, bool is_final_system_sleep)
8824 {
8825 	assert(processor == current_processor());
8826 
8827 	processor_set_t pset = processor->processor_set;
8828 
8829 	spl_t s = splsched();
8830 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
8831 
8832 	assert(bit_test(pcs.pcs_effective.pcs_online_cores, processor->cpu_id));
8833 	assert(processor->processor_offline_state == PROCESSOR_OFFLINE_BEGIN_SHUTDOWN);
8834 
8835 	if (!is_final_system_sleep) {
8836 		/*
8837 		 * We can't shut down the last available core!
8838 		 * Force recommend another CPU if this is the last one.
8839 		 */
8840 
8841 		if ((pcs.pcs_effective.pcs_online_cores & pcs.pcs_recommended_cores) == BIT(processor->cpu_id)) {
8842 			sched_update_recommended_cores_locked(REASON_SYSTEM, BIT(processor->cpu_id));
8843 		}
8844 
8845 		/* If we're still the last one, something went wrong. */
8846 		if ((pcs.pcs_effective.pcs_online_cores & pcs.pcs_recommended_cores) == BIT(processor->cpu_id)) {
8847 			panic("shutting down the last available core! online: 0x%llx rec: 0x%llx",
8848 			    pcs.pcs_effective.pcs_online_cores,
8849 			    pcs.pcs_recommended_cores);
8850 		}
8851 	}
8852 
8853 	pset_lock(pset);
8854 	assert(processor->state == PROCESSOR_RUNNING);
8855 	assert(processor->processor_inshutdown);
8856 	pset_update_processor_state(pset, processor, PROCESSOR_PENDING_OFFLINE);
8857 	--pset->online_processor_count;
8858 
8859 	sched_processor_change_mode_locked(processor, PCM_ONLINE, false);
8860 
8861 	if (is_final_system_sleep) {
8862 		assert3u(pcs.pcs_effective.pcs_online_cores, ==, 0);
8863 		assert(processor == master_processor);
8864 		assert(sched_all_cpus_offline());
8865 
8866 		processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_FINAL_SYSTEM_SLEEP);
8867 	} else {
8868 		processor_update_offline_state_locked(processor, PROCESSOR_OFFLINE_PENDING_OFFLINE);
8869 	}
8870 
8871 	simple_unlock(&sched_available_cores_lock);
8872 
8873 	SCHED(processor_queue_shutdown)(processor);
8874 	/* pset lock dropped */
8875 	SCHED(rt_queue_shutdown)(processor);
8876 
8877 	splx(s);
8878 }
8879 
8880 /*
8881  * Apply a new recommended cores mask to the processors it affects
8882  * Runs after considering failsafes and such
8883  *
8884  * Iterate over processors and update their ->is_recommended field.
8885  * If a processor is running, we let it drain out at its next
8886  * quantum expiration or blocking point. If a processor is idle, there
8887  * may be more work for it to do, so IPI it.
8888  *
8889  * interrupts disabled, sched_available_cores_lock is held
8890  *
8891  * If a core is about to go offline, its bit will be set in core_going_offline,
8892  * so we can make sure not to pick it as the last resort cpu.
8893  */
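/*
 * Illustrative scenario (hypothetical masks): if only cpu 2 (0b0100) ends up
 * recommended but cpu 2 is also in core_going_offline, the intersection with
 * the online mask is empty, so the code below force-recommends a CPU of last
 * resort - the master processor, or the lowest-numbered still-online CPU when
 * boot-CPU shutdown is supported - to keep the system making forward progress.
 */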
8894 static void
8895 sched_update_recommended_cores_locked(processor_reason_t reason,
8896     cpumap_t core_going_offline)
8897 {
8898 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
8899 
8900 	cpumap_t recommended_cores = pcs.pcs_requested_recommended_clpc;
8901 
8902 	if (pcs.pcs_init_completed) {
8903 		recommended_cores &= pcs.pcs_effective.pcs_powerdown_recommended_cores;
8904 	}
8905 
8906 	if (pcs.pcs_sleep_override_recommended || pcs.pcs_recommended_clpc_failsafe_active) {
8907 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
8908 		    MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
8909 		    recommended_cores,
8910 		    sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
8911 
8912 		recommended_cores = pcs.pcs_managed_cores;
8913 	}
8914 
8915 	if (bit_count(recommended_cores & pcs.pcs_effective.pcs_online_cores & ~core_going_offline) == 0) {
8916 		/*
8917 		 * If there are no online cpus recommended,
8918 		 * then the system will make no forward progress.
8919 		 * Pick a CPU of last resort to avoid hanging.
8920 		 */
8921 		int last_resort;
8922 
8923 		if (!support_bootcpu_shutdown) {
8924 			/* We know the master_processor is always available */
8925 			last_resort = master_processor->cpu_id;
8926 		} else {
8927 			/* Pick some still-online processor to be the processor of last resort */
8928 			last_resort = lsb_first(pcs.pcs_effective.pcs_online_cores & ~core_going_offline);
8929 
8930 			if (last_resort == -1) {
8931 				panic("%s> no last resort cpu found: 0x%llx 0x%llx",
8932 				    __func__, pcs.pcs_effective.pcs_online_cores, core_going_offline);
8933 			}
8934 		}
8935 
8936 		bit_set(recommended_cores, last_resort);
8937 	}
8938 
8939 	if (pcs.pcs_recommended_cores == recommended_cores) {
8940 		/* Nothing to do */
8941 		return;
8942 	}
8943 
8944 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) |
8945 	    DBG_FUNC_START,
8946 	    recommended_cores,
8947 	    pcs.pcs_recommended_clpc_failsafe_active, pcs.pcs_sleep_override_recommended, 0);
8948 
8949 	cpumap_t needs_exit_idle_mask = 0x0;
8950 
8951 	/* First set recommended cores */
8952 	foreach_node(node) {
8953 		foreach_pset_id(pset_id, node) {
8954 			processor_set_t pset = pset_array[pset_id];
8955 
8956 			cpumap_t changed_recommendations = (recommended_cores & pset->cpu_bitmask) ^ pset->recommended_bitmask;
8957 			cpumap_t newly_recommended = changed_recommendations & recommended_cores;
8958 
8959 			if (newly_recommended == 0) {
8960 				/* Nothing to do */
8961 				continue;
8962 			}
8963 
8964 			pset_lock(pset);
8965 
8966 			cpumap_foreach(cpu_id, newly_recommended) {
8967 				processor_t processor = processor_array[cpu_id];
8968 
8969 				sched_processor_change_mode_locked(processor, PCM_RECOMMENDED, true);
8970 
8971 				processor->last_recommend_reason = reason;
8972 
8973 				if (pset->recommended_bitmask == 0) {
8974 					/* Cluster is becoming available for scheduling */
8975 					atomic_bit_set(&pset->node->pset_recommended_map, pset->pset_id, memory_order_relaxed);
8976 				}
8977 				bit_set(pset->recommended_bitmask, processor->cpu_id);
8978 
8979 				if (processor->state == PROCESSOR_IDLE) {
8980 					if (processor != current_processor()) {
8981 						bit_set(needs_exit_idle_mask, processor->cpu_id);
8982 					}
8983 				}
8984 
8985 				if (processor->processor_online) {
8986 					SCHED(pset_made_schedulable)(processor, pset, false); /* May relock the pset lock */
8987 				}
8988 			}
8989 			pset_update_rt_stealable_state(pset);
8990 
8991 			pset_unlock(pset);
8992 
8993 			cpumap_foreach(cpu_id, newly_recommended) {
8994 				smr_cpu_up(processor_array[cpu_id],
8995 				    SMR_CPU_REASON_IGNORED);
8996 			}
8997 		}
8998 	}
8999 
9000 	/* Now shutdown not recommended cores */
9001 	foreach_node(node) {
9002 		foreach_pset_id(pset_id, node) {
9003 			processor_set_t pset = pset_array[pset_id];
9004 
9005 			cpumap_t changed_recommendations = (recommended_cores & pset->cpu_bitmask) ^ pset->recommended_bitmask;
9006 			cpumap_t newly_unrecommended = changed_recommendations & ~recommended_cores;
9007 
9008 			if (newly_unrecommended == 0) {
9009 				/* Nothing to do */
9010 				continue;
9011 			}
9012 
9013 			cpumap_foreach(cpu_id, newly_unrecommended) {
9014 				processor_t processor = processor_array[cpu_id];
9015 				sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
9016 
9017 				pset_lock(pset);
9018 
9019 				sched_processor_change_mode_locked(processor, PCM_RECOMMENDED, false);
9020 
9021 				if (reason != REASON_NONE) {
9022 					processor->last_derecommend_reason = reason;
9023 				}
9024 				bit_clear(pset->recommended_bitmask, processor->cpu_id);
9025 				pset_update_rt_stealable_state(pset);
9026 				if (pset->recommended_bitmask == 0) {
9027 					/* Cluster is becoming unavailable for scheduling */
9028 					atomic_bit_clear(&pset->node->pset_recommended_map, pset->pset_id, memory_order_relaxed);
9029 				}
9030 
9031 				if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
9032 					ipi_type = SCHED_IPI_IMMEDIATE;
9033 				}
9034 				SCHED(processor_queue_shutdown)(processor);
9035 				/* pset unlocked */
9036 
9037 				SCHED(rt_queue_shutdown)(processor);
9038 
9039 				if (ipi_type == SCHED_IPI_NONE) {
9040 					/*
9041 					 * If the core is idle,
9042 					 * we can directly mark the processor
9043 					 * as "Ignored"
9044 					 *
9045 					 * Otherwise, smr will detect this
9046 					 * during smr_cpu_leave() when the
9047 					 * processor actually idles.
9048 					 */
9049 					smr_cpu_down(processor, SMR_CPU_REASON_IGNORED);
9050 				} else if (processor == current_processor()) {
9051 					ast_on(AST_PREEMPT);
9052 				} else {
9053 					sched_ipi_perform(processor, ipi_type);
9054 				}
9055 			}
9056 		}
9057 	}
9058 
9059 	if (pcs.pcs_init_completed) {
9060 		assert3u(pcs.pcs_recommended_cores, ==, recommended_cores);
9061 	}
9062 
9063 #if defined(__x86_64__)
9064 	commpage_update_active_cpus();
9065 #endif
9066 	/* Issue all pending IPIs now that the pset lock has been dropped */
9067 	cpumap_foreach(cpu_id, needs_exit_idle_mask) {
9068 		processor_t processor = processor_array[cpu_id];
9069 		machine_signal_idle(processor);
9070 	}
9071 
9072 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
9073 	    needs_exit_idle_mask, 0, 0, 0);
9074 }
9075 
9076 /*
9077  * Enters with the available cores lock held, returns with it held, but will drop it in the meantime.
9078  * Enters with the cluster_powerdown_lock held, returns with it held, keeps it held.
9079  */
9080 static void
9081 sched_update_powered_cores_drops_lock(processor_reason_t requested_reason, spl_t caller_s)
9082 {
9083 	lck_mtx_assert(&cluster_powerdown_lock, LCK_MTX_ASSERT_OWNED);
9084 	simple_lock_assert(&sched_available_cores_lock, LCK_ASSERT_OWNED);
9085 
9086 	assert(ml_get_interrupts_enabled() == false);
9087 	assert(caller_s == true); /* Caller must have had interrupts enabled when they took the lock */
9088 
9089 	/* All transitions should be quiesced before we start changing things */
9090 	assert_no_processors_in_transition_locked();
9091 
9092 	pcs.pcs_in_flight_reason = requested_reason;
9093 
9094 	struct powered_cores_state requested = sched_compute_requested_powered_cores();
9095 	struct powered_cores_state effective = pcs.pcs_effective;
9096 
9097 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UPDATE_POWERED_CORES) | DBG_FUNC_START,
9098 	    requested.pcs_online_cores, requested_reason, 0, effective.pcs_online_cores);
9099 
9100 	/* The bits that are different and in the new value */
9101 	cpumap_t newly_online_cores = (requested.pcs_online_cores ^
9102 	    effective.pcs_online_cores) & requested.pcs_online_cores;
9103 
9104 	/* The bits that are different and are not in the new value */
9105 	cpumap_t newly_offline_cores = (requested.pcs_online_cores ^
9106 	    effective.pcs_online_cores) & ~requested.pcs_online_cores;
9107 
9108 	cpumap_t newly_recommended_cores = (requested.pcs_powerdown_recommended_cores ^
9109 	    effective.pcs_powerdown_recommended_cores) & requested.pcs_powerdown_recommended_cores;
9110 
9111 	cpumap_t newly_derecommended_cores = (requested.pcs_powerdown_recommended_cores ^
9112 	    effective.pcs_powerdown_recommended_cores) & ~requested.pcs_powerdown_recommended_cores;
9113 
9114 	cpumap_t newly_temporary_cores = (requested.pcs_tempdown_cores ^
9115 	    effective.pcs_tempdown_cores) & requested.pcs_tempdown_cores;
9116 
9117 	cpumap_t newly_nontemporary_cores = (requested.pcs_tempdown_cores ^
9118 	    effective.pcs_tempdown_cores) & ~requested.pcs_tempdown_cores;
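	/*
	 * Worked example of the delta computations above (hypothetical masks):
	 * with requested.pcs_online_cores == 0b1100 and
	 * effective.pcs_online_cores == 0b1010, the XOR is 0b0110, so
	 * newly_online_cores == 0b0100 (cpu 2 powering up) and
	 * newly_offline_cores == 0b0010 (cpu 1 powering down). The temporary and
	 * powerdown-recommended deltas are computed the same way.
	 */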
9119 
9120 	/*
9121 	 * Newly online and derecommended cores should be derecommended
9122 	 * before powering them up, so they never run around doing stuff
9123 	 * before we reach the end of this function.
9124 	 */
9125 
9126 	cpumap_t newly_online_and_derecommended = newly_online_cores & newly_derecommended_cores;
9127 
9128 	/*
9129 	 * Publish the goal state we're working on achieving.
9130 	 * At the end of this function, pcs_effective will match this.
9131 	 */
9132 	pcs.pcs_requested = requested;
9133 
9134 	pcs.pcs_effective.pcs_powerdown_recommended_cores |= newly_recommended_cores;
9135 	pcs.pcs_effective.pcs_powerdown_recommended_cores &= ~newly_online_and_derecommended;
9136 
9137 	sched_update_recommended_cores_locked(requested_reason, 0);
9138 
9139 	simple_unlock(&sched_available_cores_lock);
9140 	splx(caller_s);
9141 
9142 	assert(ml_get_interrupts_enabled() == true);
9143 
9144 	/* First set powered cores */
9145 	cpumap_t started_cores = 0ull;
9146 	foreach_node(node) {
9147 		foreach_pset_id(pset_id, node) {
9148 			processor_set_t pset = pset_array[pset_id];
9149 
9150 			spl_t s = splsched();
9151 			pset_lock(pset);
9152 			cpumap_t pset_newly_online = newly_online_cores & pset->cpu_bitmask;
9153 
9154 			__assert_only cpumap_t pset_online_cores =
9155 			    pset->cpu_state_map[PROCESSOR_START] |
9156 			    pset->cpu_state_map[PROCESSOR_IDLE] |
9157 			    pset->cpu_state_map[PROCESSOR_DISPATCHING] |
9158 			    pset->cpu_state_map[PROCESSOR_RUNNING];
9159 			assert((pset_online_cores & pset_newly_online) == 0);
9160 
9161 			pset_unlock(pset);
9162 			splx(s);
9163 
9164 			if (pset_newly_online == 0) {
9165 				/* Nothing to do */
9166 				continue;
9167 			}
9168 			cpumap_foreach(cpu_id, pset_newly_online) {
9169 				processor_start_reason(processor_array[cpu_id], requested_reason);
9170 				bit_set(started_cores, cpu_id);
9171 			}
9172 		}
9173 	}
9174 
9175 	/*
9176 	 * Wait for processors to finish starting in parallel.
9177 	 * We never proceed until all newly started processors have finished.
9178 	 *
9179 	 * This has the side effect of closing the ml_cpu_up_processors race,
9180 	 * as all started CPUs must have SIGPdisabled cleared by the time this
9181 	 * is satisfied. (rdar://124631843)
9182 	 */
9183 	cpumap_foreach(cpu_id, started_cores) {
9184 		processor_wait_for_start(processor_array[cpu_id], PROCESSOR_POWERED_CORES_CHANGE);
9185 	}
9186 
9187 	/*
9188 	 * Update published counts of processors to match new temporary status
9189 	 * Publish all temporary before nontemporary, so that any readers that
9190 	 * see a middle state will see a slightly too high count instead of
9191 	 * ending up seeing a 0 (because that crashes dispatch_apply, ask
9192 	 * me how I know)
9193 	 */
9194 
9195 	spl_t s;
9196 	s = splsched();
9197 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
9198 
9199 	foreach_node(node) {
9200 		foreach_pset_id(pset_id, node) {
9201 			processor_set_t pset = pset_array[pset_id];
9202 
9203 			pset_lock(pset);
9204 
9205 			cpumap_t pset_newly_temporary = newly_temporary_cores & pset->cpu_bitmask;
9206 
9207 			cpumap_foreach(cpu_id, pset_newly_temporary) {
9208 				sched_processor_change_mode_locked(processor_array[cpu_id],
9209 				    PCM_TEMPORARY, true);
9210 			}
9211 
9212 			pset_unlock(pset);
9213 		}
9214 	}
9215 
9216 	foreach_node(node) {
9217 		foreach_pset_id(pset_id, node) {
9218 			processor_set_t pset = pset_array[pset_id];
9219 
9220 			pset_lock(pset);
9221 
9222 			cpumap_t pset_newly_nontemporary = newly_nontemporary_cores & pset->cpu_bitmask;
9223 
9224 			cpumap_foreach(cpu_id, pset_newly_nontemporary) {
9225 				sched_processor_change_mode_locked(processor_array[cpu_id],
9226 				    PCM_TEMPORARY, false);
9227 			}
9228 
9229 			pset_unlock(pset);
9230 		}
9231 	}
9232 
9233 	simple_unlock(&sched_available_cores_lock);
9234 	splx(s);
9235 
9236 	/* Now shutdown not powered cores */
9237 	foreach_node(node) {
9238 		foreach_pset_id(pset_id, node) {
9239 			processor_set_t pset = pset_array[pset_id];
9240 
9241 			s = splsched();
9242 			pset_lock(pset);
9243 
9244 			cpumap_t pset_newly_offline = newly_offline_cores & pset->cpu_bitmask;
9245 			__assert_only cpumap_t pset_powered_cores =
9246 			    pset->cpu_state_map[PROCESSOR_START] |
9247 			    pset->cpu_state_map[PROCESSOR_IDLE] |
9248 			    pset->cpu_state_map[PROCESSOR_DISPATCHING] |
9249 			    pset->cpu_state_map[PROCESSOR_RUNNING];
9250 			assert((pset_powered_cores & pset_newly_offline) == pset_newly_offline);
9251 
9252 			pset_unlock(pset);
9253 			splx(s);
9254 
9255 			if (pset_newly_offline == 0) {
9256 				/* Nothing to do */
9257 				continue;
9258 			}
9259 
9260 			cpumap_foreach(cpu_id, pset_newly_offline) {
9261 				processor_exit_reason(processor_array[cpu_id], requested_reason, false);
9262 			}
9263 		}
9264 	}
9265 
9266 	assert(ml_get_interrupts_enabled() == true);
9267 
9268 	s = splsched();
9269 	simple_lock(&sched_available_cores_lock, LCK_GRP_NULL);
9270 
9271 	assert(s == caller_s);
9272 
9273 	pcs.pcs_effective.pcs_powerdown_recommended_cores &= ~newly_derecommended_cores;
9274 
9275 	sched_update_recommended_cores_locked(requested_reason, 0);
9276 
9277 	pcs.pcs_previous_reason = requested_reason;
9278 
9279 	/* All transitions should be quiesced now that we are done changing things */
9280 	assert_no_processors_in_transition_locked();
9281 
9282 	assert3u(pcs.pcs_requested.pcs_online_cores, ==, pcs.pcs_effective.pcs_online_cores);
9283 	assert3u(pcs.pcs_requested.pcs_tempdown_cores, ==, pcs.pcs_effective.pcs_tempdown_cores);
9284 	assert3u(pcs.pcs_requested.pcs_powerdown_recommended_cores, ==, pcs.pcs_effective.pcs_powerdown_recommended_cores);
9285 
9286 	KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UPDATE_POWERED_CORES) | DBG_FUNC_END, 0, 0, 0, 0);
9287 }
9288 
9289 void
9290 thread_set_options(uint32_t thopt)
9291 {
9292 	spl_t x;
9293 	thread_t t = current_thread();
9294 
9295 	x = splsched();
9296 	thread_lock(t);
9297 
9298 	t->options |= thopt;
9299 
9300 	thread_unlock(t);
9301 	splx(x);
9302 }
9303 
9304 void
9305 thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
9306 {
9307 	thread->pending_block_hint = block_hint;
9308 }
9309 
9310 uint32_t
9311 qos_max_parallelism(int qos, uint64_t options)
9312 {
9313 	return SCHED(qos_max_parallelism)(qos, options);
9314 }
9315 
9316 uint32_t
9317 sched_qos_max_parallelism(__unused int qos, uint64_t options)
9318 {
9319 	host_basic_info_data_t hinfo;
9320 	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
9321 
9322 
9323 	/*
9324 	 * The QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE option should be used only on AMP platforms,
9325 	 * which implement their own qos_max_parallelism() interfaces.
9326 	 */
9327 	assert((options & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) == 0);
9328 
9329 	/* Query the machine layer for core information */
9330 	__assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
9331 	    (host_info_t)&hinfo, &count);
9332 	assert(kret == KERN_SUCCESS);
9333 
9334 	if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
9335 		return hinfo.logical_cpu;
9336 	} else {
9337 		return hinfo.physical_cpu;
9338 	}
9339 }
9340 
9341 int sched_allow_NO_SMT_threads = 1;
9342 bool
9343 thread_no_smt(thread_t thread)
9344 {
9345 	return sched_allow_NO_SMT_threads &&
9346 	       (thread->bound_processor == PROCESSOR_NULL) &&
9347 	       ((thread->sched_flags & TH_SFLAG_NO_SMT) || (get_threadtask(thread)->t_flags & TF_NO_SMT));
9348 }
9349 
9350 bool
9351 processor_active_thread_no_smt(processor_t processor)
9352 {
9353 	return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
9354 }
9355 
9356 #if __arm64__
9357 
9358 /*
9359  * Set up or replace old timer with new timer
9360  *
9361  * Returns true if it canceled the old timer, false if it did not
9362  */
9363 boolean_t
9364 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
9365 {
9366 	/*
9367 	 * Exchange the deadline for the new deadline. If the old deadline was
9368 	 * nonzero, then I cancelled the callback; otherwise I didn't.
9369 	 */
9370 
9371 	return os_atomic_xchg(&sched_perfcontrol_callback_deadline, new_deadline,
9372 	           relaxed) != 0;
9373 }
9374 
9375 /*
9376  * Set global SFI window (in usec)
9377  */
9378 kern_return_t
9379 sched_perfcontrol_sfi_set_window(uint64_t window_usecs)
9380 {
9381 	kern_return_t ret = KERN_NOT_SUPPORTED;
9382 #if CONFIG_THREAD_GROUPS
9383 	if (window_usecs == 0ULL) {
9384 		ret = sfi_window_cancel();
9385 	} else {
9386 		ret = sfi_set_window(window_usecs);
9387 	}
9388 #endif // CONFIG_THREAD_GROUPS
9389 	return ret;
9390 }
9391 
9392 /*
9393  * Set background and maintenance SFI class offtimes
9394  */
9395 kern_return_t
9396 sched_perfcontrol_sfi_set_bg_offtime(uint64_t offtime_usecs)
9397 {
9398 	kern_return_t ret = KERN_NOT_SUPPORTED;
9399 #if CONFIG_THREAD_GROUPS
9400 	if (offtime_usecs == 0ULL) {
9401 		ret = sfi_class_offtime_cancel(SFI_CLASS_MAINTENANCE);
9402 		ret |= sfi_class_offtime_cancel(SFI_CLASS_DARWIN_BG);
9403 	} else {
9404 		ret = sfi_set_class_offtime(SFI_CLASS_MAINTENANCE, offtime_usecs);
9405 		ret |= sfi_set_class_offtime(SFI_CLASS_DARWIN_BG, offtime_usecs);
9406 	}
9407 #endif // CONFIG_THREAD_GROUPS
9408 	return ret;
9409 }
9410 
9411 /*
9412  * Set utility SFI class offtime
9413  */
9414 kern_return_t
9415 sched_perfcontrol_sfi_set_utility_offtime(uint64_t offtime_usecs)
9416 {
9417 	kern_return_t ret = KERN_NOT_SUPPORTED;
9418 #if CONFIG_THREAD_GROUPS
9419 	if (offtime_usecs == 0ULL) {
9420 		ret = sfi_class_offtime_cancel(SFI_CLASS_UTILITY);
9421 	} else {
9422 		ret = sfi_set_class_offtime(SFI_CLASS_UTILITY, offtime_usecs);
9423 	}
9424 #endif // CONFIG_THREAD_GROUPS
9425 	return ret;
9426 }
9427 
9428 #endif /* __arm64__ */
9429 
9430 #if CONFIG_SCHED_EDGE
9431 
9432 #define SCHED_PSET_LOAD_EWMA_TC_NSECS 10000000u
9433 
9434 /*
9435  * sched_edge_pset_running_higher_bucket()
9436  *
9437  * Routine to calculate cumulative running counts for each scheduling
9438  * bucket. This effectively lets the load calculation determine whether a
9439  * cluster is running any threads at a QoS lower than that of the thread
9440  * being migrated.
9441  */
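/*
 * Illustrative example: if the per-bucket running counts gathered below are
 * [2, 1, 0, 3, ...] starting at TH_BUCKET_FIXPRI, the prefix sum turns them
 * into cumulative counts [2, 3, 3, 6, ...], i.e. entry i is the number of
 * CPUs currently running threads at bucket i or any higher-priority bucket.
 */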
9442 static void
9443 sched_edge_pset_running_higher_bucket(processor_set_t pset, uint32_t *running_higher)
9444 {
9445 	bitmap_t *active_map = &pset->cpu_state_map[PROCESSOR_RUNNING];
9446 	bzero(running_higher, sizeof(uint32_t) * TH_BUCKET_SCHED_MAX);
9447 
9448 	/* Count the running threads per bucket */
9449 	for (int cpu = bitmap_first(active_map, MAX_CPUS); cpu >= 0; cpu = bitmap_next(active_map, cpu)) {
9450 		sched_bucket_t cpu_bucket = os_atomic_load(&pset->cpu_running_buckets[cpu], relaxed);
9451 		/* Don't count idle threads */
9452 		if (cpu_bucket < TH_BUCKET_SCHED_MAX) {
9453 			running_higher[cpu_bucket]++;
9454 		}
9455 	}
9456 
9457 	/* Calculate the cumulative running counts as a prefix sum */
9458 	for (sched_bucket_t bucket = TH_BUCKET_FIXPRI; bucket < TH_BUCKET_SCHED_MAX - 1; bucket++) {
9459 		running_higher[bucket + 1] += running_higher[bucket];
9460 	}
9461 }
9462 
9463 /*
9464  * sched_update_pset_load_average()
9465  *
9466  * Updates the load average for each sched bucket for a cluster.
9467  * This routine must be called with the pset lock held.
9468  */
9469 void
9470 sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
9471 {
9472 	int avail_cpu_count = pset_available_cpu_count(pset);
9473 	if (avail_cpu_count == 0) {
9474 		/* Looks like the pset is not runnable any more; nothing to do here */
9475 		return;
9476 	}
9477 
9478 	/*
9479 	 * Edge Scheduler Optimization
9480 	 *
9481 	 * See if more callers of this routine can pass in timestamps to avoid the
9482 	 * mach_absolute_time() call here.
9483 	 */
9484 
9485 	if (!curtime) {
9486 		curtime = mach_absolute_time();
9487 	}
9488 	uint64_t last_update = os_atomic_load(&pset->pset_load_last_update, relaxed);
9489 	int64_t delta_ticks = curtime - last_update;
9490 	if (delta_ticks < 0) {
9491 		return;
9492 	}
9493 
9494 	uint64_t delta_nsecs = 0;
9495 	absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
9496 
9497 	if (__improbable(delta_nsecs > UINT32_MAX)) {
9498 		delta_nsecs = UINT32_MAX;
9499 	}
9500 
9501 	/* Update the shared resource load on the pset */
9502 	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
9503 		uint64_t shared_rsrc_runnable_load = sched_edge_shared_rsrc_runnable_load(&pset->pset_clutch_root, shared_rsrc_type);
9504 		uint64_t shared_rsrc_running_load = bit_count(pset->cpu_running_cluster_shared_rsrc_thread[shared_rsrc_type]);
9505 		uint64_t new_shared_load = shared_rsrc_runnable_load + shared_rsrc_running_load;
9506 		uint64_t old_shared_load = os_atomic_xchg(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], new_shared_load, relaxed);
9507 		if (old_shared_load != new_shared_load) {
9508 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_CLUSTER_SHARED_LOAD) | DBG_FUNC_NONE, pset->pset_cluster_id, shared_rsrc_type, new_shared_load, shared_rsrc_running_load);
9509 		}
9510 	}
9511 
9512 	uint32_t running_higher[TH_BUCKET_SCHED_MAX];
9513 	sched_edge_pset_running_higher_bucket(pset, running_higher);
9514 
9515 	for (sched_bucket_t sched_bucket = TH_BUCKET_FIXPRI; sched_bucket < TH_BUCKET_SCHED_MAX; sched_bucket++) {
9516 		uint64_t old_load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
9517 		uint64_t old_load_average_factor = old_load_average * SCHED_PSET_LOAD_EWMA_TC_NSECS;
9518 		uint32_t current_runq_depth = sched_edge_cluster_cumulative_count(&pset->pset_clutch_root, sched_bucket) +  rt_runq_count(pset) + running_higher[sched_bucket];
9519 		os_atomic_store(&pset->pset_runnable_depth[sched_bucket], current_runq_depth, relaxed);
9520 
9521 		uint32_t current_load = current_runq_depth / avail_cpu_count;
9522 		/*
9523 		 * For the new load average, multiply current_load by delta_nsecs (which results in a 32.0 value).
9524 		 * Since we want to maintain the load average as a 24.8 fixed-point value for precision, the
9525 		 * new load average needs to be shifted before it can be added to the old load average.
9526 		 */
9527 		uint64_t new_load_average_factor = (current_load * delta_nsecs) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;
9528 
9529 		/*
9530 		 * For extremely parallel workloads, it is important that the load average on a cluster moves from zero to non-zero
9531 		 * instantly to allow threads to be migrated to other (potentially idle) clusters quickly. Hence use the EWMA
9532 		 * when the system is already loaded; otherwise for an idle system use the latest load average immediately.
9533 		 */
9534 		int old_load_shifted = (int)((old_load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
9535 		boolean_t load_uptick = (old_load_shifted == 0) && (current_load != 0);
9536 		boolean_t load_downtick = (old_load_shifted != 0) && (current_load == 0);
9537 		uint64_t load_average;
9538 		if (load_uptick || load_downtick) {
9539 			load_average = (current_load << SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
9540 		} else {
9541 			/* Indicates a loaded system; use EWMA for load average calculation */
9542 			load_average = (old_load_average_factor + new_load_average_factor) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
9543 		}
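		/*
		 * Worked example of the EWMA above (hypothetical numbers): with
		 * SCHED_PSET_LOAD_EWMA_TC_NSECS = 10ms, an old 24.8 load average of
		 * 384 (1.50), current_load = 2 and delta_nsecs = 5,000,000:
		 *   old factor = 384 * 10,000,000            = 3,840,000,000
		 *   new factor = (2 * 5,000,000) << 8         = 2,560,000,000
		 *   load_average = 6,400,000,000 / 15,000,000 = 426 (~1.66 in 24.8)
		 * so the average moves part of the way from 1.50 toward 2.00.
		 */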
9544 		os_atomic_store(&pset->pset_load_average[sched_bucket], load_average, relaxed);
9545 		if (load_average != old_load_average) {
9546 			KTRC(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_LOAD_AVG) | DBG_FUNC_NONE, pset->pset_cluster_id, (load_average >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS), load_average & SCHED_PSET_LOAD_EWMA_FRACTION_MASK, sched_bucket);
9547 		}
9548 	}
9549 	os_atomic_store(&pset->pset_load_last_update, curtime, relaxed);
9550 }
9551 
9552 void
9553 sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t execution_time, uint64_t curtime, sched_bucket_t sched_bucket)
9554 {
9555 	pset_execution_time_t old_execution_time_packed, new_execution_time_packed;
9556 	uint64_t avg_thread_execution_time = 0;
9557 
9558 	os_atomic_rmw_loop(&pset->pset_execution_time[sched_bucket].pset_execution_time_packed,
9559 	    old_execution_time_packed.pset_execution_time_packed,
9560 	    new_execution_time_packed.pset_execution_time_packed, relaxed, {
9561 		uint64_t last_update = old_execution_time_packed.pset_execution_time_last_update;
9562 		int64_t delta_ticks = curtime - last_update;
9563 		if (delta_ticks <= 0) {
9564 		        /*
9565 		         * It's possible that another CPU came in and updated the pset_execution_time
9566 		         * before this CPU could do it. Since the average execution time is meant to
9567 		         * be an approximate measure per cluster, ignore the older update.
9568 		         */
9569 		        os_atomic_rmw_loop_give_up(return);
9570 		}
9571 		uint64_t delta_nsecs = 0;
9572 		absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
9573 
9574 		uint64_t nanotime = 0;
9575 		absolutetime_to_nanoseconds(execution_time, &nanotime);
9576 		uint64_t execution_time_us = nanotime / NSEC_PER_USEC;
9577 
9578 		/*
9579 		 * Since the average execution time is stored in microseconds, avoid rounding errors in
9580 		 * the EWMA calculation by only using a non-zero previous value.
9581 		 */
9582 		uint64_t old_avg_thread_execution_time = MAX(old_execution_time_packed.pset_avg_thread_execution_time, 1ULL);
9583 
9584 		uint64_t old_execution_time = (old_avg_thread_execution_time * SCHED_PSET_LOAD_EWMA_TC_NSECS);
9585 		uint64_t new_execution_time = (execution_time_us * delta_nsecs);
9586 
9587 		avg_thread_execution_time = (old_execution_time + new_execution_time) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
9588 		new_execution_time_packed.pset_avg_thread_execution_time = avg_thread_execution_time;
9589 		new_execution_time_packed.pset_execution_time_last_update = curtime;
9590 	});
9591 	if (new_execution_time_packed.pset_avg_thread_execution_time != old_execution_time_packed.pset_avg_thread_execution_time) {
9592 		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_AVG_EXEC_TIME) | DBG_FUNC_NONE, pset->pset_cluster_id, avg_thread_execution_time, sched_bucket);
9593 	}
9594 }
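/*
 * Worked example of the execution-time EWMA above (illustrative numbers only;
 * the value of the time constant is an assumption, not taken from this file):
 * with an old average of 100us, a new sample of 300us and delta_nsecs equal to
 * the time constant TC,
 *   avg = (100 * TC + 300 * TC) / (TC + TC) = 200us,
 * so each elapsed time constant moves the per-cluster average halfway toward
 * the latest per-thread execution time sample.
 */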
9595 
9596 uint64_t
9597 sched_pset_cluster_shared_rsrc_load(processor_set_t pset, cluster_shared_rsrc_type_t shared_rsrc_type)
9598 {
9599 	/* Prevent migrations to derecommended clusters */
9600 	if (!pset_is_recommended(pset)) {
9601 		return UINT64_MAX;
9602 	}
9603 	return os_atomic_load(&pset->pset_cluster_shared_rsrc_load[shared_rsrc_type], relaxed);
9604 }
9605 
9606 #else /* CONFIG_SCHED_EDGE */
9607 
9608 void
9609 sched_update_pset_load_average(processor_set_t pset, __unused uint64_t curtime)
9610 {
9611 	int non_rt_load = pset->pset_runq.count;
9612 	int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
9613 	int new_load_average = ((int)pset->load_average + load) >> 1;
9614 
9615 	pset->load_average = new_load_average;
9616 #if (DEVELOPMENT || DEBUG)
9617 #if __AMP__
9618 	if (pset->pset_cluster_type == PSET_AMP_P) {
9619 		KTRC(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset, 0), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)));
9620 	}
9621 #endif
9622 #endif
9623 }
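/*
 * Illustrative sketch of the non-Edge averaging above (symbolic, since the
 * value of PSET_LOAD_NUMERATOR_SHIFT is not restated here): with S =
 * PSET_LOAD_NUMERATOR_SHIFT, an old load_average of (8 << S) and an
 * instantaneous count of 4 running/runnable threads,
 *   new_load_average = ((8 << S) + (4 << S)) >> 1 = 6 << S,
 * i.e. each update moves the average halfway toward the current sample.
 */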
9624 
9625 void
9626 sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket)
9627 {
9628 }
9629 
9630 #endif /* CONFIG_SCHED_EDGE */
9631 
9632 /* pset is locked */
9633 static bool
9634 processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor)
9635 {
9636 	int cpuid = processor->cpu_id;
9637 #if defined(__x86_64__)
9638 	if (sched_avoid_cpu0 && (cpuid == 0)) {
9639 		return false;
9640 	}
9641 #endif
9642 
9643 	cpumap_t fasttrack_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
9644 
9645 	return bit_test(fasttrack_map, cpuid);
9646 }
9647 
9648 /* pset is locked */
9649 static processor_t
9650 choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries, bool skip_spills)
9651 {
9652 #if defined(__x86_64__)
9653 	bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
9654 #else
9655 	const bool avoid_cpu0 = false;
9656 #endif
9657 	cpumap_t cpu_map;
9658 
9659 try_again:
9660 	cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
9661 	if (skip_processor) {
9662 		bit_clear(cpu_map, skip_processor->cpu_id);
9663 	}
9664 	if (skip_spills) {
9665 		cpu_map &= ~pset->rt_pending_spill_cpu_mask;
9666 	}
9667 
9668 	if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
9669 		bit_clear(cpu_map, 0);
9670 	}
9671 
9672 	cpumap_t primary_map = cpu_map & pset->primary_map;
9673 	if (avoid_cpu0) {
9674 		primary_map = bit_ror64(primary_map, 1);
9675 	}
9676 
9677 	int rotid = lsb_first(primary_map);
9678 	if (rotid >= 0) {
9679 		int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
9680 
9681 		processor_t processor = processor_array[cpuid];
9682 
9683 		return processor;
9684 	}
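	/*
	 * Illustrative example of the rotation trick above (the cpu numbers are
	 * assumptions for this sketch): with avoid_cpu0 set and a primary_map
	 * containing cpu0 and cpu2, bit_ror64(map, 1) moves cpu0 to bit 63 and
	 * cpu2 to bit 1, so lsb_first() returns 1 and (1 + 1) & 63 selects cpu2
	 * ahead of cpu0. If cpu0 is the only available primary, it rotates to
	 * bit 63 and (63 + 1) & 63 still maps back to cpu0.
	 */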
9685 
9686 	if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
9687 		goto out;
9688 	}
9689 
9690 	if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
9691 		/* Also avoid cpu1 */
9692 		bit_clear(cpu_map, 1);
9693 	}
9694 
9695 	/* Consider secondary processors whose primary is actually running a realtime thread */
9696 	cpumap_t secondary_map = cpu_map & ~pset->primary_map & (pset->realtime_map << 1);
9697 	if (avoid_cpu0) {
9698 		/* Also avoid cpu1 */
9699 		secondary_map = bit_ror64(secondary_map, 2);
9700 	}
9701 	rotid = lsb_first(secondary_map);
9702 	if (rotid >= 0) {
9703 		int cpuid = avoid_cpu0 ?  ((rotid + 2) & 63) : rotid;
9704 
9705 		processor_t processor = processor_array[cpuid];
9706 
9707 		return processor;
9708 	}
9709 
9710 	/* Consider secondary processors */
9711 	secondary_map = cpu_map & ~pset->primary_map;
9712 	if (avoid_cpu0) {
9713 		/* Also avoid cpu1 */
9714 		secondary_map = bit_ror64(secondary_map, 2);
9715 	}
9716 	rotid = lsb_first(secondary_map);
9717 	if (rotid >= 0) {
9718 		int cpuid = avoid_cpu0 ?  ((rotid + 2) & 63) : rotid;
9719 
9720 		processor_t processor = processor_array[cpuid];
9721 
9722 		return processor;
9723 	}
9724 
9725 	/*
9726 	 * I was hoping the compiler would optimize
9727 	 * this away when avoid_cpu0 is const bool false
9728 	 * but it still complains about the assignment
9729 	 * in that case.
9730 	 */
9731 	if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
9732 #if defined(__x86_64__)
9733 		avoid_cpu0 = false;
9734 #else
9735 		assert(0);
9736 #endif
9737 		goto try_again;
9738 	}
9739 
9740 out:
9741 	if (skip_processor) {
9742 		return PROCESSOR_NULL;
9743 	}
9744 
9745 	/*
9746 	 * If we didn't find an obvious processor to choose, but there are still more CPUs
9747 	 * not already running realtime threads than realtime threads in the realtime run queue,
9748 	 * this thread belongs in this pset, so choose some other processor in this pset
9749 	 * to ensure the thread is enqueued here.
9750 	 */
9751 	cpumap_t non_realtime_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
9752 	if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
9753 		cpu_map = non_realtime_map;
9754 		assert(cpu_map != 0);
9755 		int cpuid = bit_first(cpu_map);
9756 		assert(cpuid >= 0);
9757 		return processor_array[cpuid];
9758 	}
9759 
9760 	if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
9761 		goto skip_secondaries;
9762 	}
9763 
9764 	non_realtime_map = pset_available_cpumap(pset) & ~pset->realtime_map;
9765 	if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
9766 		cpu_map = non_realtime_map;
9767 		assert(cpu_map != 0);
9768 		int cpuid = bit_first(cpu_map);
9769 		assert(cpuid >= 0);
9770 		return processor_array[cpuid];
9771 	}
9772 
9773 skip_secondaries:
9774 	return PROCESSOR_NULL;
9775 }
9776 
9777 /*
9778  * Choose the processor with (1) the lowest priority less than max_pri and (2) the furthest deadline for that priority.
9779  * If all available processors are at max_pri, choose the furthest deadline that is greater than minimum_deadline.
9780  *
9781  * pset is locked.
9782  */
9783 static processor_t
9784 choose_furthest_deadline_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool skip_spills, bool include_ast_urgent_pending_cpus)
9785 {
9786 	uint64_t  furthest_deadline = deadline_add(minimum_deadline, rt_deadline_epsilon);
9787 	processor_t fd_processor = PROCESSOR_NULL;
9788 	int lowest_priority = max_pri;
9789 
9790 	cpumap_t cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask;
9791 	if (skip_processor) {
9792 		bit_clear(cpu_map, skip_processor->cpu_id);
9793 	}
9794 	if (skip_spills) {
9795 		cpu_map &= ~pset->rt_pending_spill_cpu_mask;
9796 	}
9797 
9798 	for (int cpuid = bit_first(cpu_map); cpuid >= 0; cpuid = bit_next(cpu_map, cpuid)) {
9799 		processor_t processor = processor_array[cpuid];
9800 
9801 		if (processor->current_pri > lowest_priority) {
9802 			continue;
9803 		}
9804 
9805 		if (processor->current_pri < lowest_priority) {
9806 			lowest_priority = processor->current_pri;
9807 			furthest_deadline = processor->deadline;
9808 			fd_processor = processor;
9809 			continue;
9810 		}
9811 
9812 		if (processor->deadline > furthest_deadline) {
9813 			furthest_deadline = processor->deadline;
9814 			fd_processor = processor;
9815 		}
9816 	}
9817 
9818 	if (fd_processor) {
9819 		return fd_processor;
9820 	}
9821 
9822 	/*
9823 	 * There is a race condition possible when there are multiple processor sets.
9824 	 * choose_processor() takes pset lock A, sees the pending_AST_URGENT_cpu_mask set for a processor in that set and finds no suitable candidate CPU,
9825 	 * so it drops pset lock A and tries to take pset lock B.  Meanwhile the pending_AST_URGENT_cpu_mask CPU is looking for a thread to run and holds
9826 	 * pset lock B. It doesn't find any threads (because the candidate thread isn't yet on any run queue), so drops lock B, takes lock A again to clear
9827 	 * the pending_AST_URGENT_cpu_mask bit, and keeps running the current (far deadline) thread. choose_processor() now has lock B and can only find
9828 	 * the lowest count processor in set B so enqueues it on set B's run queue but doesn't IPI anyone. (The lowest count includes all threads,
9829 	 * near and far deadlines, so will prefer a low count of earlier deadlines to a high count of far deadlines, which is suboptimal for EDF scheduling.
9830 	 * To make a better choice we would need to know how many threads with earlier deadlines than the candidate thread exist on each pset's run queue.
9831 	 * But even if we chose the better run queue, we still wouldn't send an IPI in this case.)
9832 	 *
9833 	 * The mitigation is to also look for suitable CPUs that have their pending_AST_URGENT_cpu_mask bit set where there are no earlier deadline threads
9834 	 * on the run queue of that pset.
9835 	 */
9836 	if (include_ast_urgent_pending_cpus && (rt_runq_earliest_deadline(pset) > furthest_deadline)) {
9837 		cpu_map = pset_available_cpumap(pset) & pset->pending_AST_URGENT_cpu_mask;
9838 		assert(skip_processor == PROCESSOR_NULL);
9839 		assert(skip_spills == false);
9840 
9841 		for (int cpuid = bit_first(cpu_map); cpuid >= 0; cpuid = bit_next(cpu_map, cpuid)) {
9842 			processor_t processor = processor_array[cpuid];
9843 
9844 			if (processor->current_pri > lowest_priority) {
9845 				continue;
9846 			}
9847 
9848 			if (processor->current_pri < lowest_priority) {
9849 				lowest_priority = processor->current_pri;
9850 				furthest_deadline = processor->deadline;
9851 				fd_processor = processor;
9852 				continue;
9853 			}
9854 
9855 			if (processor->deadline > furthest_deadline) {
9856 				furthest_deadline = processor->deadline;
9857 				fd_processor = processor;
9858 			}
9859 		}
9860 	}
9861 
9862 	return fd_processor;
9863 }
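/*
 * Note on the selection above: furthest_deadline starts at
 * deadline_add(minimum_deadline, rt_deadline_epsilon), so among processors
 * already running at max_pri a candidate is only chosen if its deadline lies
 * strictly beyond that slack. Illustrative numbers (assumed, not from this
 * file): with minimum_deadline D and an epsilon of 100us, a max_pri CPU whose
 * current deadline is D + 50us is not selected, while one at D + 200us is.
 */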
9864 
9865 /* pset is locked */
9866 static processor_t
9867 choose_next_processor_for_realtime_thread(processor_set_t pset, int max_pri, uint64_t minimum_deadline, processor_t skip_processor, bool consider_secondaries)
9868 {
9869 	bool skip_spills = true;
9870 	bool include_ast_urgent_pending_cpus = false;
9871 
9872 	processor_t next_processor = choose_processor_for_realtime_thread(pset, skip_processor, consider_secondaries, skip_spills);
9873 	if (next_processor != PROCESSOR_NULL) {
9874 		return next_processor;
9875 	}
9876 
9877 	next_processor = choose_furthest_deadline_processor_for_realtime_thread(pset, max_pri, minimum_deadline, skip_processor, skip_spills, include_ast_urgent_pending_cpus);
9878 	return next_processor;
9879 }
9880 
9881 #if defined(__x86_64__)
9882 /* pset is locked */
9883 static bool
9884 all_available_primaries_are_running_realtime_threads(processor_set_t pset, bool include_backups)
9885 {
9886 	bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
9887 	int nbackup_cpus = 0;
9888 
9889 	if (include_backups && rt_runq_is_low_latency(pset)) {
9890 		nbackup_cpus = sched_rt_n_backup_processors;
9891 	}
9892 
9893 	cpumap_t cpu_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
9894 	if (avoid_cpu0 && (sched_avoid_cpu0 == 2)) {
9895 		bit_clear(cpu_map, 0);
9896 	}
9897 	return (rt_runq_count(pset) + nbackup_cpus) > bit_count(cpu_map);
9898 }
9899 
9900 /* pset is locked */
9901 static bool
9902 these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map, bool include_backups)
9903 {
9904 	int nbackup_cpus = 0;
9905 
9906 	if (include_backups && rt_runq_is_low_latency(pset)) {
9907 		nbackup_cpus = sched_rt_n_backup_processors;
9908 	}
9909 
9910 	cpumap_t cpu_map = pset_available_cpumap(pset) & these_map & ~pset->realtime_map;
9911 	return (rt_runq_count(pset) + nbackup_cpus) > bit_count(cpu_map);
9912 }
9913 #endif
9914 
9915 static bool
9916 sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor, bool as_backup)
9917 {
9918 	if (!processor->is_recommended) {
9919 		return false;
9920 	}
9921 	bool ok_to_run_realtime_thread = true;
9922 #if defined(__x86_64__)
9923 	bool spill_pending = bit_test(pset->rt_pending_spill_cpu_mask, processor->cpu_id);
9924 	if (spill_pending) {
9925 		return true;
9926 	}
9927 	if (processor->cpu_id == 0) {
9928 		if (sched_avoid_cpu0 == 1) {
9929 			ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1, as_backup);
9930 		} else if (sched_avoid_cpu0 == 2) {
9931 			ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, ~0x3, as_backup);
9932 		}
9933 	} else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
9934 		ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2, as_backup);
9935 	} else if (processor->processor_primary != processor) {
9936 		ok_to_run_realtime_thread = (sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset, as_backup));
9937 	}
9938 #else
9939 	(void)pset;
9940 	(void)processor;
9941 	(void)as_backup;
9942 #endif
9943 	return ok_to_run_realtime_thread;
9944 }
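/*
 * Readability note for the x86 sched_avoid_cpu0 cases above: the cpumaps are
 * literal masks, so ~0x1 means "all primaries except cpu0", ~0x3 means "all
 * CPUs except cpu0 and cpu1", and ~0x2 means "all CPUs except cpu1". In mode 1
 * cpu0 may only run realtime threads once the remaining primaries are already
 * saturated with them; mode 2 additionally holds back cpu1.
 */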
9945 
9946 void
9947 sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset, boolean_t drop_lock)
9948 {
9949 	if (drop_lock) {
9950 		pset_unlock(pset);
9951 	}
9952 }
9953 
9954 void
9955 thread_set_no_smt(bool set)
9956 {
9957 	if (!system_is_SMT) {
9958 		/* Not a machine that supports SMT */
9959 		return;
9960 	}
9961 
9962 	thread_t thread = current_thread();
9963 
9964 	spl_t s = splsched();
9965 	thread_lock(thread);
9966 	if (set) {
9967 		thread->sched_flags |= TH_SFLAG_NO_SMT;
9968 	}
9969 	thread_unlock(thread);
9970 	splx(s);
9971 }
9972 
9973 bool
9974 thread_get_no_smt(void)
9975 {
9976 	return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
9977 }
9978 
9979 extern void task_set_no_smt(task_t);
9980 void
9981 task_set_no_smt(task_t task)
9982 {
9983 	if (!system_is_SMT) {
9984 		/* Not a machine that supports SMT */
9985 		return;
9986 	}
9987 
9988 	if (task == TASK_NULL) {
9989 		task = current_task();
9990 	}
9991 
9992 	task_lock(task);
9993 	task->t_flags |= TF_NO_SMT;
9994 	task_unlock(task);
9995 }
9996 
9997 #if DEBUG || DEVELOPMENT
9998 extern void sysctl_task_set_no_smt(char no_smt);
9999 void
10000 sysctl_task_set_no_smt(char no_smt)
10001 {
10002 	if (!system_is_SMT) {
10003 		/* Not a machine that supports SMT */
10004 		return;
10005 	}
10006 
10007 	task_t task = current_task();
10008 
10009 	task_lock(task);
10010 	if (no_smt == '1') {
10011 		task->t_flags |= TF_NO_SMT;
10012 	}
10013 	task_unlock(task);
10014 }
10015 
10016 extern char sysctl_task_get_no_smt(void);
10017 char
10018 sysctl_task_get_no_smt(void)
10019 {
10020 	task_t task = current_task();
10021 
10022 	if (task->t_flags & TF_NO_SMT) {
10023 		return '1';
10024 	}
10025 	return '0';
10026 }
10027 #endif /* DEVELOPMENT || DEBUG */
10028 
10029 __private_extern__ void
10030 thread_bind_cluster_type(thread_t thread, char cluster_type, bool soft_bound)
10031 {
10032 #if __AMP__
10033 	spl_t s = splsched();
10034 	thread_lock(thread);
10035 	thread->sched_flags &= ~(TH_SFLAG_BOUND_SOFT);
10036 	thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
10037 	if (soft_bound) {
10038 		thread->sched_flags |= TH_SFLAG_BOUND_SOFT;
10039 	}
10040 	pset_node_t bind_node = PSET_NODE_NULL;
10041 	switch (cluster_type) {
10042 	case 'e':
10043 	case 'E':
10044 		if (ecore_node->psets != PROCESSOR_SET_NULL) {
10045 			bind_node = ecore_node;
10046 		}
10047 		break;
10048 	case 'p':
10049 	case 'P':
10050 		if (pcore_node->psets != PROCESSOR_SET_NULL) {
10051 			bind_node = pcore_node;
10052 		}
10053 		break;
10054 	default:
10055 		break;
10056 	}
10057 	if (bind_node != PSET_NODE_NULL) {
10058 		thread->th_bound_cluster_id = bind_node->psets->pset_id;
10059 	}
10060 	thread_unlock(thread);
10061 	splx(s);
10062 
10063 	if (thread == current_thread()) {
10064 		thread_block(THREAD_CONTINUE_NULL);
10065 	}
10066 #else /* __AMP__ */
10067 	(void)thread;
10068 	(void)cluster_type;
10069 	(void)soft_bound;
10070 #endif /* __AMP__ */
10071 }
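/*
 * Illustrative usage sketch (not a call site in this file): soft-bind the
 * calling thread to the E-cluster, then clear the binding later by passing a
 * character other than 'e'/'E'/'p'/'P':
 *
 *	thread_bind_cluster_type(current_thread(), 'E', true);
 *	...
 *	thread_bind_cluster_type(current_thread(), '0', false);
 */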
10072 
10073 extern uint32_t thread_bound_cluster_id(thread_t thread);
10074 uint32_t
10075 thread_bound_cluster_id(thread_t thread)
10076 {
10077 	return thread->th_bound_cluster_id;
10078 }
10079 
10080 __private_extern__ kern_return_t
10081 thread_bind_cluster_id(thread_t thread, uint32_t cluster_id, thread_bind_option_t options)
10082 {
10083 #if __AMP__
10084 
10085 	processor_set_t pset = NULL;
10086 
10087 	/* Treat binding to THREAD_BOUND_CLUSTER_NONE as a request to unbind. */
10088 	if ((options & THREAD_UNBIND) || cluster_id == THREAD_BOUND_CLUSTER_NONE) {
10089 		/* If the thread was actually not bound to some cluster, nothing to do here */
10090 		if (thread_bound_cluster_id(thread) == THREAD_BOUND_CLUSTER_NONE) {
10091 			return KERN_SUCCESS;
10092 		}
10093 	} else {
10094 		/* Validate the inputs for the bind case */
10095 		int max_clusters = ml_get_cluster_count();
10096 		if (cluster_id >= max_clusters) {
10097 			/* Invalid cluster id */
10098 			return KERN_INVALID_VALUE;
10099 		}
10100 		pset = pset_array[cluster_id];
10101 		if (pset == NULL) {
10102 			/* Cluster has not been initialized yet */
10103 			return KERN_INVALID_VALUE;
10104 		}
10105 		if (options & THREAD_BIND_ELIGIBLE_ONLY) {
10106 			if (SCHED(thread_eligible_for_pset(thread, pset)) == false) {
10107 				/* Thread is not recommended for the cluster type */
10108 				return KERN_INVALID_POLICY;
10109 			}
10110 		}
10111 	}
10112 
10113 	spl_t s = splsched();
10114 	thread_lock(thread);
10115 
10116 	/* Unbind the thread from its previous bound state */
10117 	thread->sched_flags &= ~(TH_SFLAG_BOUND_SOFT);
10118 	thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
10119 
10120 	if (options & THREAD_UNBIND) {
10121 		/* Nothing more to do here */
10122 		goto thread_bind_cluster_complete;
10123 	}
10124 
10125 	if (options & THREAD_BIND_SOFT) {
10126 		thread->sched_flags |= TH_SFLAG_BOUND_SOFT;
10127 	}
10128 	thread->th_bound_cluster_id = cluster_id;
10129 
10130 thread_bind_cluster_complete:
10131 	thread_unlock(thread);
10132 	splx(s);
10133 
10134 	if (thread == current_thread()) {
10135 		thread_block(THREAD_CONTINUE_NULL);
10136 	}
10137 #else /* __AMP__ */
10138 	(void)thread;
10139 	(void)cluster_id;
10140 	(void)options;
10141 #endif /* __AMP__ */
10142 	return KERN_SUCCESS;
10143 }
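/*
 * Illustrative usage sketch (cluster id 1 is an assumed value, not taken from
 * this file): soft-bind the calling thread to cluster 1 only if it is
 * eligible there, then unbind:
 *
 *	kern_return_t kr = thread_bind_cluster_id(current_thread(), 1,
 *	    THREAD_BIND_SOFT | THREAD_BIND_ELIGIBLE_ONLY);
 *	...
 *	thread_bind_cluster_id(current_thread(), THREAD_BOUND_CLUSTER_NONE,
 *	    THREAD_UNBIND);
 */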
10144 
10145 #if DEVELOPMENT || DEBUG
10146 extern int32_t sysctl_get_bound_cpuid(void);
10147 int32_t
10148 sysctl_get_bound_cpuid(void)
10149 {
10150 	int32_t cpuid = -1;
10151 	thread_t self = current_thread();
10152 
10153 	processor_t processor = self->bound_processor;
10154 	if (processor == NULL) {
10155 		cpuid = -1;
10156 	} else {
10157 		cpuid = processor->cpu_id;
10158 	}
10159 
10160 	return cpuid;
10161 }
10162 
10163 extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
10164 kern_return_t
10165 sysctl_thread_bind_cpuid(int32_t cpuid)
10166 {
10167 	processor_t processor = PROCESSOR_NULL;
10168 
10169 	if (cpuid == -1) {
10170 		goto unbind;
10171 	}
10172 
10173 	if (cpuid < 0 || cpuid >= MAX_SCHED_CPUS) {
10174 		return KERN_INVALID_VALUE;
10175 	}
10176 
10177 	processor = processor_array[cpuid];
10178 	if (processor == PROCESSOR_NULL) {
10179 		return KERN_INVALID_VALUE;
10180 	}
10181 
10182 #if __AMP__
10183 
10184 	thread_t thread = current_thread();
10185 
10186 	if (thread->th_bound_cluster_id != THREAD_BOUND_CLUSTER_NONE) {
10187 		if ((thread->sched_flags & TH_SFLAG_BOUND_SOFT) == 0) {
10188 			/* Cannot hard-bind an already hard-cluster-bound thread */
10189 			return KERN_NOT_SUPPORTED;
10190 		}
10191 	}
10192 
10193 #endif /* __AMP__ */
10194 
10195 unbind:
10196 	thread_bind(processor);
10197 
10198 	thread_block(THREAD_CONTINUE_NULL);
10199 	return KERN_SUCCESS;
10200 }
10201 
10202 #if __AMP__
10203 static char
10204 pset_cluster_type_name_char(pset_cluster_type_t pset_type)
10205 {
10206 	switch (pset_type) {
10207 	case PSET_AMP_E:
10208 		return 'E';
10209 	case PSET_AMP_P:
10210 		return 'P';
10211 	default:
10212 		panic("Unexpected AMP pset cluster type %d", pset_type);
10213 	}
10214 }
10215 #endif /* __AMP__ */
10216 
10217 extern char sysctl_get_task_cluster_type(void);
10218 char
10219 sysctl_get_task_cluster_type(void)
10220 {
10221 #if __AMP__
10222 	task_t task = current_task();
10223 	processor_set_t pset_hint = task->pset_hint;
10224 
10225 	if (!pset_hint) {
10226 		return '0';
10227 	}
10228 	return pset_cluster_type_name_char(pset_hint->pset_cluster_type);
10229 #else /* !__AMP__ */
10230 	return '0';
10231 #endif /* __AMP__ */
10232 }
10233 
10234 #if __AMP__
10235 extern char sysctl_get_bound_cluster_type(void);
10236 char
10237 sysctl_get_bound_cluster_type(void)
10238 {
10239 	thread_t self = current_thread();
10240 
10241 	if (self->th_bound_cluster_id == THREAD_BOUND_CLUSTER_NONE) {
10242 		return '0';
10243 	}
10244 	pset_cluster_type_t pset_type = pset_array[self->th_bound_cluster_id]->pset_cluster_type;
10245 	return pset_cluster_type_name_char(pset_type);
10246 }
10247 
10248 static processor_set_t
10249 find_pset_of_type(pset_cluster_type_t t)
10250 {
10251 	for (pset_node_t node = &pset_node0; node != NULL; node = node->node_list) {
10252 		if (node->pset_cluster_type != t) {
10253 			continue;
10254 		}
10255 
10256 		processor_set_t pset = PROCESSOR_SET_NULL;
10257 		for (int pset_id = lsb_first(node->pset_map); pset_id >= 0; pset_id = lsb_next(node->pset_map, pset_id)) {
10258 			pset = pset_array[pset_id];
10259 			/* Prefer one with recommended processors */
10260 			if (pset_is_recommended(pset)) {
10261 				assert(pset->pset_cluster_type == t);
10262 				return pset;
10263 			}
10264 		}
10265 		/* Otherwise return whatever was found last */
10266 		return pset;
10267 	}
10268 
10269 	return PROCESSOR_SET_NULL;
10270 }
10271 #endif /* __AMP__ */
10272 
10273 extern void sysctl_task_set_cluster_type(char cluster_type);
10274 void
10275 sysctl_task_set_cluster_type(char cluster_type)
10276 {
10277 	task_t task = current_task();
10278 	processor_set_t pset_hint = PROCESSOR_SET_NULL;
10279 
10280 #if __AMP__
10281 	switch (cluster_type) {
10282 	case 'e':
10283 	case 'E':
10284 		pset_hint = find_pset_of_type(PSET_AMP_E);
10285 		break;
10286 	case 'p':
10287 	case 'P':
10288 		pset_hint = find_pset_of_type(PSET_AMP_P);
10289 		break;
10290 	default:
10291 		break;
10292 	}
10293 
10294 	if (pset_hint) {
10295 		task_lock(task);
10296 		task->t_flags |= TF_USE_PSET_HINT_CLUSTER_TYPE;
10297 		task->pset_hint = pset_hint;
10298 		task_unlock(task);
10299 
10300 		thread_block(THREAD_CONTINUE_NULL);
10301 	}
10302 #else
10303 	(void)cluster_type;
10304 	(void)task;
10305 	(void)pset_hint;
10306 #endif
10307 }
10308 
10309 /*
10310  * The quantum length used for the Fixed and RT sched modes. In general the
10311  * quantum can vary, for example for background threads or by QoS.
10312  */
10313 extern uint64_t sysctl_get_quantum_us(void);
10314 uint64_t
10315 sysctl_get_quantum_us(void)
10316 {
10317 	uint32_t quantum;
10318 	uint64_t quantum_ns;
10319 
10320 	quantum = SCHED(initial_quantum_size)(THREAD_NULL);
10321 	absolutetime_to_nanoseconds(quantum, &quantum_ns);
10322 
10323 	return quantum_ns / 1000;
10324 }
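/*
 * Worked example (the 10ms figure is an assumption for illustration, not the
 * value configured here): if the initial quantum converts to 10,000,000 ns,
 * this returns 10,000,000 / 1000 = 10000, i.e. a 10ms quantum reported in
 * microseconds.
 */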
10325 
10326 #endif /* DEVELOPMENT || DEBUG */
10327