xref: /xnu-11215/osfmk/kern/processor.h (revision 8d741a5d)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 
59 /*
60  *	processor.h:	Processor and processor-related definitions.
61  */
62 
63 #ifndef _KERN_PROCESSOR_H_
64 #define _KERN_PROCESSOR_H_
65 
66 #include <mach/boolean.h>
67 #include <mach/kern_return.h>
68 #include <kern/kern_types.h>
69 
70 #include <sys/cdefs.h>
71 
72 #ifdef  MACH_KERNEL_PRIVATE
73 #include <mach/mach_types.h>
74 #include <kern/ast.h>
75 #include <kern/cpu_number.h>
76 #include <kern/smp.h>
77 #include <kern/simple_lock.h>
78 #include <kern/locks.h>
79 #include <kern/percpu.h>
80 #include <kern/queue.h>
81 #include <kern/recount.h>
82 #include <kern/sched.h>
83 #include <kern/sched_urgency.h>
84 #include <kern/timer.h>
85 #include <mach/sfi_class.h>
86 #include <kern/sched_clutch.h>
87 #include <kern/timer_call.h>
88 #include <kern/assert.h>
89 #include <machine/limits.h>
90 #endif
91 
92 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
93 
94 #ifdef  MACH_KERNEL_PRIVATE
95 
96 /*
97  *	Processor state is accessed by locking the scheduling lock
98  *	for the assigned processor set.
99  *
100  *           --- PENDING_OFFLINE <
101  *          /                     \
102  *        _/                      \
103  *  OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
104  *         \_________________^   ^ ^______/           /
105  *                                \__________________/
106  *
107  *  The transition from offline to start and idle to dispatching
108  *  is externally driven as a directive. However, these
109  *  are paired with a handshake by the processor itself
110  *  to indicate that it has completed a transition of indeterminate
111  *  length (for example, the DISPATCHING->RUNNING or START->RUNNING
112  *  transitions must occur on the processor itself).
113  *
114  *  The boot processor has some special cases, and skips the START state,
115  *  since it has already bootstrapped and is ready to context switch threads.
116  *
117  *  When a processor is in DISPATCHING or RUNNING state, the current_pri,
118  *  current_thmode, and deadline fields should be set, so that other
119  *  processors can evaluate if it is an appropriate candidate for preemption.
120  */
121 #if defined(CONFIG_SCHED_DEFERRED_AST)
122 /*
123  *           --- PENDING_OFFLINE <
124  *          /                     \
125  *        _/                      \
126  *  OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
127  *         \_________________^   ^ ^______/ ^_____ /  /
128  *                                \__________________/
129  *
130  *  A DISPATCHING processor may be put back into IDLE, if another
131  *  processor determines that the target processor will have nothing to do
132  *  upon reaching the RUNNING state.  This is racy, but if the target
133  *  responds and becomes RUNNING, it will not break the processor state
134  *  machine.
135  *
136  *  This change allows us to cancel an outstanding signal/AST on a processor
137  *  (if such an operation is supported through hardware or software), and
138  *  push the processor back into the IDLE state as a power optimization.
139  */
140 #endif
141 
142 typedef enum {
143 	PROCESSOR_OFF_LINE        = 0,    /* Not booted or off-line */
144 	/* PROCESSOR_SHUTDOWN     = 1,    Going off-line, but schedulable. No longer used. */
145 	PROCESSOR_START           = 2,    /* Being started */
146 	PROCESSOR_PENDING_OFFLINE = 3,    /* Going off-line, not schedulable */
147 	PROCESSOR_IDLE            = 4,    /* Idle (available) */
148 	PROCESSOR_DISPATCHING     = 5,    /* Dispatching (idle -> active) */
149 	PROCESSOR_RUNNING         = 6,    /* Normal execution */
150 	PROCESSOR_STATE_LEN       = (PROCESSOR_RUNNING + 1)
151 } processor_state_t;
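
/*
 * Illustrative sketch (hypothetical, not part of this interface): a remote
 * CPU holding the pset lock might combine the published state with
 * current_pri to judge whether this processor is a preemption candidate,
 * roughly:
 *
 *	candidate = (processor->state == PROCESSOR_RUNNING ||
 *	    processor->state == PROCESSOR_DISPATCHING) &&
 *	    (pri > processor->current_pri);
 */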
152 
153 typedef enum {
154 	PSET_SMP    = 0,
155 #if __AMP__
156 	PSET_AMP_E  = 1,
157 	PSET_AMP_P  = 2,
158 #endif /* __AMP__ */
159 } pset_cluster_type_t;
160 
161 #if __AMP__
162 
163 typedef enum {
164 	SCHED_PERFCTL_POLICY_DEFAULT,           /*  static policy: set at boot */
165 	SCHED_PERFCTL_POLICY_FOLLOW_GROUP,      /* dynamic policy: perfctl_class follows thread group across amp clusters */
166 	SCHED_PERFCTL_POLICY_RESTRICT_E,        /* dynamic policy: limits perfctl_class to amp e cluster */
167 } sched_perfctl_class_policy_t;
168 
169 extern _Atomic sched_perfctl_class_policy_t sched_perfctl_policy_util;
170 extern _Atomic sched_perfctl_class_policy_t sched_perfctl_policy_bg;
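
/*
 * Example (illustrative): a consumer would read the current policy with a
 * relaxed atomic load, for instance:
 *
 *	if (os_atomic_load(&sched_perfctl_policy_util, relaxed) ==
 *	    SCHED_PERFCTL_POLICY_RESTRICT_E) {
 *		... treat utility-band threads as restricted to the E cluster ...
 *	}
 */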
171 
172 #endif /* __AMP__ */
173 
174 typedef bitmap_t cpumap_t;
175 
176 #if __arm64__
177 
178 extern pset_cluster_type_t cluster_type_to_pset_cluster_type(cluster_type_t cluster_type);
179 extern pset_node_t cluster_type_to_pset_node(cluster_type_t cluster_type);
180 
181 /*
182  * pset_execution_time_t
183  *
184  * The pset_execution_time_t type is used to maintain the average
185  * execution time of threads on a pset. Since the avg. execution time is
186  * updated from contexts where the pset lock is not held, it uses a
187  * double-wide RMW loop to update these values atomically.
188  */
189 typedef union {
190 	struct {
191 		uint64_t        pset_avg_thread_execution_time;
192 		uint64_t        pset_execution_time_last_update;
193 	};
194 	unsigned __int128       pset_execution_time_packed;
195 } pset_execution_time_t;
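
/*
 * Illustrative sketch (assumes the clang __atomic builtins; the kernel's
 * actual update path may differ): the double-wide RMW loop described
 * above, expressed against the packed 128-bit value.
 *
 *	pset_execution_time_t old, new;
 *	old.pset_execution_time_packed = __atomic_load_n(
 *	    &stats->pset_execution_time_packed, __ATOMIC_RELAXED);
 *	do {
 *		new = old;
 *		new.pset_avg_thread_execution_time = new_avg;
 *		new.pset_execution_time_last_update = now;
 *	} while (!__atomic_compare_exchange_n(
 *	    &stats->pset_execution_time_packed,
 *	    &old.pset_execution_time_packed,
 *	    new.pset_execution_time_packed,
 *	    false, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
 */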
196 
197 #endif /* __arm64__ */
198 
199 struct processor_set {
200 	int                     pset_id;
201 	int                     online_processor_count;
202 	int                     cpu_set_low, cpu_set_hi;
203 	int                     cpu_set_count;
204 	int                     last_chosen;
205 
206 	uint64_t                load_average;
207 	uint64_t                pset_load_average[TH_BUCKET_SCHED_MAX];
208 #if CONFIG_SCHED_EDGE
209 	/*
210 	 * Count of threads running or enqueued on the cluster (not including threads enqueued in a processor-bound runq).
211 	 * Updated atomically per scheduling bucket, around the same time as pset_load_average
212 	 */
213 	uint32_t                pset_runnable_depth[TH_BUCKET_SCHED_MAX];
214 #endif /* CONFIG_SCHED_EDGE */
215 	uint64_t                pset_load_last_update;
216 	cpumap_t                cpu_bitmask;
217 	cpumap_t                recommended_bitmask;
218 	cpumap_t                cpu_state_map[PROCESSOR_STATE_LEN];
219 	cpumap_t                primary_map;
220 	cpumap_t                realtime_map;
221 	cpumap_t                cpu_available_map;
222 
223 #define SCHED_PSET_TLOCK (1)
224 #if     defined(SCHED_PSET_TLOCK)
225 /* TODO: reorder struct for temporal cache locality */
226 	__attribute__((aligned(128))) lck_ticket_t      sched_lock;
227 #else /* SCHED_PSET_TLOCK*/
228 	__attribute__((aligned(128))) lck_spin_t        sched_lock;     /* lock for above */
229 #endif /* SCHED_PSET_TLOCK*/
230 
231 	struct run_queue        pset_runq;      /* runq for this processor set, used by the amp and dualq scheduler policies */
232 	struct rt_queue         rt_runq;        /* realtime runq for this processor set */
233 	uint64_t                stealable_rt_threads_earliest_deadline; /* if this pset has stealable RT threads, the earliest deadline; else UINT64_MAX */
234 #if CONFIG_SCHED_CLUTCH
235 	struct sched_clutch_root pset_clutch_root; /* clutch hierarchy root */
236 #endif /* CONFIG_SCHED_CLUTCH */
237 
238 	/* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */
239 	cpumap_t                pending_AST_URGENT_cpu_mask;
240 	cpumap_t                pending_AST_PREEMPT_cpu_mask;
241 #if defined(CONFIG_SCHED_DEFERRED_AST)
242 	/*
243 	 * A separate mask, for ASTs that we may be able to cancel.  This is dependent on
244 	 * some level of support for requesting an AST on a processor, and then quashing
245 	 * that request later.
246 	 *
247 	 * The purpose of this field (and the associated codepaths) is to infer when we
248 	 * no longer need a processor that is DISPATCHING to come up, and to prevent it
249 	 * from coming out of IDLE if possible.  This should serve to decrease the number
250 	 * of spurious ASTs in the system, and let processors spend longer periods in
251 	 * IDLE.
252 	 */
253 	cpumap_t                pending_deferred_AST_cpu_mask;
254 #endif
255 	cpumap_t                pending_spill_cpu_mask;
256 	cpumap_t                rt_pending_spill_cpu_mask;
257 
258 	struct ipc_port *       pset_self;              /* port for operations */
259 	struct ipc_port *       pset_name_self; /* port for information */
260 
261 	processor_set_t         pset_list;              /* chain of associated psets */
262 	pset_node_t             node;
263 	uint32_t                pset_cluster_id;
264 
265 	/*
266 	 * Currently the scheduler uses a mix of pset_cluster_type_t & cluster_type_t
267 	 * for recommendations etc. It might be useful to unify these as a single type.
268 	 */
269 	pset_cluster_type_t     pset_cluster_type;
270 	/*
271 	 * For scheduler use only:
272 	 * The type that this pset will be treated like for scheduling purposes
273 	 */
274 	cluster_type_t          pset_type;
275 
276 #if CONFIG_SCHED_EDGE
277 	cpumap_t                cpu_running_foreign;
278 	cpumap_t                cpu_running_cluster_shared_rsrc_thread[CLUSTER_SHARED_RSRC_TYPE_COUNT];
279 	sched_bucket_t          cpu_running_buckets[MAX_CPUS];
280 
281 	bitmap_t                foreign_psets[BITMAP_LEN(MAX_PSETS)];
282 	bitmap_t                native_psets[BITMAP_LEN(MAX_PSETS)];
283 	bitmap_t                local_psets[BITMAP_LEN(MAX_PSETS)];
284 	bitmap_t                remote_psets[BITMAP_LEN(MAX_PSETS)];
285 	sched_clutch_edge       sched_edges[MAX_PSETS];
286 	pset_execution_time_t   pset_execution_time[TH_BUCKET_SCHED_MAX];
287 	uint64_t                pset_cluster_shared_rsrc_load[CLUSTER_SHARED_RSRC_TYPE_COUNT];
288 #endif /* CONFIG_SCHED_EDGE */
289 	cpumap_t                perfcontrol_cpu_preferred_bitmask;
290 	cpumap_t                perfcontrol_cpu_migration_bitmask;
291 	int                     cpu_preferred_last_chosen;
292 	bool                    is_SMT;                 /* pset contains SMT processors */
293 };
294 
295 /* Boot (and default) pset */
296 extern struct processor_set     pset0;
297 
298 typedef bitmap_t pset_map_t;
299 
300 struct pset_node {
301 	processor_set_t         psets;                  /* list of associated psets */
302 
303 	pset_node_t             nodes;                  /* list of associated subnodes */
304 	pset_node_t             node_list;              /* chain of associated nodes */
305 
306 	pset_node_t             parent;
307 
308 	pset_cluster_type_t     pset_cluster_type;      /* Same as the type of all psets in this node */
309 
310 	pset_map_t              pset_map;               /* map of associated psets */
311 	_Atomic pset_map_t      pset_idle_map;          /* psets with at least one IDLE CPU */
312 	_Atomic pset_map_t      pset_idle_primary_map;  /* psets with at least one IDLE primary CPU */
313 	_Atomic pset_map_t      pset_non_rt_map;        /* psets with at least one available CPU not running a realtime thread */
314 	_Atomic pset_map_t      pset_non_rt_primary_map;/* psets with at least one available primary CPU not running a realtime thread */
315 	_Atomic pset_map_t      pset_recommended_map;   /* psets with at least one recommended processor */
316 };
317 
318 /* Boot pset node and head of the pset node linked list */
319 extern struct pset_node pset_node0;
320 
321 #if __AMP__
322 extern pset_node_t ecore_node;
323 extern pset_node_t pcore_node;
324 #endif /* __AMP__ */
325 
326 extern queue_head_t tasks, threads, corpse_tasks;
327 extern int tasks_count, terminated_tasks_count, threads_count, terminated_threads_count;
328 decl_lck_mtx_data(extern, tasks_threads_lock);
329 decl_lck_mtx_data(extern, tasks_corpse_lock);
330 
331 /*
332  * The terminated tasks queue should only be inspected elsewhere by stackshot.
333  */
334 extern queue_head_t terminated_tasks;
335 
336 extern queue_head_t terminated_threads;
337 
338 /*
339  * Valid state transitions:
340  * not booted -> starting
341  * starting -> started not running
342  * starting -> started not waited
343  * started not running | not waited -> running
344  * running -> begin shutdown
345  * begin shutdown -> pending offline
346  * pending offline -> system sleep
347  * system sleep -> running
348  * pending offline -> cpu offline -> fully offline
349  * fully offline -> starting
350  */
351 __enum_closed_decl(processor_offline_state_t, uint8_t, {
352 	/* Before it's ever booted */
353 	PROCESSOR_OFFLINE_NOT_BOOTED            = 0,
354 
355 	/* cpu_start is going to be sent */
356 	PROCESSOR_OFFLINE_STARTING              = 1,
357 
358 	/* cpu_start has been sent, but it hasn't started up yet */
359 	PROCESSOR_OFFLINE_STARTED_NOT_RUNNING   = 2,
360 
361 	/* processor has started up and begun running, but nobody has wait-for-start-ed it */
362 	PROCESSOR_OFFLINE_STARTED_NOT_WAITED    = 3,
363 
364 	/* processor is running and someone confirmed this with wait for start, no state change operations are in flight */
365 	PROCESSOR_OFFLINE_RUNNING               = 4,  /* This is the 'normal' state */
366 
367 	/* someone is working on asking to shut this processor down */
368 	PROCESSOR_OFFLINE_BEGIN_SHUTDOWN        = 5,
369 
370 	/* this processor has started itself on its way to offline */
371 	PROCESSOR_OFFLINE_PENDING_OFFLINE       = 6,
372 
373 	/* another processor has confirmed the processor has powered down */
374 	PROCESSOR_OFFLINE_CPU_OFFLINE           = 7,
375 
376 	/* cluster power has been disabled for this processor if it's going to be */
377 	PROCESSOR_OFFLINE_FULLY_OFFLINE         = 8, /* This is the finished powering down state */
378 
379 	/* This processor is the boot processor, and it's in the final system sleep */
380 	PROCESSOR_OFFLINE_FINAL_SYSTEM_SLEEP    = 9,
381 
382 	PROCESSOR_OFFLINE_MAX                   = 10,
383 });
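
/*
 * Illustrative sketch (hypothetical assertion, not part of this interface):
 * enforcing one of the legal transitions listed above, e.g. STARTING may
 * only be entered from NOT_BOOTED or FULLY_OFFLINE:
 *
 *	assert(old_state == PROCESSOR_OFFLINE_NOT_BOOTED ||
 *	    old_state == PROCESSOR_OFFLINE_FULLY_OFFLINE);
 */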
384 
385 /* Locked under the sched_available_cores_lock */
386 extern cpumap_t processor_offline_state_map[PROCESSOR_OFFLINE_MAX];
387 
388 
389 struct processor {
390 	processor_state_t       state;                  /* See above */
391 	bool                    is_SMT;
392 	bool                    is_recommended;
393 	bool                    current_is_NO_SMT;      /* cached TH_SFLAG_NO_SMT of current thread */
394 	bool                    current_is_bound;       /* current thread is bound to this processor */
395 	bool                    current_is_eagerpreempt;/* current thread is TH_SFLAG_EAGERPREEMPT */
396 	bool                    pending_nonurgent_preemption; /* RUNNING_TIMER_PREEMPT is armed */
397 	struct thread          *active_thread;          /* thread running on processor */
398 	struct thread          *idle_thread;            /* this processor's idle thread. */
399 	struct thread          *startup_thread;
400 
401 	processor_set_t         processor_set;  /* assigned set */
402 
403 	/*
404 	 * XXX All current_* fields should be grouped together, as they're
405 	 * updated at the same time.
406 	 */
407 	int                     current_pri;            /* priority of current thread */
408 	sfi_class_id_t          current_sfi_class;      /* SFI class of current thread */
409 	perfcontrol_class_t     current_perfctl_class;  /* Perfcontrol class for current thread */
410 	/*
411 	 * The cluster type recommended for the current thread, used by AMP scheduler
412 	 */
413 	pset_cluster_type_t     current_recommended_pset_type;
414 	thread_urgency_t        current_urgency;        /* cached urgency of current thread */
415 
416 #if CONFIG_THREAD_GROUPS
417 	struct thread_group    *current_thread_group;   /* thread_group of current thread */
418 #endif
419 	int                     starting_pri;           /* priority of current thread as it was when scheduled */
420 	int                     cpu_id;                 /* platform numeric id */
421 
422 	uint64_t                quantum_end;            /* time when current quantum ends */
423 	uint64_t                last_dispatch;          /* time of last dispatch */
424 
425 #if KPERF
426 	uint64_t                kperf_last_sample_time; /* time of last kperf sample */
427 #endif /* KPERF */
428 
429 	uint64_t                deadline;               /* for next realtime thread */
430 	bool                    first_timeslice;        /* has the quantum expired since context switch */
431 
432 	bool                    must_idle;              /* Needs to be forced idle as next selected thread is allowed on this processor */
433 	bool                    next_idle_short;        /* Expecting a response IPI soon, so the next idle period is likely very brief */
434 
435 	bool                    running_timers_active;  /* whether the running timers should fire */
436 	struct timer_call       running_timers[RUNNING_TIMER_MAX];
437 
438 	struct run_queue        runq;                   /* runq for this processor */
439 
440 	struct recount_processor pr_recount;
441 
442 	/*
443 	 * Pointer to primary processor for secondary SMT processors, or a
444 	 * pointer to ourselves for primaries or non-SMT.
445 	 */
446 	processor_t             processor_primary;
447 	processor_t             processor_secondary;
448 	struct ipc_port        *processor_self;         /* port for operations */
449 
450 	processor_t             processor_list;         /* all existing processors */
451 
452 	uint64_t                timer_call_ttd;         /* current timer call time-to-deadline */
453 	processor_reason_t      last_startup_reason;
454 	processor_reason_t      last_shutdown_reason;
455 	processor_reason_t      last_recommend_reason;
456 	processor_reason_t      last_derecommend_reason;
457 
458 	/* locked by processor_start_state_lock */
459 	bool                    processor_instartup;     /* between dostartup and up */
460 
461 	/* Locked by the processor_updown_lock */
462 	bool                    processor_booted;       /* Has gone through processor_boot */
463 
464 	/* Locked by sched_available_cores_lock */
465 	bool                    shutdown_temporary;     /* Shutdown should be transparent to user - don't update CPU counts */
466 	bool                    processor_online;       /* between mark-online and mark-offline, tracked in sched_online_processors */
467 
468 	bool                    processor_inshutdown;   /* is the processor between processor_shutdown and processor_startup */
469 	processor_offline_state_t processor_offline_state;
470 };
471 
472 extern bool sched_all_cpus_offline(void);
473 extern void sched_assert_not_last_online_cpu(int cpu_id);
474 
475 extern processor_t processor_list;
476 decl_simple_lock_data(extern, processor_list_lock);
477 
478 decl_simple_lock_data(extern, processor_start_state_lock);
479 
480 /*
481  * Maximum number of CPUs supported by the scheduler.  bits.h bitmap macros
482  * need to be used to support greater than 64.
483  */
484 #define MAX_SCHED_CPUS          64
485 extern processor_t     __single processor_array[MAX_SCHED_CPUS];    /* array indexed by cpuid */
486 extern processor_set_t __single pset_array[MAX_PSETS];           /* array indexed by pset_id */
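
/*
 * Example (illustrative): walking every registered processor by cpu id,
 * skipping slots that have not been populated:
 *
 *	for (int cpuid = 0; cpuid < MAX_SCHED_CPUS; cpuid++) {
 *		processor_t p = processor_array[cpuid];
 *		if (p != PROCESSOR_NULL) {
 *			... inspect p ...
 *		}
 *	}
 */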
487 
488 extern uint32_t                 processor_avail_count;
489 extern uint32_t                 processor_avail_count_user;
490 extern uint32_t                 primary_processor_avail_count;
491 extern uint32_t                 primary_processor_avail_count_user;
492 
493 /*
494  * All of the operations on a processor that change the processor count
495  * published to userspace and kernel.
496  */
497 __enum_closed_decl(processor_mode_t, uint8_t, {
498 	PCM_RECOMMENDED = 0, /* processor->is_recommended */
499 	PCM_TEMPORARY   = 1, /* processor->shutdown_temporary */
500 	PCM_ONLINE      = 2, /* processor->processor_online */
501 });
502 
503 extern void sched_processor_change_mode_locked(processor_t processor, processor_mode_t pcm_mode, bool value);
504 
505 #define master_processor PERCPU_GET_MASTER(processor)
506 PERCPU_DECL(struct processor, processor);
507 
508 extern processor_t      current_processor(void);
509 
510 /* Lock macros, always acquired and released with interrupts disabled (splsched()) */
511 
512 extern lck_grp_t pset_lck_grp;
513 
514 #if defined(SCHED_PSET_TLOCK)
515 #define pset_lock_init(p)               lck_ticket_init(&(p)->sched_lock, &pset_lck_grp)
516 #define pset_lock(p)                    lck_ticket_lock(&(p)->sched_lock, &pset_lck_grp)
517 #define pset_unlock(p)                  lck_ticket_unlock(&(p)->sched_lock)
518 #define pset_assert_locked(p)           lck_ticket_assert_owned(&(p)->sched_lock)
519 #else /* SCHED_PSET_TLOCK*/
520 #define pset_lock_init(p)               lck_spin_init(&(p)->sched_lock, &pset_lck_grp, NULL)
521 #define pset_lock(p)                    lck_spin_lock_grp(&(p)->sched_lock, &pset_lck_grp)
522 #define pset_unlock(p)                  lck_spin_unlock(&(p)->sched_lock)
523 #define pset_assert_locked(p)           LCK_SPIN_ASSERT(&(p)->sched_lock, LCK_ASSERT_OWNED)
524 #endif /*!SCHED_PSET_TLOCK*/
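
/*
 * Typical usage sketch (illustrative): the pset lock is only taken with
 * interrupts disabled, for example:
 *
 *	spl_t s = splsched();
 *	pset_lock(pset);
 *	... examine or update pset scheduling state ...
 *	pset_unlock(pset);
 *	splx(s);
 */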
525 
526 extern lck_spin_t       pset_node_lock;
527 
528 extern void             processor_bootstrap(void);
529 
530 extern void             processor_init(
531 	processor_t             processor,
532 	int                     cpu_id,
533 	processor_set_t         processor_set);
534 
535 extern void             processor_set_primary(
536 	processor_t             processor,
537 	processor_t             primary);
538 
539 extern void
540 processor_update_offline_state(processor_t processor, processor_offline_state_t new_state);
541 extern void
542 processor_update_offline_state_locked(processor_t processor, processor_offline_state_t new_state);
543 
544 extern void processor_doshutdown(
545 	processor_t             processor,
546 	bool                    is_final_system_sleep);
547 
548 __enum_closed_decl(processor_start_kind_t, uint8_t, {
549 	PROCESSOR_FIRST_BOOT = 0,
550 	PROCESSOR_BEFORE_ENTERING_SLEEP = 1,
551 	PROCESSOR_WAKE_FROM_SLEEP = 2,
552 	PROCESSOR_CLUSTER_POWERDOWN_SUSPEND = 3,
553 	PROCESSOR_CLUSTER_POWERDOWN_RESUME = 4,
554 	PROCESSOR_POWERED_CORES_CHANGE = 5,
555 });
556 
557 extern void             processor_wait_for_start(
558 	processor_t             processor,
559 	processor_start_kind_t  start_kind);
560 
561 extern kern_return_t    processor_start_from_user(
562 	processor_t             processor);
563 extern kern_return_t    processor_start_from_kext(
564 	processor_t             processor);
565 extern kern_return_t    processor_exit_from_kext(
566 	processor_t             processor);
567 
568 
569 extern void processor_start_reason(
570 	processor_t             processor,
571 	processor_reason_t      reason);
572 extern void processor_exit_reason(
573 	processor_t             processor,
574 	processor_reason_t      reason,
575 	bool is_system_sleep);
576 
577 extern kern_return_t sched_processor_exit_user(processor_t processor);
578 extern kern_return_t sched_processor_start_user(processor_t processor);
579 
580 extern bool sched_mark_processor_online(processor_t processor, processor_reason_t reason);
581 extern void sched_mark_processor_offline(processor_t processor, bool is_final_system_sleep);
582 
583 extern lck_mtx_t cluster_powerdown_lock;
584 extern lck_mtx_t processor_updown_lock;
585 
586 extern bool sched_is_in_sleep(void);
587 extern bool sched_is_cpu_init_completed(void);
588 
589 extern void             processor_queue_shutdown(
590 	processor_t             processor);
591 
592 extern processor_set_t  processor_pset(
593 	processor_t             processor);
594 
595 extern pset_node_t      pset_node_root(void);
596 
597 extern processor_set_t  pset_create(
598 	pset_node_t             node,
599 	pset_cluster_type_t     pset_type,
600 	uint32_t                pset_cluster_id,
601 	int                     pset_id);
602 
603 extern void             pset_init(
604 	processor_set_t         pset,
605 	pset_node_t             node);
606 
607 extern processor_set_t  pset_find(
608 	uint32_t                cluster_id,
609 	processor_set_t         default_pset);
610 
611 extern kern_return_t    processor_info_count(
612 	processor_flavor_t      flavor,
613 	mach_msg_type_number_t  *count);
614 
615 extern void processor_cpu_load_info(
616 	processor_t processor,
617 	natural_t ticks[static CPU_STATE_MAX]);
618 
619 extern void             machine_run_count(
620 	uint32_t                count);
621 
622 extern processor_t      machine_choose_processor(
623 	processor_set_t         pset,
624 	processor_t             processor);
625 
626 inline static processor_set_t
627 next_pset(processor_set_t pset)
628 {
629 	pset_map_t map = pset->node->pset_map;
630 
631 	int pset_id = lsb_next(map, pset->pset_id);
632 	if (pset_id == -1) {
633 		pset_id = lsb_first(map);
634 	}
635 
636 	return pset_array[pset_id];
637 }
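
/*
 * Example (illustrative): next_pset() wraps around within a node, so every
 * pset on the node can be visited by iterating until the traversal returns
 * to its starting point:
 *
 *	processor_set_t cur = start_pset;
 *	do {
 *		... inspect cur ...
 *		cur = next_pset(cur);
 *	} while (cur != start_pset);
 */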
638 
639 #define PSET_THING_TASK         0
640 #define PSET_THING_THREAD       1
641 
642 extern pset_cluster_type_t recommended_pset_type(
643 	thread_t                thread);
644 
645 extern void             processor_state_update_idle(
646 	processor_t             processor);
647 
648 extern void             processor_state_update_from_thread(
649 	processor_t             processor,
650 	thread_t                thread,
651 	boolean_t               pset_lock_held);
652 
653 extern void             processor_state_update_explicit(
654 	processor_t             processor,
655 	int                     pri,
656 	sfi_class_id_t          sfi_class,
657 	pset_cluster_type_t     pset_type,
658 	perfcontrol_class_t     perfctl_class,
659 	thread_urgency_t        urgency,
660 	sched_bucket_t          bucket);
661 
662 #define PSET_LOAD_NUMERATOR_SHIFT   16
663 #define PSET_LOAD_FRACTIONAL_SHIFT   4
664 
665 #if CONFIG_SCHED_EDGE
666 
667 extern cluster_type_t pset_type_for_id(uint32_t cluster_id);
668 extern uint64_t sched_pset_cluster_shared_rsrc_load(processor_set_t pset, cluster_shared_rsrc_type_t shared_rsrc_type);
669 
670 /*
671  * The Edge scheduler uses average scheduling latency as the metric for making
672  * thread migration decisions. One component of avg scheduling latency is the load
673  * average on the cluster.
674  *
675  * Load Average Fixed Point Arithmetic
676  *
677  * The load average is maintained as a 24.8 fixed point arithmetic value for precision.
678  * When multiplied by the average execution time, it needs to be rounded up (based on
679  * the most significant bit of the fractional part) for better accuracy. After rounding
680  * up, the whole number part of the value is used as the actual load value for
681  * migrate/steal decisions.
682  */
683 #define SCHED_PSET_LOAD_EWMA_FRACTION_BITS 8
684 #define SCHED_PSET_LOAD_EWMA_ROUND_BIT     (1 << (SCHED_PSET_LOAD_EWMA_FRACTION_BITS - 1))
685 #define SCHED_PSET_LOAD_EWMA_FRACTION_MASK ((1 << SCHED_PSET_LOAD_EWMA_FRACTION_BITS) - 1)
686 
687 inline static int
688 sched_get_pset_load_average(processor_set_t pset, sched_bucket_t sched_bucket)
689 {
690 	uint64_t load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
691 	uint64_t avg_execution_time = os_atomic_load(&pset->pset_execution_time[sched_bucket].pset_avg_thread_execution_time, relaxed);
692 	/*
693 	 * Since a load average of 0 indicates an idle cluster, don't allow an average
694 	 * execution time less than 1us to cause a cluster to appear idle.
695 	 */
696 	avg_execution_time = MAX(avg_execution_time, 1ULL);
697 	return (int)(((load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS) * avg_execution_time);
698 }
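
/*
 * Worked example (illustrative): a load average of 2.5 in 24.8 fixed point
 * is 640; adding SCHED_PSET_LOAD_EWMA_ROUND_BIT (128) and shifting right by
 * SCHED_PSET_LOAD_EWMA_FRACTION_BITS rounds it up to 3.  With an average
 * execution time of 20us, the value returned above is 3 * 20 = 60.
 */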
699 
700 #else /* CONFIG_SCHED_EDGE */
701 inline static int
702 sched_get_pset_load_average(processor_set_t pset, __unused sched_bucket_t sched_bucket)
703 {
704 	return (int)pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT);
705 }
706 #endif /* CONFIG_SCHED_EDGE */
707 
708 extern void sched_update_pset_load_average(processor_set_t pset, uint64_t curtime);
709 extern void sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t delta, uint64_t curtime, sched_bucket_t sched_bucket);
710 
711 inline static void
712 pset_update_processor_state(processor_set_t pset, processor_t processor, uint new_state)
713 {
714 	pset_assert_locked(pset);
715 
716 	uint old_state = processor->state;
717 	uint cpuid = (uint)processor->cpu_id;
718 
719 	assert(processor->processor_set == pset);
720 	assert(bit_test(pset->cpu_bitmask, cpuid));
721 
722 	assert(old_state < PROCESSOR_STATE_LEN);
723 	assert(new_state < PROCESSOR_STATE_LEN);
724 
725 	processor->state = new_state;
726 
727 	bit_clear(pset->cpu_state_map[old_state], cpuid);
728 	bit_set(pset->cpu_state_map[new_state], cpuid);
729 
730 	if (bit_test(pset->cpu_available_map, cpuid) && (new_state < PROCESSOR_IDLE)) {
731 		/* No longer available for scheduling */
732 		bit_clear(pset->cpu_available_map, cpuid);
733 	} else if (!bit_test(pset->cpu_available_map, cpuid) && (new_state >= PROCESSOR_IDLE)) {
734 		/* Newly available for scheduling */
735 		bit_set(pset->cpu_available_map, cpuid);
736 	}
737 
738 	if ((old_state == PROCESSOR_RUNNING) || (new_state == PROCESSOR_RUNNING)) {
739 		sched_update_pset_load_average(pset, 0);
740 		if (new_state == PROCESSOR_RUNNING) {
741 			assert(processor == current_processor());
742 		}
743 	}
744 	if ((old_state == PROCESSOR_IDLE) || (new_state == PROCESSOR_IDLE)) {
745 		if (new_state == PROCESSOR_IDLE) {
746 			bit_clear(pset->realtime_map, cpuid);
747 		}
748 
749 		pset_node_t node = pset->node;
750 
751 		if (bit_count(node->pset_map) == 1) {
752 			/* Node has only a single pset, so skip node pset map updates */
753 			return;
754 		}
755 
756 		if (new_state == PROCESSOR_IDLE) {
757 			if (processor->processor_primary == processor) {
758 				if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
759 					atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
760 				}
761 				if (!bit_test(atomic_load(&node->pset_idle_primary_map), pset->pset_id)) {
762 					atomic_bit_set(&node->pset_idle_primary_map, pset->pset_id, memory_order_relaxed);
763 				}
764 			}
765 			if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
766 				atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
767 			}
768 			if (!bit_test(atomic_load(&node->pset_idle_map), pset->pset_id)) {
769 				atomic_bit_set(&node->pset_idle_map, pset->pset_id, memory_order_relaxed);
770 			}
771 		} else {
772 			cpumap_t idle_map = pset->cpu_state_map[PROCESSOR_IDLE];
773 			if (idle_map == 0) {
774 				/* No more IDLE CPUs */
775 				if (bit_test(atomic_load(&node->pset_idle_map), pset->pset_id)) {
776 					atomic_bit_clear(&node->pset_idle_map, pset->pset_id, memory_order_relaxed);
777 				}
778 			}
779 			if (processor->processor_primary == processor) {
780 				idle_map &= pset->primary_map;
781 				if (idle_map == 0) {
782 					/* No more IDLE primary CPUs */
783 					if (bit_test(atomic_load(&node->pset_idle_primary_map), pset->pset_id)) {
784 						atomic_bit_clear(&node->pset_idle_primary_map, pset->pset_id, memory_order_relaxed);
785 					}
786 				}
787 			}
788 		}
789 	}
790 }
791 
792 decl_simple_lock_data(extern, sched_available_cores_lock);
793 
794 #endif  /* MACH_KERNEL_PRIVATE */
795 
796 #ifdef KERNEL_PRIVATE
797 
798 /* Private KPI */
799 extern processor_t      cpu_to_processor(int cpu);
800 
801 /*!
802  * @function              sched_enable_acc_rail
803  * @abstract              Enable shared voltage rail for a single ACC block.
804  * @param die_id          0-based die number indicating which die the ACC is on.
805  * @param die_cluster_id  0 for the first cluster on the die, 1 for the second, ...
806  * @discussion            Called from the PMGR driver.  On systems where ANE and PACC
807  *                        share a voltage rail, the PMGR driver calls into XNU prior to
808  *                        accessing the ANE hardware, to ensure that the ANE block
809  *                        is powered.  This will block until the rail has been enabled,
810  *                        and it must be called from a schedulable context.
811  *
812  *                        This should not be called on systems without a shared ANE/ACC rail.
813  *                        The caller is responsible for knowing which die/cluster needs to
814  *                        be forced on, in order to allow access to the ANE block.
815  */
816 extern void sched_enable_acc_rail(unsigned int die_id, unsigned int die_cluster_id);
817 
818 /*!
819  * @function              sched_disable_acc_rail
820  * @abstract              Disable voltage rail for a single ACC block.
821  * @param die_id          0-based die number indicating which die the ACC is on.
822  * @param die_cluster_id  0 for the first cluster on the die, 1 for the second, ...
823  * @discussion            Tells XNU that the shared ACC voltage rail can be safely disabled.
824  *                        This may or may not cut voltage immediately.  Must be called from a
825  *                        schedulable context.
826  */
827 extern void sched_disable_acc_rail(unsigned int die_id, unsigned int die_cluster_id);
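
/*
 * Illustrative call sequence from the PMGR driver (sketch; schedulable
 * context and a shared ANE/ACC rail assumed):
 *
 *	sched_enable_acc_rail(die_id, die_cluster_id);
 *	... program the ANE hardware ...
 *	sched_disable_acc_rail(die_id, die_cluster_id);
 */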
828 
829 /*
830  * Private KPI with CLPC
831  *
832  * Update the scheduler with the set of cores that should be used to dispatch new threads.
833  * Non-recommended cores can still be used to field interrupts or run bound threads.
834  * This should be called with interrupts enabled and no scheduler locks held.
835  */
836 #define ALL_CORES_RECOMMENDED   (~(uint64_t)0)
837 #define ALL_CORES_POWERED       (~(uint64_t)0)
838 
839 extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores);
840 extern void sched_perfcontrol_update_recommended_cores_reason(uint64_t recommended_cores, processor_reason_t reason, uint32_t flags);
841 
842 /* Request a change to the powered cores mask that CLPC wants.  Does not block waiting for completion. */
843 extern void sched_perfcontrol_update_powered_cores(uint64_t powered_cores, processor_reason_t reason, uint32_t flags);
844 
845 #endif /* KERNEL_PRIVATE */
846 
847 #ifdef XNU_KERNEL_PRIVATE
848 
849 extern bool support_bootcpu_shutdown;
850 extern bool enable_processor_exit;
851 extern unsigned int processor_count;
852 
853 extern int sched_enable_smt;
854 
855 extern kern_return_t    enable_smt_processors(bool enable);
856 
857 extern void sched_override_available_cores_for_sleep(void);
858 extern void sched_restore_available_cores_after_sleep(void);
859 extern bool processor_should_kprintf(processor_t processor, bool starting);
860 extern void suspend_cluster_powerdown(void);
861 extern void resume_cluster_powerdown(void);
862 extern kern_return_t suspend_cluster_powerdown_from_user(void);
863 extern kern_return_t resume_cluster_powerdown_from_user(void);
864 extern int get_cluster_powerdown_user_suspended(void);
865 
866 extern void processor_wake(
867 	processor_t             processor);
868 extern void processor_sleep(
869 	processor_t             processor);
870 extern void processor_boot(
871 	processor_t             processor);
872 extern kern_return_t    processor_exit_from_user(
873 	processor_t             processor);
874 
875 #endif /* XNU_KERNEL_PRIVATE */
876 
877 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
878 
879 #endif  /* _KERN_PROCESSOR_H_ */
880