1 // Copyright (c) 2023 Apple Inc. All rights reserved.
2
3 #include <mach/mach_time.h>
4
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif /* MIN */
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif /* MAX */

/* Overrides necessary for userspace code */
/* Print the panic message and abort the test instead of taking the machine down. */
#define panic(...) ({ printf("Panicking:\n"); printf(__VA_ARGS__); abort(); })
/* Kernel tracing is not available in userspace; compile to nothing. */
#define KDBG(...) ((void)0)
/*
 * kalloc_type(type, count, flags): zero-filled allocation mirroring the
 * kernel's zeroing semantics; the flags argument is ignored in userspace.
 * The count argument is parenthesized so expressions expand correctly.
 */
#define kalloc_type(x, y, z) calloc((size_t)(y), sizeof(x))
/* kfree_type(type, count, ptr): only the pointer matters in userspace. */
#define kfree_type(x, y, z) free(z)
/* Boot-args do not exist in userspace; behave as if the arg was absent. */
#define PE_parse_boot_argn(x, y, z) FALSE

/* The harness is single-threaded, so all locking collapses to no-ops. */
#define pset_lock(x) ((void)(x))
#define pset_unlock(x) ((void)(x))
#define thread_lock(x) ((void)(x))
#define thread_unlock(x) ((void)(x))
23
/* Expected global(s) */
/* Kernel task handle the scheduler code under test expects to exist; never a live task here. */
static task_t kernel_task = NULL;

/* Time conversion to mock the implementation in osfmk/arm/rtclock.c */
/* Cached mach timebase ratio (numer/denom); refreshed on every conversion call. */
static mach_timebase_info_data_t timebase_info;
29 void
clock_interval_to_absolutetime_interval(uint32_t interval,uint32_t scale_factor,uint64_t * result)30 clock_interval_to_absolutetime_interval(uint32_t interval,
31 uint32_t scale_factor,
32 uint64_t * result)
33 {
34 mach_timebase_info(&timebase_info);
35 uint64_t nanosecs = (uint64_t) interval * scale_factor;
36 *result = nanosecs * timebase_info.denom / timebase_info.numer;
37 }
38
39 /*
40 * thread struct from osfmk/kern/thread.h containing only fields needed by
41 * the Clutch runqueue logic, followed by needed functions from osfmk/kern/thread.c
42 * for operating on the __runq field
43 */
struct thread {
	int id;                         /* harness-local identifier for test bookkeeping */
	sched_mode_t sched_mode;        /* scheduling mode (timeshare/fixed/etc.) */
	int16_t sched_pri;              /* scheduled (current) priority */
	int16_t base_pri;               /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */
	queue_chain_t runq_links;       /* run queue links */
	struct { processor_t runq; } __runq; /* internally managed run queue assignment, see above comment */
	sched_bucket_t th_sched_bucket; /* scheduler bucket this thread is accounted under */
	processor_t bound_processor;    /* bound to a processor? */
	int state;                      /* TH_* state bits defined below */
#define TH_WAIT 0x01 /* queued for waiting */
#define TH_RUN 0x04 /* running or on runq */
#define TH_IDLE 0x80 /* idling processor */
	uint64_t thread_id;             /* system wide unique thread-id */
	/* Mocked CPU-time counters consumed by recount_thread_time_mach() below. */
	struct {
		uint64_t user_time;
		uint64_t system_time;
	} mock_recount_time;
	uint64_t sched_time_save;       /* NOTE(review): presumably the last timestamp usage was updated — confirm against sched code */
	natural_t sched_usage;          /* timesharing cpu usage [sched] */
	natural_t pri_shift;            /* usage -> priority from pset */
	natural_t cpu_usage;            /* instrumented cpu usage [%cpu] */
	natural_t cpu_delta;            /* accumulated cpu_usage delta */
	struct thread_group *thread_group; /* owning thread group; its clutch is used for enqueue (see sched_clutch_for_thread) */
	struct priority_queue_entry_stable th_clutch_runq_link;  /* link into a clutch runq priority queue */
	struct priority_queue_entry_sched th_clutch_pri_link;    /* link into a clutch priority queue */
	queue_chain_t th_clutch_timeshare_link;                  /* link into a clutch timeshare queue */
	uint32_t sched_flags;           /* current flag bits */
};
73
74 void
thread_assert_runq_null(__assert_only thread_t thread)75 thread_assert_runq_null(__assert_only thread_t thread)
76 {
77 assert(thread->__runq.runq == PROCESSOR_NULL);
78 }
79
80 void
thread_assert_runq_nonnull(thread_t thread)81 thread_assert_runq_nonnull(thread_t thread)
82 {
83 assert(thread->__runq.runq != PROCESSOR_NULL);
84 }
85
86 void
thread_clear_runq(thread_t thread)87 thread_clear_runq(thread_t thread)
88 {
89 thread_assert_runq_nonnull(thread);
90 thread->__runq.runq = PROCESSOR_NULL;
91 }
92
93 void
thread_set_runq_locked(thread_t thread,processor_t new_runq)94 thread_set_runq_locked(thread_t thread, processor_t new_runq)
95 {
96 thread_assert_runq_null(thread);
97 thread->__runq.runq = new_runq;
98 }
99
100 processor_t
thread_get_runq_locked(thread_t thread)101 thread_get_runq_locked(thread_t thread)
102 {
103 return thread->__runq.runq;
104 }
105
106 uint64_t
thread_tid(thread_t thread)107 thread_tid(
108 thread_t thread)
109 {
110 return thread != THREAD_NULL? thread->thread_id: 0;
111 }
112
/* Satisfy recount dependency needed by osfmk/kern/sched.h */
/* Total mocked CPU time (user + system); argument parenthesized for macro hygiene. */
#define recount_thread_time_mach(thread) ((thread)->mock_recount_time.user_time + (thread)->mock_recount_time.system_time)
115
116 /*
117 * thread_group struct from osfmk/kern/thread_group.c containing only fields
118 * needed by the Clutch runqueue logic, followed by needed functions from
119 * osfmk/kern/thread_group.c
120 */
struct thread_group {
	uint64_t tg_id;                       /* group identifier, returned by thread_group_get_id() */
	struct sched_clutch tg_sched_clutch;  /* the clutch embedded in this group (one per group) */
};
125
126 sched_clutch_t
sched_clutch_for_thread(thread_t thread)127 sched_clutch_for_thread(thread_t thread)
128 {
129 assert(thread->thread_group != NULL);
130 return &(thread->thread_group->tg_sched_clutch);
131 }
132
133 sched_clutch_t
sched_clutch_for_thread_group(struct thread_group * thread_group)134 sched_clutch_for_thread_group(struct thread_group *thread_group)
135 {
136 return &(thread_group->tg_sched_clutch);
137 }
138
139 inline uint64_t
thread_group_get_id(struct thread_group * tg)140 thread_group_get_id(struct thread_group *tg)
141 {
142 return tg->tg_id;
143 }
144
145 /*
146 * processor and processor_set structs from osfmk/kern/processor.h containing
147 * only fields needed by the Clutch runqueue logic
148 */
struct processor_set {
	uint32_t pset_cluster_id;                   /* numeric id of the cluster this pset models */
	struct sched_clutch_root pset_clutch_root;  /* clutch hierarchy root */
};
struct processor {
	processor_set_t processor_set;      /* assigned set */
	struct run_queue runq;              /* runq for this processor */
	struct thread *active_thread;       /* thread running on processor */
	bool first_timeslice;               /* has the quantum expired since context switch */
	int current_pri;                    /* priority of current thread */
	int cpu_id;                         /* platform numeric id */
	processor_t processor_primary;      /* NOTE(review): presumably the primary SMT sibling as in osfmk — confirm */
	bool current_is_bound;              /* current thread is bound to this processor */
	struct thread_group *current_thread_group; /* thread_group of current thread */
};
164