// Copyright (c) 2023 Apple Inc. All rights reserved.

#include <stdint.h>
#include <stdio.h>
#include <sys/kdebug.h>

/* Harness interface */
#include "sched_clutch_harness.h"

/*
 * Include non-kernel header dependencies to make up for the equivalent kernel header
 * dependencies which are not safe to compile in a userspace binary
 */
#include <os/overflow.h>
#include <sys/types.h>
#include <os/atomic_private.h>

/* Include kernel header dependencies */
#include "shadow_headers/misc_needed_defines.h"

/* Header for Clutch policy code under-test */
#include <kern/sched_clutch.h>

/* Include non-header dependencies */
void log_tracepoint(uint64_t trace_code, uint64_t a2, uint64_t a3, uint64_t a4, uint64_t a5);
#define KERNEL_DEBUG_CONSTANT_IST(a0, a1, a2, a3, a4, a5, a6) log_tracepoint(a1, a2, a3, a4, a5)
#include "shadow_headers/misc_needed_deps.c"
#include "shadow_headers/sched_prim.c"

/*
 * Mocked HW details
 * For simplicity, we mock a platform with 1 pset comprised of 1 CPU
 */
#define MAX_PSETS 1
#define ml_get_cluster_count() MAX_PSETS
static const uint32_t processor_avail_count = 1;
#define pset_available_cpu_count(x) processor_avail_count
static struct processor_set pset0 = {
    .pset_cluster_id = 0,
};
static struct processor cpu0 = {
    .cpu_id = 0,
    .processor_set = &pset0,
};

/* Mocked-out Clutch functions */
static boolean_t
sched_thread_sched_pri_promoted(thread_t thread)
{
    (void)thread;
    return FALSE;
}

/* Clutch policy code under-test, safe to include now after satisfying its dependencies */
#include <kern/sched_clutch.c>

/* Implementation of sched_clutch_harness.h interface */

int root_bucket_to_highest_pri[TH_BUCKET_SCHED_MAX] = {
    MAXPRI_USER,
    BASEPRI_FOREGROUND,
    BASEPRI_USER_INITIATED,
    BASEPRI_DEFAULT,
    BASEPRI_UTILITY,
    MAXPRI_THROTTLE
};

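/*
 * Clutch tunables and trace codes exported to the test harness, populated from
 * the Clutch-internal values in impl_init_runqueue()
 */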
int clutch_interactivity_score_max = -1;
uint64_t clutch_root_bucket_wcel_us[TH_BUCKET_SCHED_MAX];
uint64_t clutch_root_bucket_warp_us[TH_BUCKET_SCHED_MAX];
unsigned int CLUTCH_THREAD_SELECT = -1;

/* Track harness allocations so we can free the pointers in impl_cleanup_harness() */
struct list_node {
    struct list_node *next;
    void *ptr;
};
static struct list_node *allocated_list = NULL;

static void
track_allocated(void *ptr)
{
    struct list_node *new_node = malloc(sizeof(struct list_node));
    new_node->ptr = ptr;
    new_node->next = allocated_list;
    allocated_list = new_node;
}

/* Implementation of sched_runqueue_harness.h interface */

static uint64_t unique_tg_id = 0;
static uint64_t unique_thread_id = 0;
#define NUM_LOGGED_TRACE_CODES 1
#define NUM_TRACEPOINT_FIELDS 5
static uint64_t logged_trace_codes[NUM_LOGGED_TRACE_CODES];
#define MAX_LOGGED_TRACEPOINTS 1000
static uint64_t *logged_tracepoints = NULL;
static uint32_t curr_tracepoint_ind = 0;
static uint32_t expect_tracepoint_ind = 0;

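/*
 * Initialize the Clutch hierarchy for the mocked single-CPU pset, snapshot the
 * Clutch-internal tunables for use by tests, and reset the harness-internal
 * tracepoint log and ID counters
 */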
void
impl_init_runqueue(void)
{
    /* Init runqueue */
    sched_clutch_init();
    sched_clutch_pset_init(&pset0);
    sched_clutch_processor_init(&cpu0);
    increment_mock_time(100);

    /* Read out Clutch-internal fields for use by the test harness */
    clutch_interactivity_score_max = 2 * sched_clutch_bucket_group_interactive_pri;
    for (int b = TH_BUCKET_FIXPRI; b < TH_BUCKET_SCHED_MAX; b++) {
        clutch_root_bucket_wcel_us[b] = sched_clutch_root_bucket_wcel_us[b] == SCHED_CLUTCH_INVALID_TIME_32 ? 0 : sched_clutch_root_bucket_wcel_us[b];
        clutch_root_bucket_warp_us[b] = sched_clutch_root_bucket_warp_us[b] == SCHED_CLUTCH_INVALID_TIME_32 ? 0 : sched_clutch_root_bucket_warp_us[b];
    }
    CLUTCH_THREAD_SELECT = MACH_SCHED_CLUTCH_THREAD_SELECT;
    logged_trace_codes[0] = MACH_SCHED_CLUTCH_THREAD_SELECT;

    /* Init harness-internal allocators */
    logged_tracepoints = malloc(MAX_LOGGED_TRACEPOINTS * NUM_TRACEPOINT_FIELDS * sizeof(uint64_t));
    track_allocated(logged_tracepoints);
    curr_tracepoint_ind = 0;
    expect_tracepoint_ind = 0;
    unique_tg_id = 0;
    unique_thread_id = 0;
}

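/*
 * Allocate a mock thread group and initialize its sched_clutch. If
 * interactivity_score is not -1, pre-seed the interactivity data of each
 * timeshare clutch bucket group so the group starts out at that score
 */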
struct thread_group *
impl_create_tg(int interactivity_score)
{
    struct thread_group *tg = malloc(sizeof(struct thread_group));
    track_allocated(tg);
    sched_clutch_init_with_thread_group(&tg->tg_sched_clutch, tg);
    if (interactivity_score != -1) {
        for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
            tg->tg_sched_clutch.sc_clutch_groups[bucket].scbg_interactivity_data.scct_count = interactivity_score;
            tg->tg_sched_clutch.sc_clutch_groups[bucket].scbg_interactivity_data.scct_timestamp = mach_absolute_time();
        }
    }
    tg->tg_id = unique_tg_id++;
    return tg;
}

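/*
 * Allocate a minimally-initialized thread with the given priority in the given
 * thread group. The requested root bucket must match the bucket derived from
 * the priority, unless the thread goes in the fixed-priority bucket
 */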
test_thread_t
impl_create_thread(int root_bucket, struct thread_group *tg, int pri)
{
    assert((sched_bucket_t)root_bucket == sched_convert_pri_to_bucket(pri) || (sched_bucket_t)root_bucket == TH_BUCKET_FIXPRI);
    assert(tg != NULL);
    thread_t thread = malloc(sizeof(struct thread));
    track_allocated(thread);
    thread->base_pri = pri;
    thread->sched_pri = pri;
    thread->thread_group = tg;
    thread->th_sched_bucket = root_bucket;
    thread->bound_processor = NULL;
    thread->__runq.runq = PROCESSOR_NULL;
    thread->thread_id = unique_thread_id++;
    return thread;
}

void
impl_set_thread_sched_mode(test_thread_t thread, int mode)
{
    ((thread_t)thread)->sched_mode = (sched_mode_t)mode;
}

void
impl_set_thread_processor_bound(test_thread_t thread)
{
    ((thread_t)thread)->bound_processor = &cpu0;
}

static test_thread_t curr_thread = NULL;

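/*
 * Mark the given thread as running on cpu0, mirroring the processor state
 * updates performed by processor_state_update_from_thread() in the kernel
 */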
void
impl_set_thread_current(test_thread_t thread)
{
    curr_thread = thread;
    cpu0.active_thread = thread;
    cpu0.first_timeslice = true;
    /* Equivalent logic of processor_state_update_from_thread() */
    cpu0.current_pri = ((thread_t)thread)->sched_pri;
    cpu0.current_thread_group = ((thread_t)thread)->thread_group;
    cpu0.current_is_bound = ((thread_t)thread)->bound_processor != PROCESSOR_NULL;
}

void
impl_clear_thread_current(void)
{
    curr_thread = NULL;
    cpu0.active_thread = NULL;
}

void
impl_enqueue_thread(test_thread_t thread)
{
    sched_clutch_processor_enqueue(&cpu0, thread, SCHED_TAILQ);
}

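/*
 * Ask the Clutch policy to choose the next thread to run on cpu0, considering
 * all priorities and with no currently-running thread competing for selection
 */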
test_thread_t
impl_dequeue_thread(void)
{
    return sched_clutch_choose_thread(&cpu0, MINPRI, NULL, 0);
}

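/*
 * Same as impl_dequeue_thread(), but hand the currently-running thread to the
 * Clutch policy so it is weighed against the queued threads during selection
 */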
test_thread_t
impl_dequeue_thread_compare_current(void)
{
    assert(curr_thread != NULL);
    return sched_clutch_choose_thread(&cpu0, MINPRI, curr_thread, 0);
}

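/*
 * Report whether the Clutch policy wants to preempt the thread currently
 * running on cpu0, i.e. whether the context-switch check raises AST_PREEMPT
 */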
bool
impl_processor_csw_check(void)
{
    assert(curr_thread != NULL);
    ast_t preempt_ast = sched_clutch_processor_csw_check(&cpu0);
    return preempt_ast & AST_PREEMPT;
}

static bool
is_logged_clutch_trace_code(uint64_t clutch_trace_code)
{
    for (int i = 0; i < NUM_LOGGED_TRACE_CODES; i++) {
        if (logged_trace_codes[i] == clutch_trace_code) {
            return true;
        }
    }
    return false;
}

static bool
is_logged_trace_code(uint64_t trace_code)
{
    if (KDBG_EXTRACT_CLASS(trace_code) == DBG_MACH && KDBG_EXTRACT_SUBCLASS(trace_code) == DBG_MACH_SCHED_CLUTCH) {
        if (is_logged_clutch_trace_code(KDBG_EXTRACT_CODE(trace_code))) {
            return true;
        }
    }
    return false;
}

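/*
 * Receives every KERNEL_DEBUG_CONSTANT_IST() tracepoint emitted by the code
 * under-test. Tracepoints matching logged_trace_codes[] are recorded, up to
 * MAX_LOGGED_TRACEPOINTS records of NUM_TRACEPOINT_FIELDS values each, for
 * later inspection via impl_pop_tracepoint()
 */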
void
log_tracepoint(uint64_t trace_code, uint64_t a2, uint64_t a3, uint64_t a4, uint64_t a5)
{
    if (is_logged_trace_code(trace_code)) {
        if (curr_tracepoint_ind < MAX_LOGGED_TRACEPOINTS) {
            logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 0] = KDBG_EXTRACT_CODE(trace_code);
            logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 1] = a2;
            logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 2] = a3;
            logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 3] = a4;
            logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 4] = a5;
        } else if (curr_tracepoint_ind == MAX_LOGGED_TRACEPOINTS) {
            printf("Ran out of pre-allocated memory to log tracepoints (%d points)...will no longer log tracepoints\n",
                MAX_LOGGED_TRACEPOINTS);
        }
        curr_tracepoint_ind++;
    }
}

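/*
 * Pop the next logged tracepoint, in the order the tracepoints were emitted.
 * The caller is responsible for not popping more tracepoints than were logged
 */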
void
impl_pop_tracepoint(uint64_t *clutch_trace_code, uint64_t *arg1, uint64_t *arg2, uint64_t *arg3, uint64_t *arg4)
{
    *clutch_trace_code = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 0];
    *arg1 = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 1];
    *arg2 = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 2];
    *arg3 = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 3];
    *arg4 = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 4];
    expect_tracepoint_ind++;
}

void
impl_cleanup_harness(void)
{
    /* Free all of the pointers we tracked in the allocated list */
    struct list_node *curr_node = allocated_list;
    while (curr_node != NULL) {
        free(curr_node->ptr);
        struct list_node *next_node = curr_node->next;
        free(curr_node);
        curr_node = next_node;
    }
    allocated_list = NULL;
}