xref: /xnu-11215/osfmk/tests/kernel_tests.c (revision 4f1223e8)
1 /*
2  * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/kern_types.h>
30 #include <kern/assert.h>
31 #include <kern/host.h>
32 #include <kern/macro_help.h>
33 #include <kern/sched.h>
34 #include <kern/locks.h>
35 #include <kern/sched_prim.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread_call.h>
38 #include <kern/zalloc_internal.h>
39 #include <kern/kalloc.h>
40 #include <tests/ktest.h>
41 #include <sys/errno.h>
42 #include <sys/random.h>
43 #include <kern/kern_cdata.h>
44 #include <machine/lowglobals.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_object_internal.h>
47 #include <vm/vm_protos.h>
48 #include <vm/vm_iokit.h>
49 #include <string.h>
50 #include <kern/kern_apfs_reflock.h>
51 
52 #if !(DEVELOPMENT || DEBUG)
53 #error "Testing is not enabled on RELEASE configurations"
54 #endif
55 
56 #include <tests/xnupost.h>
57 
58 extern boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper);
59 __private_extern__ void qsort(void * a, size_t n, size_t es, int (*cmp)(const void *, const void *));
60 
61 uint32_t total_post_tests_count = 0;
62 void xnupost_reset_panic_widgets(void);
63 
64 /* test declarations */
65 kern_return_t zalloc_test(void);
66 kern_return_t RandomULong_test(void);
67 kern_return_t kcdata_api_test(void);
68 kern_return_t ts_kernel_primitive_test(void);
69 kern_return_t ts_kernel_sleep_inheritor_test(void);
70 kern_return_t ts_kernel_gate_test(void);
71 kern_return_t ts_kernel_turnstile_chain_test(void);
72 kern_return_t ts_kernel_timingsafe_bcmp_test(void);
73 
74 #if __ARM_VFP__
75 extern kern_return_t vfp_state_test(void);
76 #endif
77 
78 extern kern_return_t kprintf_hhx_test(void);
79 
80 #if defined(__arm64__)
81 kern_return_t pmap_coredump_test(void);
82 #endif
83 
84 extern kern_return_t console_serial_test(void);
85 extern kern_return_t console_serial_parallel_log_tests(void);
86 extern kern_return_t test_printf(void);
87 extern kern_return_t test_os_log(void);
88 extern kern_return_t test_os_log_handles(void);
89 extern kern_return_t test_os_log_parallel(void);
90 extern kern_return_t bitmap_post_test(void);
91 extern kern_return_t counter_tests(void);
92 #if ML_IO_TIMEOUTS_ENABLED
93 extern kern_return_t ml_io_timeout_test(void);
94 #endif
95 
96 #ifdef __arm64__
97 extern kern_return_t arm64_munger_test(void);
98 #if __ARM_PAN_AVAILABLE__
99 extern kern_return_t arm64_pan_test(void);
100 #endif
101 #if defined(HAS_APPLE_PAC)
102 extern kern_return_t arm64_ropjop_test(void);
103 #endif /* defined(HAS_APPLE_PAC) */
104 #if CONFIG_SPTM
105 extern kern_return_t arm64_panic_lockdown_test(void);
106 #endif /* CONFIG_SPTM */
107 #if HAS_SPECRES
108 extern kern_return_t specres_test(void);
109 #endif /* HAS_SPECRES */
110 #if BTI_ENFORCED
111 kern_return_t arm64_bti_test(void);
112 #endif /* BTI_ENFORCED */
113 extern kern_return_t arm64_speculation_guard_test(void);
114 #endif /* __arm64__ */
115 
116 extern kern_return_t test_thread_call(void);
117 
118 
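/*
 * Only one panic "widget" (callback) can be registered at a time.
 * xnupost_process_kdb_stop() invokes it when a panic or assertion fires
 * during POST, so an expected panic can be turned into a test result
 * instead of halting the system.
 */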
119 struct xnupost_panic_widget xt_panic_widgets = {.xtp_context_p = NULL,
120 	                                        .xtp_outval_p = NULL,
121 	                                        .xtp_func_name = NULL,
122 	                                        .xtp_func = NULL};
123 
124 struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test),
125 	                                   XNUPOST_TEST_CONFIG_BASIC(RandomULong_test),
126 	                                   XNUPOST_TEST_CONFIG_BASIC(test_printf),
127 	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_handles),
128 	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log),
129 	                                   XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel),
130 #ifdef __arm64__
131 	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
132 #if __ARM_PAN_AVAILABLE__
133 	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test),
134 #endif
135 #if defined(HAS_APPLE_PAC)
136 	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_ropjop_test),
137 #endif /* defined(HAS_APPLE_PAC) */
138 #if CONFIG_SPTM
139 	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_panic_lockdown_test),
140 #endif /* CONFIG_SPTM */
141 	                                   XNUPOST_TEST_CONFIG_BASIC(arm64_speculation_guard_test),
142 #endif /* __arm64__ */
143 	                                   XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test),
144 	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_test),
145 	                                   XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests),
146 #if defined(__arm64__)
147 	                                   XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test),
148 #endif
149 	                                   XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test),
150 	                                   //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests)
151 	                                   XNUPOST_TEST_CONFIG_BASIC(test_thread_call),
152 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test),
153 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test),
154 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test),
155 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test),
156 	                                   XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test),
157 	                                   XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test),
158 #if __ARM_VFP__
159 	                                   XNUPOST_TEST_CONFIG_BASIC(vfp_state_test),
160 #endif
161 	                                   XNUPOST_TEST_CONFIG_BASIC(vm_tests),
162 	                                   XNUPOST_TEST_CONFIG_BASIC(counter_tests),
163 #if ML_IO_TIMEOUTS_ENABLED
164 	                                   XNUPOST_TEST_CONFIG_BASIC(ml_io_timeout_test),
165 #endif
166 #if HAS_SPECRES
167 	                                   XNUPOST_TEST_CONFIG_BASIC(specres_test),
168 #endif
169 };
170 
171 uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t);
172 
173 #define POSTARGS_RUN_TESTS 0x1
174 #define POSTARGS_CONTROLLER_AVAILABLE 0x2
175 #define POSTARGS_CUSTOM_TEST_RUNLIST 0x4
176 uint64_t kernel_post_args = 0x0;
177 
178 /* static variables to hold state */
179 static kern_return_t parse_config_retval = KERN_INVALID_CAPABILITY;
180 static char kernel_post_test_configs[256];
181 boolean_t xnupost_should_run_test(uint32_t test_num);
182 
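/*
 * Parse the kernPOST and kernPOST_config boot-args once and cache the result.
 * kernPOST carries the POSTARGS_* flag bits; kernPOST_config optionally
 * supplies a comma-separated list of test-number ranges, which enables
 * POSTARGS_CUSTOM_TEST_RUNLIST.
 */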
183 kern_return_t
184 xnupost_parse_config()
185 {
186 	if (parse_config_retval != KERN_INVALID_CAPABILITY) {
187 		return parse_config_retval;
188 	}
189 	PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args));
190 
191 	if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) {
192 		kernel_post_args |= POSTARGS_CUSTOM_TEST_RUNLIST;
193 	}
194 
195 	if (kernel_post_args != 0) {
196 		parse_config_retval = KERN_SUCCESS;
197 		goto out;
198 	}
199 	parse_config_retval = KERN_NOT_SUPPORTED;
200 out:
201 	return parse_config_retval;
202 }
203 
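/*
 * Returns TRUE when test_num falls inside one of the comma-separated ranges
 * parsed from kernPOST_config, or unconditionally when no custom runlist
 * was provided.
 */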
204 boolean_t
205 xnupost_should_run_test(uint32_t test_num)
206 {
207 	if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
208 		int64_t begin = 0, end = 999999;
209 		char * b = kernel_post_test_configs;
210 		while (*b) {
211 			get_range_bounds(b, &begin, &end);
212 			if (test_num >= begin && test_num <= end) {
213 				return TRUE;
214 			}
215 
216 			/* skip to the next "," */
217 			while (*b != ',') {
218 				if (*b == '\0') {
219 					return FALSE;
220 				}
221 				b++;
222 			}
223 			/* skip past the ',' */
224 			b++;
225 		}
226 		return FALSE;
227 	}
228 	return TRUE;
229 }
230 
231 kern_return_t
232 xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count)
233 {
234 	if (KERN_SUCCESS != xnupost_parse_config()) {
235 		return KERN_FAILURE;
236 	}
237 
238 	xnupost_test_t testp;
239 	for (uint32_t i = 0; i < test_count; i++) {
240 		testp = &test_list[i];
241 		if (testp->xt_test_num == 0) {
242 			assert(total_post_tests_count < UINT16_MAX);
243 			testp->xt_test_num = (uint16_t)++total_post_tests_count;
244 		}
245 		/* make sure the boot-arg based test run list is honored */
246 		if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) {
247 			testp->xt_config |= XT_CONFIG_IGNORE;
248 			if (xnupost_should_run_test(testp->xt_test_num)) {
249 				testp->xt_config &= ~(XT_CONFIG_IGNORE);
250 				testp->xt_config |= XT_CONFIG_RUN;
251 				printf("\n[TEST] #%u is enabled by the custom runlist", testp->xt_test_num);
252 			}
253 		}
254 		printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval,
255 		    testp->xt_config);
256 	}
257 
258 	return KERN_SUCCESS;
259 }
260 
261 kern_return_t
262 xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count)
263 {
264 	uint32_t i = 0;
265 	int retval = KERN_SUCCESS;
266 	int test_retval = KERN_FAILURE;
267 
268 	if ((kernel_post_args & POSTARGS_RUN_TESTS) == 0) {
269 		printf("No POST boot-arg set.\n");
270 		return retval;
271 	}
272 
273 	T_START;
274 	xnupost_test_t testp;
275 	for (; i < test_count; i++) {
276 		xnupost_reset_panic_widgets();
277 		T_TESTRESULT = T_STATE_UNRESOLVED;
278 		testp = &test_list[i];
279 		T_BEGIN(testp->xt_name);
280 		testp->xt_begin_time = mach_absolute_time();
281 		testp->xt_end_time   = testp->xt_begin_time;
282 
283 		/*
284 		 * If the test is designed to panic and no controller
285 		 * is available, mark it as SKIPPED
286 		 */
287 		if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) {
288 			T_SKIP(
289 				"Test expects panic but "
290 				"no controller is present");
291 			testp->xt_test_actions = XT_ACTION_SKIPPED;
292 			continue;
293 		}
294 
295 		if ((testp->xt_config & XT_CONFIG_IGNORE)) {
296 			T_SKIP("Test is marked as XT_CONFIG_IGNORE");
297 			testp->xt_test_actions = XT_ACTION_SKIPPED;
298 			continue;
299 		}
300 
301 		test_retval = testp->xt_func();
302 		if (T_STATE_UNRESOLVED == T_TESTRESULT) {
303 			/*
304 			 * If the test result is unresolved because no T_* test cases were called,
305 			 * determine the test result based on the return value of the test function.
306 			 */
307 			if (KERN_SUCCESS == test_retval) {
308 				T_PASS("Test passed because retval == KERN_SUCCESS");
309 			} else {
310 				T_FAIL("Test failed because retval == KERN_FAILURE");
311 			}
312 		}
313 		T_END;
314 		testp->xt_retval = T_TESTRESULT;
315 		testp->xt_end_time = mach_absolute_time();
316 		if (testp->xt_retval == testp->xt_expected_retval) {
317 			testp->xt_test_actions = XT_ACTION_PASSED;
318 		} else {
319 			testp->xt_test_actions = XT_ACTION_FAILED;
320 		}
321 	}
322 	T_FINISH;
323 	return retval;
324 }
325 
326 kern_return_t
327 kernel_list_tests()
328 {
329 	return xnupost_list_tests(kernel_post_tests, kernel_post_tests_count);
330 }
331 
332 kern_return_t
333 kernel_do_post()
334 {
335 	return xnupost_run_tests(kernel_post_tests, kernel_post_tests_count);
336 }
337 
338 kern_return_t
339 xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval)
340 {
341 	if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) {
342 		return KERN_RESOURCE_SHORTAGE;
343 	}
344 
345 	xt_panic_widgets.xtp_context_p = context;
346 	xt_panic_widgets.xtp_func      = funcp;
347 	xt_panic_widgets.xtp_func_name = funcname;
348 	xt_panic_widgets.xtp_outval_p  = outval;
349 
350 	return KERN_SUCCESS;
351 }
352 
353 void
354 xnupost_reset_panic_widgets()
355 {
356 	bzero(&xt_panic_widgets, sizeof(xt_panic_widgets));
357 }
358 
359 kern_return_t
360 xnupost_process_kdb_stop(const char * panic_s)
361 {
362 	xt_panic_return_t retval         = 0;
363 	struct xnupost_panic_widget * pw = &xt_panic_widgets;
364 	const char * name = "unknown";
365 	if (xt_panic_widgets.xtp_func_name) {
366 		name = xt_panic_widgets.xtp_func_name;
367 	}
368 
369 	/* bail out early if kernPOST is not set */
370 	if (kernel_post_args == 0) {
371 		return KERN_INVALID_CAPABILITY;
372 	}
373 
374 	if (xt_panic_widgets.xtp_func) {
375 		T_LOG("%s: Calling out to widget: %s", __func__, xt_panic_widgets.xtp_func_name);
376 		retval = pw->xtp_func(panic_s, pw->xtp_context_p, pw->xtp_outval_p);
377 	} else {
378 		return KERN_INVALID_CAPABILITY;
379 	}
380 
381 	switch (retval) {
382 	case XT_RET_W_SUCCESS:
383 		T_EXPECT_EQ_INT(retval, XT_RET_W_SUCCESS, "%s reported successful handling. Returning from kdb_stop.", name);
384 		/* KERN_SUCCESS means return from panic/assertion */
385 		return KERN_SUCCESS;
386 
387 	case XT_RET_W_FAIL:
388 		T_FAIL("%s reported XT_RET_W_FAIL: Returning from kdb_stop", name);
389 		return KERN_SUCCESS;
390 
391 	case XT_PANIC_W_FAIL:
392 		T_FAIL("%s reported XT_PANIC_W_FAIL: Continuing to kdb_stop", name);
393 		return KERN_FAILURE;
394 
395 	case XT_PANIC_W_SUCCESS:
396 		T_EXPECT_EQ_INT(retval, XT_PANIC_W_SUCCESS, "%s reported successful testcase. But continuing to kdb_stop.", name);
397 		return KERN_FAILURE;
398 
399 	case XT_PANIC_UNRELATED:
400 	default:
401 		T_LOG("UNRELATED: Continuing to kdb_stop.");
402 		return KERN_FAILURE;
403 	}
404 }
405 
406 xt_panic_return_t
407 _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval)
408 {
409 	xt_panic_return_t ret = XT_PANIC_UNRELATED;
410 
411 	if (NULL != strnstr(__DECONST(char *, s), (char *)str_to_match, strlen(s))) {
412 		T_LOG("%s: kdb_stop string: '%s' MATCHED string: '%s'", __func__, s, (char *)str_to_match);
413 		ret = XT_RET_W_SUCCESS;
414 	}
415 
416 	if (outval) {
417 		*outval = (void *)(uintptr_t)ret;
418 	}
419 	return ret;
420 }
421 
422 kern_return_t
423 xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count)
424 {
425 	uint32_t i = 0;
426 	xnupost_test_t testp;
427 	for (; i < test_count; i++) {
428 		testp                  = &test_list[i];
429 		testp->xt_begin_time   = 0;
430 		testp->xt_end_time     = 0;
431 		testp->xt_test_actions = XT_ACTION_NONE;
432 		testp->xt_retval       = -1;
433 	}
434 	return KERN_SUCCESS;
435 }
436 
437 
438 kern_return_t
439 zalloc_test(void)
440 {
441 	zone_t test_zone;
442 	void * test_ptr;
443 
444 	T_SETUPBEGIN;
445 	test_zone = zone_create("test_uint64_zone", sizeof(uint64_t),
446 	    ZC_DESTRUCTIBLE);
447 	T_ASSERT_NOTNULL(test_zone, NULL);
448 
449 	T_ASSERT_EQ_INT(test_zone->z_elems_free, 0, NULL);
450 	T_SETUPEND;
451 
452 	T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL);
453 
454 	zfree(test_zone, test_ptr);
455 
456 	/* A sample report for perfdata */
457 	T_PERF("num_threads_at_ktest", threads_count, "count", "# of threads in system at zalloc_test");
458 
459 	return KERN_SUCCESS;
460 }
461 
462 /*
463  * Function used for comparison by qsort()
464  */
465 static int
466 compare_numbers_ascending(const void * a, const void * b)
467 {
468 	const uint64_t x = *(const uint64_t *)a;
469 	const uint64_t y = *(const uint64_t *)b;
470 	if (x < y) {
471 		return -1;
472 	} else if (x > y) {
473 		return 1;
474 	} else {
475 		return 0;
476 	}
477 }
478 
479 /*
480  * Count the number of bits set in a number (population count),
481  * using the compiler's __builtin_popcountll() builtin.
482  */
483 static int
484 count_bits(uint64_t number)
485 {
486 	return __builtin_popcountll(number);
487 }
488 
489 kern_return_t
490 RandomULong_test()
491 {
492 /*
493  * Randomness test for RandomULong()
494  *
495  * This test verifies that:
496  *  a. RandomULong works
497  *  b. The generated numbers match the following entropy criteria:
498  *     For a thousand iterations, verify:
499  *          1. mean entropy > 12 bits
500  *          2. min entropy > 4 bits
501  *          3. No Duplicate
502  *          4. No incremental/decremental pattern in a window of 3
503  *          5. No Zero
504  *          6. No -1
505  *
506  * <rdar://problem/22526137> Add test to increase code coverage for /dev/random
507  */
508 
509 #define CONF_MIN_ENTROPY 4
510 #define CONF_MEAN_ENTROPY 12
511 #define CONF_ITERATIONS 1000
512 #define CONF_WINDOW_SIZE 3
513 #define CONF_WINDOW_TREND_LIMIT ((CONF_WINDOW_SIZE / 2) + (CONF_WINDOW_SIZE & 1)) >> 0
514 
515 	int i;
516 	uint32_t min_bit_entropy, max_bit_entropy, bit_entropy;
517 	uint32_t aggregate_bit_entropy = 0;
518 	uint32_t mean_bit_entropy      = 0;
519 	uint64_t numbers[CONF_ITERATIONS];
520 	min_bit_entropy = UINT32_MAX;
521 	max_bit_entropy = 0;
522 
523 	/*
524 	 * TEST 1: Number generation and basic validation
525 	 * Check that no value is zero (no bits set) or -1 (all bits set) and that no error occurs
526 	 */
527 	for (i = 0; i < CONF_ITERATIONS; i++) {
528 		read_random(&numbers[i], sizeof(numbers[i]));
529 		if (numbers[i] == 0) {
530 			T_ASSERT_NE_ULLONG(numbers[i], 0, "read_random returned zero value.");
531 		}
532 		if (numbers[i] == UINT64_MAX) {
533 			T_ASSERT_NE_ULLONG(numbers[i], UINT64_MAX, "read_random returned -1.");
534 		}
535 	}
536 	T_PASS("Generated %d non-zero random numbers with at least one bit reset.", CONF_ITERATIONS);
537 
538 	/*
539 	 * TEST 2: Mean and Min Bit Entropy
540 	 * Check the bit entropy and its mean over the generated numbers.
541 	 */
542 	for (i = 1; i < CONF_ITERATIONS; i++) {
543 		bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]);
544 		if (bit_entropy < min_bit_entropy) {
545 			min_bit_entropy = bit_entropy;
546 		}
547 		if (bit_entropy > max_bit_entropy) {
548 			max_bit_entropy = bit_entropy;
549 		}
550 
551 		if (bit_entropy < CONF_MIN_ENTROPY) {
552 			T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY,
553 			    "Number of differing bits in consecutive numbers does not satisfy the min criteria.");
554 		}
555 
556 		aggregate_bit_entropy += bit_entropy;
557 	}
558 	T_PASS("Passed the min bit entropy expectation of %d bits", CONF_MIN_ENTROPY);
559 
560 	mean_bit_entropy = aggregate_bit_entropy / CONF_ITERATIONS;
561 	T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits.");
562 	T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy);
563 	T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS,
564 	    min_bit_entropy, mean_bit_entropy, max_bit_entropy);
565 	T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better");
566 	T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better");
567 	T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better");
568 
569 	/*
570 	 * TEST 3: Incremental Pattern Search
571 	 * Check that incremental/decremental pattern does not exist in the given window
572 	 */
573 	int window_start, window_end, trend;
574 	window_start = window_end = trend = 0;
575 
576 	do {
577 		/*
578 		 * Set the window
579 		 */
580 		window_end = window_start + CONF_WINDOW_SIZE - 1;
581 		if (window_end >= CONF_ITERATIONS) {
582 			window_end = CONF_ITERATIONS - 1;
583 		}
584 
585 		trend = 0;
586 		for (i = window_start; i < window_end; i++) {
587 			if (numbers[i] < numbers[i + 1]) {
588 				trend++;
589 			} else if (numbers[i] > numbers[i + 1]) {
590 				trend--;
591 			}
592 		}
593 		/*
594 		 * Check that there is no increasing or decreasing trend
595 		 * i.e. trend <= ceil(window_size/2)
596 		 */
597 		if (trend < 0) {
598 			trend = -trend;
599 		}
600 		if (trend > CONF_WINDOW_TREND_LIMIT) {
601 			T_ASSERT_LE_INT(trend, CONF_WINDOW_TREND_LIMIT, "Found increasing/decreasing trend in random numbers.");
602 		}
603 
604 		/*
605 		 * Move to the next window
606 		 */
607 		window_start++;
608 	} while (window_start < (CONF_ITERATIONS - 1));
609 	T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE);
610 
611 	/*
612 	 * TEST 4: Find Duplicates
613 	 * Check no duplicate values are generated
614 	 */
615 	qsort(numbers, CONF_ITERATIONS, sizeof(numbers[0]), compare_numbers_ascending);
616 	for (i = 1; i < CONF_ITERATIONS; i++) {
617 		if (numbers[i] == numbers[i - 1]) {
618 			T_ASSERT_NE_ULLONG(numbers[i], numbers[i - 1], "read_random generated duplicate values.");
619 		}
620 	}
621 	T_PASS("Test did not find any duplicates as expected.");
622 
623 	return KERN_SUCCESS;
624 }
625 
626 
627 /* KCDATA kernel api tests */
628 static struct kcdata_descriptor test_kc_data;//, test_kc_data2;
629 struct sample_disk_io_stats {
630 	uint64_t disk_reads_count;
631 	uint64_t disk_reads_size;
632 	uint64_t io_priority_count[4];
633 	uint64_t io_priority_size;
634 } __attribute__((packed));
635 
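/*
 * Subtype descriptors matching the packed layout of struct sample_disk_io_stats
 * above; kcdata_api_test() registers them as a custom kcdata type definition.
 */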
636 struct kcdata_subtype_descriptor test_disk_io_stats_def[] = {
637 	{
638 		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
639 		.kcs_elem_type = KC_ST_UINT64,
640 		.kcs_elem_offset = 0 * sizeof(uint64_t),
641 		.kcs_elem_size = sizeof(uint64_t),
642 		.kcs_name = "disk_reads_count"
643 	},
644 	{
645 		.kcs_flags = KCS_SUBTYPE_FLAGS_NONE,
646 		.kcs_elem_type = KC_ST_UINT64,
647 		.kcs_elem_offset = 1 * sizeof(uint64_t),
648 		.kcs_elem_size = sizeof(uint64_t),
649 		.kcs_name = "disk_reads_size"
650 	},
651 	{
652 		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
653 		.kcs_elem_type = KC_ST_UINT64,
654 		.kcs_elem_offset = 2 * sizeof(uint64_t),
655 		.kcs_elem_size = KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)),
656 		.kcs_name = "io_priority_count"
657 	},
658 	{
659 		.kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY,
660 		.kcs_elem_type = KC_ST_UINT64,
661 		.kcs_elem_offset = (2 + 4) * sizeof(uint64_t),
662 		.kcs_elem_size = sizeof(uint64_t),
663 		.kcs_name = "io_priority_size"
664 	},
665 };
666 
667 kern_return_t
668 kcdata_api_test(void)
669 {
670 	kern_return_t retval = KERN_SUCCESS;
671 
672 	/* test for NULL input */
673 	retval = kcdata_memory_static_init(NULL, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_STACKSHOT, 100, KCFLAG_USE_MEMCOPY);
674 	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_memory_static_init with NULL struct");
675 
676 	/* another negative test with buffer size < 32 bytes */
677 	char data[30] = "sample_disk_io_stats";
678 	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data),
679 	    KCFLAG_USE_MEMCOPY);
680 	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE");
681 
682 	/* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */
683 	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE,
684 	    KCFLAG_USE_COPYOUT);
685 	T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS");
686 
687 	/* test with successful kcdata_memory_static_init */
688 	test_kc_data.kcd_length   = 0xdeadbeef;
689 
690 	void *data_ptr = kalloc_data(PAGE_SIZE, Z_WAITOK_ZERO_NOFAIL);
691 	mach_vm_address_t address = (mach_vm_address_t)data_ptr;
692 	T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
693 
694 	retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
695 	    KCFLAG_USE_MEMCOPY);
696 
697 	T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
698 
699 	T_ASSERT(test_kc_data.kcd_length == PAGE_SIZE, "kcdata length is set correctly to PAGE_SIZE.");
700 	T_LOG("addr_begin 0x%llx and end 0x%llx and address 0x%llx", test_kc_data.kcd_addr_begin, test_kc_data.kcd_addr_end, address);
701 	T_ASSERT(test_kc_data.kcd_addr_begin == address, "kcdata begin address is correct 0x%llx", (uint64_t)address);
702 
703 	/* verify we have BEGIN and END HEADERS set */
704 	uint32_t * mem = (uint32_t *)address;
705 	T_ASSERT(mem[0] == KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer does contain KCDATA_BUFFER_BEGIN_STACKSHOT");
706 	T_ASSERT(mem[4] == KCDATA_TYPE_BUFFER_END, "KCDATA_TYPE_BUFFER_END is appended as expected");
707 	T_ASSERT(mem[5] == 0, "size of BUFFER_END tag is zero");
708 
709 	/* verify kcdata_memory_get_used_bytes() */
710 	uint64_t bytes_used = 0;
711 	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
712 	T_ASSERT(bytes_used == (2 * sizeof(struct kcdata_item)), "bytes_used api returned expected %llu", bytes_used);
713 
714 	/* test for kcdata_get_memory_addr() */
715 
716 	mach_vm_address_t user_addr = 0;
717 	/* negative test for NULL user_addr AND/OR kcdata_descriptor */
718 	retval = kcdata_get_memory_addr(NULL, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
719 	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL struct -> KERN_INVALID_ARGUMENT");
720 
721 	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), NULL);
722 	T_ASSERT(retval == KERN_INVALID_ARGUMENT, "kcdata_get_memory_addr with NULL user_addr -> KERN_INVALID_ARGUMENT");
723 
724 	/* successful case with size 0. This is expected to succeed, since an item type alone can serve as a boolean flag */
725 	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_USECS_SINCE_EPOCH, 0, &user_addr);
726 	T_ASSERT(retval == KERN_SUCCESS, "Successfully got kcdata entry for 0 size data");
727 	T_ASSERT(user_addr == test_kc_data.kcd_addr_end, "0 sized data did not add any extra buffer space");
728 
729 	/* successful case with valid size. */
730 	user_addr = 0xdeadbeef;
731 	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &user_addr);
732 	T_ASSERT(retval == KERN_SUCCESS, "kcdata_get_memory_addr with valid values succeeded.");
733 	T_ASSERT(user_addr > test_kc_data.kcd_addr_begin, "user_addr is in range of buffer");
734 	T_ASSERT(user_addr < test_kc_data.kcd_addr_end, "user_addr is in range of buffer");
735 
736 	/* Try creating an item with really large size */
737 	user_addr  = 0xdeadbeef;
738 	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
739 	retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr);
740 	T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE");
741 	T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr");
742 	T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected");
743 
744 	/* verify convenience functions for uint32_with_description */
745 	retval = kcdata_add_uint32_with_description(&test_kc_data, 0xbdc0ffee, "This is bad coffee");
746 	T_ASSERT(retval == KERN_SUCCESS, "add uint32 with description succeeded.");
747 
748 	retval = kcdata_add_uint64_with_description(&test_kc_data, 0xf001badc0ffee, "another 8 byte no.");
749 	T_ASSERT(retval == KERN_SUCCESS, "add uint64 with desc succeeded.");
750 
751 	/* verify creating an KCDATA_TYPE_ARRAY here */
752 	user_addr  = 0xdeadbeef;
753 	bytes_used = kcdata_memory_get_used_bytes(&test_kc_data);
754 	/* save memory address where the array will come up */
755 	/* save the memory address where the array item will be placed */
756 
757 	retval = kcdata_get_memory_addr_for_array(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), 20, &user_addr);
758 	T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible");
759 	T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected");
760 	T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range");
761 	kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)));
762 	T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20");
763 
764 	/* FIXME add tests here for ranges of sizes and counts */
765 
766 	T_ASSERT(item_p->flags == (((uint64_t)KCDATA_TYPE_MACH_ABSOLUTE_TIME << 32) | 20), "flags are set correctly");
767 
768 	/* test adding of custom type */
769 
770 	retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0],
771 	    sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor));
772 	T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded.");
773 
774 	kfree_data(data_ptr, PAGE_SIZE);
775 	return KERN_SUCCESS;
776 }
777 
778 /*
779  *  kern_return_t
780  *  kcdata_api_assert_tests()
781  *  {
782  *       kern_return_t retval       = 0;
783  *       void * assert_check_retval = NULL;
784  *       test_kc_data2.kcd_length   = 0xdeadbeef;
785  *       mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE);
786  *       T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data.");
787  *
788  *       retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE,
789  *                                          KCFLAG_USE_MEMCOPY);
790  *
791  *       T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call");
792  *
793  *       retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval);
794  *       T_ASSERT(retval == KERN_SUCCESS, "registered assert widget");
795  *
796  *       // this will assert
797  *       retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata");
798  *       T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly");
799  *       T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit");
800  *
801  *       return KERN_SUCCESS;
802  *  }
803  */
804 
805 #if defined(__arm64__)
806 
807 #include <arm/pmap.h>
808 
809 #define MAX_PMAP_OBJECT_ELEMENT 100000
810 
811 extern struct vm_object pmap_object_store; /* store pt pages */
812 extern unsigned long gPhysBase, gPhysSize, first_avail;
813 
814 /*
815  * Define macros to traverse the pmap object structures and extract the
816  * physical page number using information from the low globals only.
817  * This emulates how Astris extracts the same information from a coredump.
818  */
819 #if defined(__arm64__)
820 
821 static inline uintptr_t
822 astris_vm_page_unpack_ptr(uintptr_t p)
823 {
824 	if (!p) {
825 		return (uintptr_t)0;
826 	}
827 
828 	return (p & lowGlo.lgPmapMemFromArrayMask)
829 	       ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize
830 	       : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift);
831 }
832 
833 // assume next pointer is the first element
834 #define astris_vm_page_queue_next(qc) (astris_vm_page_unpack_ptr(*((uint32_t *)(qc))))
835 
836 #endif
837 
838 #define astris_vm_page_queue_first(q) astris_vm_page_queue_next(q)
839 
840 #define astris_vm_page_queue_end(q, qe) ((q) == (qe))
841 
842 #define astris_vm_page_queue_iterate(head, elt)                                                           \
843 	for ((elt) = (uintptr_t)astris_vm_page_queue_first((head)); !astris_vm_page_queue_end((head), (elt)); \
844 	     (elt) = (uintptr_t)astris_vm_page_queue_next(((elt) + (uintptr_t)lowGlo.lgPmapMemChainOffset)))
845 
846 #define astris_ptoa(x) ((vm_address_t)(x) << lowGlo.lgPageShift)
847 
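/*
 * Recover the physical page number for a vm_page entry using only lowGlo
 * fields, the same way an external coredump consumer would.
 */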
848 static inline ppnum_t
849 astris_vm_page_get_phys_page(uintptr_t m)
850 {
851 	return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr)
852 	       ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum)
853 	       : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset));
854 }
855 
856 kern_return_t
857 pmap_coredump_test(void)
858 {
859 	int iter = 0;
860 	uintptr_t p;
861 
862 	T_LOG("Testing coredump info for PMAP.");
863 
864 	T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL);
865 	T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL);
866 	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL);
867 	T_ASSERT_GE_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL);
868 	T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL);
869 
870 	// check the constant values in lowGlo
871 	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL);
872 	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL);
873 	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL);
874 	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL);
875 
876 #if defined(__arm64__)
877 	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL);
878 	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL);
879 	T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL);
880 #endif
881 
882 	vm_object_lock_shared(&pmap_object_store);
883 	astris_vm_page_queue_iterate(lowGlo.lgPmapMemQ, p)
884 	{
885 		ppnum_t ppnum   = astris_vm_page_get_phys_page(p);
886 		pmap_paddr_t pa = (pmap_paddr_t)astris_ptoa(ppnum);
887 		T_ASSERT_GE_ULONG(pa, gPhysBase, NULL);
888 		T_ASSERT_LT_ULONG(pa, gPhysBase + gPhysSize, NULL);
889 		iter++;
890 		T_ASSERT_LT_INT(iter, MAX_PMAP_OBJECT_ELEMENT, NULL);
891 	}
892 	vm_object_unlock(&pmap_object_store);
893 
894 	T_ASSERT_GT_INT(iter, 0, NULL);
895 	return KERN_SUCCESS;
896 }
897 #endif /* defined(__arm64__) */
898 
899 struct ts_kern_prim_test_args {
900 	int *end_barrier;
901 	int *notify_b;
902 	int *wait_event_b;
903 	int before_num;
904 	int *notify_a;
905 	int *wait_event_a;
906 	int after_num;
907 	int priority_to_check;
908 };
909 
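/*
 * wait_threads()/wake_threads() form a simple counting barrier: waiters block
 * on the counter's address until it reaches 'num', and wakers increment the
 * counter and issue a thread_wakeup() on the same address.
 */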
910 static void
911 wait_threads(
912 	int* var,
913 	int num)
914 {
915 	if (var != NULL) {
916 		while (os_atomic_load(var, acquire) != num) {
917 			assert_wait((event_t) var, THREAD_UNINT);
918 			if (os_atomic_load(var, acquire) != num) {
919 				(void) thread_block(THREAD_CONTINUE_NULL);
920 			} else {
921 				clear_wait(current_thread(), THREAD_AWAKENED);
922 			}
923 		}
924 	}
925 }
926 
927 static void
928 wake_threads(
929 	int* var)
930 {
931 	if (var) {
932 		os_atomic_inc(var, relaxed);
933 		thread_wakeup((event_t) var);
934 	}
935 }
936 
937 extern void IOSleep(int);
938 
939 static void
940 thread_lock_unlock_kernel_primitive(
941 	void *args,
942 	__unused wait_result_t wr)
943 {
944 	thread_t thread = current_thread();
945 	struct ts_kern_prim_test_args *info = (struct ts_kern_prim_test_args*) args;
946 	int pri;
947 
948 	wait_threads(info->wait_event_b, info->before_num);
949 	wake_threads(info->notify_b);
950 
951 	tstile_test_prim_lock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);
952 
953 	wake_threads(info->notify_a);
954 	wait_threads(info->wait_event_a, info->after_num);
955 
956 	IOSleep(100);
957 
958 	if (info->priority_to_check) {
959 		spl_t s = splsched();
960 		thread_lock(thread);
961 		pri = thread->sched_pri;
962 		thread_unlock(thread);
963 		splx(s);
964 		T_ASSERT(pri == info->priority_to_check, "Priority thread: current sched %d sched wanted %d", pri, info->priority_to_check);
965 	}
966 
967 	tstile_test_prim_unlock(SYSCTL_TURNSTILE_TEST_KERNEL_DEFAULT);
968 
969 	wake_threads(info->end_barrier);
970 	thread_terminate_self();
971 }
972 
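/*
 * Scenario: an owner thread at priority 80 takes the test turnstile lock,
 * then two waiters at priorities 85 and 90 block on it. While holding the
 * lock the owner verifies that turnstile inheritance has pushed it to
 * priority 90.
 */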
973 kern_return_t
974 ts_kernel_primitive_test(void)
975 {
976 	thread_t owner, thread1, thread2;
977 	struct ts_kern_prim_test_args targs[2] = {};
978 	kern_return_t result;
979 	int end_barrier = 0;
980 	int owner_locked = 0;
981 	int waiters_ready = 0;
982 
983 	T_LOG("Testing turnstile kernel primitive");
984 
985 	targs[0].notify_b = NULL;
986 	targs[0].wait_event_b = NULL;
987 	targs[0].before_num = 0;
988 	targs[0].notify_a = &owner_locked;
989 	targs[0].wait_event_a = &waiters_ready;
990 	targs[0].after_num = 2;
991 	targs[0].priority_to_check = 90;
992 	targs[0].end_barrier = &end_barrier;
993 
994 	// Start owner with priority 80
995 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[0], 80, &owner);
996 	T_ASSERT(result == KERN_SUCCESS, "Starting owner");
997 
998 	targs[1].notify_b = &waiters_ready;
999 	targs[1].wait_event_b = &owner_locked;
1000 	targs[1].before_num = 1;
1001 	targs[1].notify_a = NULL;
1002 	targs[1].wait_event_a = NULL;
1003 	targs[1].after_num = 0;
1004 	targs[1].priority_to_check = 0;
1005 	targs[1].end_barrier = &end_barrier;
1006 
1007 	// Start waiters with priority 85 and 90
1008 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 85, &thread1);
1009 	T_ASSERT(result == KERN_SUCCESS, "Starting thread1");
1010 
1011 	result = kernel_thread_start_priority((thread_continue_t)thread_lock_unlock_kernel_primitive, &targs[1], 90, &thread2);
1012 	T_ASSERT(result == KERN_SUCCESS, "Starting thread2");
1013 
1014 	wait_threads(&end_barrier, 3);
1015 
1016 	return KERN_SUCCESS;
1017 }
1018 
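/* Lock flavors exercised by the synchronization tests below. */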
1019 #define MTX_LOCK 0
1020 #define RW_LOCK 1
1021 
1022 #define NUM_THREADS 4
1023 
1024 struct synch_test_common {
1025 	unsigned int nthreads;
1026 	thread_t *threads;
1027 	int max_pri;
1028 	int test_done;
1029 };
1030 
1031 static kern_return_t
1032 init_synch_test_common(struct synch_test_common *info, unsigned int nthreads)
1033 {
1034 	info->nthreads = nthreads;
1035 	info->threads = kalloc_type(thread_t, nthreads, Z_WAITOK);
1036 	if (!info->threads) {
1037 		return ENOMEM;
1038 	}
1039 
1040 	return KERN_SUCCESS;
1041 }
1042 
1043 static void
1044 destroy_synch_test_common(struct synch_test_common *info)
1045 {
1046 	kfree_type(thread_t, info->nthreads, info->threads);
1047 }
1048 
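/*
 * Spawn info->nthreads kernel threads running 'func', with priorities starting
 * at 75 and stepping up by 5; info->max_pri records the highest priority
 * handed out, capped at 95.
 */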
1049 static void
1050 start_threads(thread_continue_t func, struct synch_test_common *info, bool sleep_after_first)
1051 {
1052 	thread_t thread;
1053 	kern_return_t result;
1054 	uint i;
1055 	int priority = 75;
1056 
1057 	info->test_done = 0;
1058 
1059 	for (i = 0; i < info->nthreads; i++) {
1060 		info->threads[i] = NULL;
1061 	}
1062 
1063 	info->max_pri = priority + (info->nthreads - 1) * 5;
1064 	if (info->max_pri > 95) {
1065 		info->max_pri = 95;
1066 	}
1067 
1068 	for (i = 0; i < info->nthreads; i++) {
1069 		result = kernel_thread_start_priority((thread_continue_t)func, info, priority, &thread);
1070 		os_atomic_store(&info->threads[i], thread, release);
1071 		T_ASSERT(result == KERN_SUCCESS, "Starting thread %d, priority %d, %p", i, priority, thread);
1072 
1073 		priority += 5;
1074 
1075 		if (i == 0 && sleep_after_first) {
1076 			IOSleep(100);
1077 		}
1078 	}
1079 }
1080 
1081 static unsigned int
1082 get_max_pri(struct synch_test_common * info)
1083 {
1084 	return info->max_pri;
1085 }
1086 
1087 static void
1088 wait_all_thread(struct synch_test_common * info)
1089 {
1090 	wait_threads(&info->test_done, info->nthreads);
1091 }
1092 
1093 static void
1094 notify_waiter(struct synch_test_common * info)
1095 {
1096 	wake_threads(&info->test_done);
1097 }
1098 
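/*
 * Spin until every worker thread has published itself in info->threads and has
 * blocked (cleared TH_RUN), so the caller can assume all waiters are enqueued.
 */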
1099 static void
1100 wait_for_waiters(struct synch_test_common *info)
1101 {
1102 	uint i, j;
1103 	thread_t thread;
1104 
1105 	for (i = 0; i < info->nthreads; i++) {
1106 		j = 0;
1107 		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
1108 			if (j % 100 == 0) {
1109 				IOSleep(10);
1110 			}
1111 			j++;
1112 		}
1113 
1114 		if (info->threads[i] != current_thread()) {
1115 			j = 0;
1116 			do {
1117 				thread = os_atomic_load(&info->threads[i], relaxed);
1118 				if (thread == (thread_t) 1) {
1119 					break;
1120 				}
1121 
1122 				if (!(thread->state & TH_RUN)) {
1123 					break;
1124 				}
1125 
1126 				if (j % 100 == 0) {
1127 					IOSleep(100);
1128 				}
1129 				j++;
1130 
1131 				if (thread->started == FALSE) {
1132 					continue;
1133 				}
1134 			} while (thread->state & TH_RUN);
1135 		}
1136 	}
1137 }
1138 
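/*
 * Replace the calling thread's slot in info->threads with the sentinel value
 * (thread_t)1 so that wait_for_waiters() does not wait for it.
 */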
1139 static void
1140 exclude_current_waiter(struct synch_test_common *info)
1141 {
1142 	uint i, j;
1143 
1144 	for (i = 0; i < info->nthreads; i++) {
1145 		j = 0;
1146 		while (os_atomic_load(&info->threads[i], acquire) == NULL) {
1147 			if (j % 100 == 0) {
1148 				IOSleep(10);
1149 			}
1150 			j++;
1151 		}
1152 
1153 		if (os_atomic_load(&info->threads[i], acquire) == current_thread()) {
1154 			os_atomic_store(&info->threads[i], (thread_t)1, release);
1155 			return;
1156 		}
1157 	}
1158 }
1159 
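/*
 * Shared state for the sleep-inheritor and gate tests: which primitive to
 * exercise (mutex or rwlock), the current inheritor thread, an optional
 * dynamically allocated gate, and counters used to synchronize the workers.
 */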
1160 struct info_sleep_inheritor_test {
1161 	struct synch_test_common head;
1162 	lck_mtx_t mtx_lock;
1163 	lck_rw_t rw_lock;
1164 	decl_lck_mtx_gate_data(, gate);
1165 	boolean_t gate_closed;
1166 	int prim_type;
1167 	boolean_t work_to_do;
1168 	unsigned int max_pri;
1169 	unsigned int steal_pri;
1170 	int synch_value;
1171 	int synch;
1172 	int value;
1173 	int handoff_failure;
1174 	thread_t thread_inheritor;
1175 	bool use_alloc_gate;
1176 	gate_t *alloc_gate;
1177 	struct obj_cached **obj_cache;
1178 	kern_apfs_reflock_data(, reflock);
1179 	int reflock_protected_status;
1180 };
1181 
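/*
 * The primitive_* wrappers below dispatch on info->prim_type so every test
 * body can be run unchanged over both a mutex and an exclusive rwlock.
 */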
1182 static void
1183 primitive_lock(struct info_sleep_inheritor_test *info)
1184 {
1185 	switch (info->prim_type) {
1186 	case MTX_LOCK:
1187 		lck_mtx_lock(&info->mtx_lock);
1188 		break;
1189 	case RW_LOCK:
1190 		lck_rw_lock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1191 		break;
1192 	default:
1193 		panic("invalid type %d", info->prim_type);
1194 	}
1195 }
1196 
1197 static void
1198 primitive_unlock(struct info_sleep_inheritor_test *info)
1199 {
1200 	switch (info->prim_type) {
1201 	case MTX_LOCK:
1202 		lck_mtx_unlock(&info->mtx_lock);
1203 		break;
1204 	case RW_LOCK:
1205 		lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1206 		break;
1207 	default:
1208 		panic("invalid type %d", info->prim_type);
1209 	}
1210 }
1211 
1212 static wait_result_t
1213 primitive_sleep_with_inheritor(struct info_sleep_inheritor_test *info)
1214 {
1215 	wait_result_t ret = KERN_SUCCESS;
1216 	switch (info->prim_type) {
1217 	case MTX_LOCK:
1218 		ret = lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1219 		break;
1220 	case RW_LOCK:
1221 		ret = lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1222 		break;
1223 	default:
1224 		panic("invalid type %d", info->prim_type);
1225 	}
1226 
1227 	return ret;
1228 }
1229 
1230 static void
1231 primitive_wakeup_one_with_inheritor(struct info_sleep_inheritor_test *info)
1232 {
1233 	switch (info->prim_type) {
1234 	case MTX_LOCK:
1235 	case RW_LOCK:
1236 		wakeup_one_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED, LCK_WAKE_DEFAULT, &info->thread_inheritor);
1237 		break;
1238 	default:
1239 		panic("invalid type %d", info->prim_type);
1240 	}
1241 }
1242 
1243 static void
1244 primitive_wakeup_all_with_inheritor(struct info_sleep_inheritor_test *info)
1245 {
1246 	switch (info->prim_type) {
1247 	case MTX_LOCK:
1248 	case RW_LOCK:
1249 		wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1250 		break;
1251 	default:
1252 		panic("invalid type %d", info->prim_type);
1253 	}
1254 	return;
1255 }
1256 
1257 static void
1258 primitive_change_sleep_inheritor(struct info_sleep_inheritor_test *info)
1259 {
1260 	switch (info->prim_type) {
1261 	case MTX_LOCK:
1262 	case RW_LOCK:
1263 		change_sleep_inheritor((event_t) &info->thread_inheritor, info->thread_inheritor);
1264 		break;
1265 	default:
1266 		panic("invalid type %d", info->prim_type);
1267 	}
1268 	return;
1269 }
1270 
1271 static kern_return_t
1272 primitive_gate_try_close(struct info_sleep_inheritor_test *info)
1273 {
1274 	gate_t *gate = &info->gate;
1275 	if (info->use_alloc_gate == true) {
1276 		gate = info->alloc_gate;
1277 	}
1278 	kern_return_t ret = KERN_SUCCESS;
1279 	switch (info->prim_type) {
1280 	case MTX_LOCK:
1281 		ret = lck_mtx_gate_try_close(&info->mtx_lock, gate);
1282 		break;
1283 	case RW_LOCK:
1284 		ret = lck_rw_gate_try_close(&info->rw_lock, gate);
1285 		break;
1286 	default:
1287 		panic("invalid type %d", info->prim_type);
1288 	}
1289 	return ret;
1290 }
1291 
1292 static gate_wait_result_t
1293 primitive_gate_wait(struct info_sleep_inheritor_test *info)
1294 {
1295 	gate_t *gate = &info->gate;
1296 	if (info->use_alloc_gate == true) {
1297 		gate = info->alloc_gate;
1298 	}
1299 	gate_wait_result_t ret = GATE_OPENED;
1300 	switch (info->prim_type) {
1301 	case MTX_LOCK:
1302 		ret = lck_mtx_gate_wait(&info->mtx_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1303 		break;
1304 	case RW_LOCK:
1305 		ret = lck_rw_gate_wait(&info->rw_lock, gate, LCK_SLEEP_DEFAULT, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1306 		break;
1307 	default:
1308 		panic("invalid type %d", info->prim_type);
1309 	}
1310 	return ret;
1311 }
1312 
1313 static void
1314 primitive_gate_open(struct info_sleep_inheritor_test *info)
1315 {
1316 	gate_t *gate = &info->gate;
1317 	if (info->use_alloc_gate == true) {
1318 		gate = info->alloc_gate;
1319 	}
1320 	switch (info->prim_type) {
1321 	case MTX_LOCK:
1322 		lck_mtx_gate_open(&info->mtx_lock, gate);
1323 		break;
1324 	case RW_LOCK:
1325 		lck_rw_gate_open(&info->rw_lock, gate);
1326 		break;
1327 	default:
1328 		panic("invalid type %d", info->prim_type);
1329 	}
1330 }
1331 
1332 static void
1333 primitive_gate_close(struct info_sleep_inheritor_test *info)
1334 {
1335 	gate_t *gate = &info->gate;
1336 	if (info->use_alloc_gate == true) {
1337 		gate = info->alloc_gate;
1338 	}
1339 
1340 	switch (info->prim_type) {
1341 	case MTX_LOCK:
1342 		lck_mtx_gate_close(&info->mtx_lock, gate);
1343 		break;
1344 	case RW_LOCK:
1345 		lck_rw_gate_close(&info->rw_lock, gate);
1346 		break;
1347 	default:
1348 		panic("invalid type %d", info->prim_type);
1349 	}
1350 }
1351 
1352 static void
1353 primitive_gate_steal(struct info_sleep_inheritor_test *info)
1354 {
1355 	gate_t *gate = &info->gate;
1356 	if (info->use_alloc_gate == true) {
1357 		gate = info->alloc_gate;
1358 	}
1359 
1360 	switch (info->prim_type) {
1361 	case MTX_LOCK:
1362 		lck_mtx_gate_steal(&info->mtx_lock, gate);
1363 		break;
1364 	case RW_LOCK:
1365 		lck_rw_gate_steal(&info->rw_lock, gate);
1366 		break;
1367 	default:
1368 		panic("invalid type %d", info->prim_type);
1369 	}
1370 }
1371 
1372 static kern_return_t
1373 primitive_gate_handoff(struct info_sleep_inheritor_test *info, int flags)
1374 {
1375 	gate_t *gate = &info->gate;
1376 	if (info->use_alloc_gate == true) {
1377 		gate = info->alloc_gate;
1378 	}
1379 
1380 	kern_return_t ret = KERN_SUCCESS;
1381 	switch (info->prim_type) {
1382 	case MTX_LOCK:
1383 		ret = lck_mtx_gate_handoff(&info->mtx_lock, gate, flags);
1384 		break;
1385 	case RW_LOCK:
1386 		ret = lck_rw_gate_handoff(&info->rw_lock, gate, flags);
1387 		break;
1388 	default:
1389 		panic("invalid type %d", info->prim_type);
1390 	}
1391 	return ret;
1392 }
1393 
1394 static void
1395 primitive_gate_assert(struct info_sleep_inheritor_test *info, int type)
1396 {
1397 	gate_t *gate = &info->gate;
1398 	if (info->use_alloc_gate == true) {
1399 		gate = info->alloc_gate;
1400 	}
1401 
1402 	switch (info->prim_type) {
1403 	case MTX_LOCK:
1404 		lck_mtx_gate_assert(&info->mtx_lock, gate, type);
1405 		break;
1406 	case RW_LOCK:
1407 		lck_rw_gate_assert(&info->rw_lock, gate, type);
1408 		break;
1409 	default:
1410 		panic("invalid type %d", info->prim_type);
1411 	}
1412 }
1413 
1414 static void
1415 primitive_gate_init(struct info_sleep_inheritor_test *info)
1416 {
1417 	switch (info->prim_type) {
1418 	case MTX_LOCK:
1419 		lck_mtx_gate_init(&info->mtx_lock, &info->gate);
1420 		break;
1421 	case RW_LOCK:
1422 		lck_rw_gate_init(&info->rw_lock, &info->gate);
1423 		break;
1424 	default:
1425 		panic("invalid type %d", info->prim_type);
1426 	}
1427 }
1428 
1429 static void
1430 primitive_gate_destroy(struct info_sleep_inheritor_test *info)
1431 {
1432 	switch (info->prim_type) {
1433 	case MTX_LOCK:
1434 		lck_mtx_gate_destroy(&info->mtx_lock, &info->gate);
1435 		break;
1436 	case RW_LOCK:
1437 		lck_rw_gate_destroy(&info->rw_lock, &info->gate);
1438 		break;
1439 	default:
1440 		panic("invalid type %d", info->prim_type);
1441 	}
1442 }
1443 
1444 static void
1445 primitive_gate_alloc(struct info_sleep_inheritor_test *info)
1446 {
1447 	gate_t *gate;
1448 	switch (info->prim_type) {
1449 	case MTX_LOCK:
1450 		gate = lck_mtx_gate_alloc_init(&info->mtx_lock);
1451 		break;
1452 	case RW_LOCK:
1453 		gate = lck_rw_gate_alloc_init(&info->rw_lock);
1454 		break;
1455 	default:
1456 		panic("invalid type %d", info->prim_type);
1457 	}
1458 	info->alloc_gate = gate;
1459 }
1460 
1461 static void
1462 primitive_gate_free(struct info_sleep_inheritor_test *info)
1463 {
1464 	T_ASSERT(info->alloc_gate != NULL, "gate not yet freed");
1465 
1466 	switch (info->prim_type) {
1467 	case MTX_LOCK:
1468 		lck_mtx_gate_free(&info->mtx_lock, info->alloc_gate);
1469 		break;
1470 	case RW_LOCK:
1471 		lck_rw_gate_free(&info->rw_lock, info->alloc_gate);
1472 		break;
1473 	default:
1474 		panic("invalid type %d", info->prim_type);
1475 	}
1476 	info->alloc_gate = NULL;
1477 }
1478 
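/*
 * Each thread either becomes the inheritor (if none is set) or sleeps with the
 * current inheritor as the priority beneficiary; after its "work" it wakes
 * exactly one waiter and hands the inheritor role over, so the construct
 * behaves like a mutex with priority inheritance.
 */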
1479 static void
1480 thread_inheritor_like_mutex(
1481 	void *args,
1482 	__unused wait_result_t wr)
1483 {
1484 	wait_result_t wait;
1485 
1486 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1487 	uint my_pri = current_thread()->sched_pri;
1488 
1489 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1490 
1491 	/*
1492 	 * spin here to start concurrently
1493 	 */
1494 	wake_threads(&info->synch);
1495 	wait_threads(&info->synch, info->synch_value);
1496 
1497 	primitive_lock(info);
1498 
1499 	if (info->thread_inheritor == NULL) {
1500 		info->thread_inheritor = current_thread();
1501 	} else {
1502 		wait = primitive_sleep_with_inheritor(info);
1503 		T_ASSERT(wait == THREAD_AWAKENED || wait == THREAD_NOT_WAITING, "sleep_with_inheritor return");
1504 	}
1505 	primitive_unlock(info);
1506 
1507 	IOSleep(100);
1508 	info->value++;
1509 
1510 	primitive_lock(info);
1511 
1512 	T_ASSERT(info->thread_inheritor == current_thread(), "thread_inheritor is %p", info->thread_inheritor);
1513 	primitive_wakeup_one_with_inheritor(info);
1514 	T_LOG("woken up %p", info->thread_inheritor);
1515 
1516 	if (info->thread_inheritor == NULL) {
1517 		T_ASSERT(info->handoff_failure == 0, "handoff failures");
1518 		info->handoff_failure++;
1519 	} else {
1520 		T_ASSERT(info->thread_inheritor != current_thread(), "thread_inheritor is %p", info->thread_inheritor);
1521 		thread_deallocate(info->thread_inheritor);
1522 	}
1523 
1524 	primitive_unlock(info);
1525 
1526 	assert(current_thread()->kern_promotion_schedpri == 0);
1527 	notify_waiter((struct synch_test_common *)info);
1528 
1529 	thread_terminate_self();
1530 }
1531 
1532 static void
1533 thread_just_inheritor_do_work(
1534 	void *args,
1535 	__unused wait_result_t wr)
1536 {
1537 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1538 	uint my_pri = current_thread()->sched_pri;
1539 	uint max_pri;
1540 
1541 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1542 	primitive_lock(info);
1543 
1544 	if (info->thread_inheritor == NULL) {
1545 		info->thread_inheritor = current_thread();
1546 		primitive_unlock(info);
1547 		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1548 
1549 		wait_threads(&info->synch, info->synch_value - 1);
1550 
1551 		wait_for_waiters((struct synch_test_common *)info);
1552 
1553 		max_pri = get_max_pri((struct synch_test_common *) info);
1554 		T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1555 
1556 		os_atomic_store(&info->synch, 0, relaxed);
1557 		primitive_lock(info);
1558 		primitive_wakeup_all_with_inheritor(info);
1559 	} else {
1560 		wake_threads(&info->synch);
1561 		primitive_sleep_with_inheritor(info);
1562 	}
1563 
1564 	primitive_unlock(info);
1565 
1566 	assert(current_thread()->kern_promotion_schedpri == 0);
1567 	notify_waiter((struct synch_test_common *)info);
1568 
1569 	thread_terminate_self();
1570 }
1571 
1572 static void
1573 thread_steal_work(
1574 	void *args,
1575 	__unused wait_result_t wr)
1576 {
1577 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1578 	uint my_pri = current_thread()->sched_pri;
1579 
1580 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1581 	primitive_lock(info);
1582 
1583 	if (info->thread_inheritor == NULL) {
1584 		info->thread_inheritor = current_thread();
1585 		exclude_current_waiter((struct synch_test_common *)info);
1586 
1587 		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1588 		primitive_unlock(info);
1589 
1590 		wait_threads(&info->synch, info->synch_value - 2);
1591 
1592 		wait_for_waiters((struct synch_test_common *)info);
1593 		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1594 		primitive_lock(info);
1595 		if (info->thread_inheritor == current_thread()) {
1596 			primitive_wakeup_all_with_inheritor(info);
1597 		}
1598 	} else {
1599 		if (info->steal_pri == 0) {
1600 			info->steal_pri = my_pri;
1601 			info->thread_inheritor = current_thread();
1602 			primitive_change_sleep_inheritor(info);
1603 			exclude_current_waiter((struct synch_test_common *)info);
1604 
1605 			primitive_unlock(info);
1606 
1607 			wait_threads(&info->synch, info->synch_value - 2);
1608 
1609 			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
1610 			wait_for_waiters((struct synch_test_common *)info);
1611 
1612 			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);
1613 
1614 			primitive_lock(info);
1615 			primitive_wakeup_all_with_inheritor(info);
1616 		} else {
1617 			if (my_pri > info->steal_pri) {
1618 				info->steal_pri = my_pri;
1619 			}
1620 			wake_threads(&info->synch);
1621 			primitive_sleep_with_inheritor(info);
1622 			exclude_current_waiter((struct synch_test_common *)info);
1623 		}
1624 	}
1625 	primitive_unlock(info);
1626 
1627 	assert(current_thread()->kern_promotion_schedpri == 0);
1628 	notify_waiter((struct synch_test_common *)info);
1629 
1630 	thread_terminate_self();
1631 }
1632 
1633 static void
1634 thread_no_inheritor_work(
1635 	void *args,
1636 	__unused wait_result_t wr)
1637 {
1638 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1639 	uint my_pri = current_thread()->sched_pri;
1640 
1641 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1642 	primitive_lock(info);
1643 
1644 	info->value--;
1645 	if (info->value == 0) {
1646 		primitive_wakeup_all_with_inheritor(info);
1647 	} else {
1648 		info->thread_inheritor = NULL;
1649 		primitive_sleep_with_inheritor(info);
1650 	}
1651 
1652 	primitive_unlock(info);
1653 
1654 	assert(current_thread()->kern_promotion_schedpri == 0);
1655 	notify_waiter((struct synch_test_common *)info);
1656 
1657 	thread_terminate_self();
1658 }
1659 
1660 static void
1661 thread_mtx_work(
1662 	void *args,
1663 	__unused wait_result_t wr)
1664 {
1665 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1666 	uint my_pri = current_thread()->sched_pri;
1667 	int i;
1668 	u_int8_t rand;
1669 	unsigned int mod_rand;
1670 	uint max_pri;
1671 
1672 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1673 
1674 	for (i = 0; i < 10; i++) {
1675 		lck_mtx_lock(&info->mtx_lock);
1676 		if (info->thread_inheritor == NULL) {
1677 			info->thread_inheritor = current_thread();
1678 			lck_mtx_unlock(&info->mtx_lock);
1679 
1680 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1681 
1682 			wait_threads(&info->synch, info->synch_value - 1);
1683 			wait_for_waiters((struct synch_test_common *)info);
1684 			max_pri = get_max_pri((struct synch_test_common *) info);
1685 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1686 
1687 			os_atomic_store(&info->synch, 0, relaxed);
1688 
1689 			lck_mtx_lock(&info->mtx_lock);
1690 			info->thread_inheritor = NULL;
1691 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1692 			lck_mtx_unlock(&info->mtx_lock);
1693 			continue;
1694 		}
1695 
1696 		read_random(&rand, sizeof(rand));
1697 		mod_rand = rand % 2;
1698 
1699 		wake_threads(&info->synch);
1700 		switch (mod_rand) {
1701 		case 0:
1702 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1703 			lck_mtx_unlock(&info->mtx_lock);
1704 			break;
1705 		case 1:
1706 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1707 			break;
1708 		default:
1709 			panic("rand()mod2 returned %u (random %u)", mod_rand, rand);
1710 		}
1711 	}
1712 
1713 	/*
1714 	 * spin here so all threads stop using the lock as a mutex before the spin rounds
1715 	 */
1716 	wake_threads(&info->synch);
1717 	wait_threads(&info->synch, info->synch_value);
1718 
1719 	for (i = 0; i < 10; i++) {
1720 		/* read_random might sleep so read it before acquiring the mtx as spin */
1721 		read_random(&rand, sizeof(rand));
1722 
1723 		lck_mtx_lock_spin(&info->mtx_lock);
1724 		if (info->thread_inheritor == NULL) {
1725 			info->thread_inheritor = current_thread();
1726 			lck_mtx_unlock(&info->mtx_lock);
1727 
1728 			T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1729 			wait_for_waiters((struct synch_test_common *)info);
1730 			max_pri = get_max_pri((struct synch_test_common *) info);
1731 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1732 
1733 			lck_mtx_lock_spin(&info->mtx_lock);
1734 			info->thread_inheritor = NULL;
1735 			wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1736 			lck_mtx_unlock(&info->mtx_lock);
1737 			continue;
1738 		}
1739 
1740 		mod_rand = rand % 2;
1741 		switch (mod_rand) {
1742 		case 0:
1743 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1744 			lck_mtx_unlock(&info->mtx_lock);
1745 			break;
1746 		case 1:
1747 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_SPIN_ALWAYS, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1748 			lck_mtx_unlock(&info->mtx_lock);
1749 			break;
1750 		default:
1751 			panic("rand()mod2 returned %u (random %u)", mod_rand, rand);
1752 		}
1753 	}
1754 	assert(current_thread()->kern_promotion_schedpri == 0);
1755 	notify_waiter((struct synch_test_common *)info);
1756 
1757 	thread_terminate_self();
1758 }
1759 
1760 static void
1761 thread_rw_work(
1762 	void *args,
1763 	__unused wait_result_t wr)
1764 {
1765 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
1766 	uint my_pri = current_thread()->sched_pri;
1767 	int i;
1768 	lck_rw_type_t type;
1769 	u_int8_t rand;
1770 	unsigned int mod_rand;
1771 	uint max_pri;
1772 
1773 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
1774 
1775 	for (i = 0; i < 10; i++) {
1776 try_again:
1777 		type = LCK_RW_TYPE_SHARED;
1778 		lck_rw_lock(&info->rw_lock, type);
1779 		if (info->thread_inheritor == NULL) {
1780 			type = LCK_RW_TYPE_EXCLUSIVE;
1781 
1782 			if (lck_rw_lock_shared_to_exclusive(&info->rw_lock)) {
1783 				if (info->thread_inheritor == NULL) {
1784 					info->thread_inheritor = current_thread();
1785 					lck_rw_unlock(&info->rw_lock, type);
1786 					wait_threads(&info->synch, info->synch_value - 1);
1787 
1788 					T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
1789 					wait_for_waiters((struct synch_test_common *)info);
1790 					max_pri = get_max_pri((struct synch_test_common *) info);
1791 					T_ASSERT((uint) current_thread()->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
1792 
1793 					os_atomic_store(&info->synch, 0, relaxed);
1794 
1795 					lck_rw_lock(&info->rw_lock, type);
1796 					info->thread_inheritor = NULL;
1797 					wakeup_all_with_inheritor((event_t) &info->thread_inheritor, THREAD_AWAKENED);
1798 					lck_rw_unlock(&info->rw_lock, type);
1799 					continue;
1800 				}
1801 			} else {
1802 				goto try_again;
1803 			}
1804 		}
1805 
1806 		read_random(&rand, sizeof(rand));
1807 		mod_rand = rand % 4;
1808 
1809 		wake_threads(&info->synch);
1810 		switch (mod_rand) {
1811 		case 0:
1812 			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_DEFAULT, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1813 			lck_rw_unlock(&info->rw_lock, type);
1814 			break;
1815 		case 1:
1816 			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_UNLOCK, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1817 			break;
1818 		case 2:
1819 			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_SHARED, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1820 			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_SHARED);
1821 			break;
1822 		case 3:
1823 			lck_rw_sleep_with_inheritor(&info->rw_lock, LCK_SLEEP_EXCLUSIVE, (event_t) &info->thread_inheritor, info->thread_inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
1824 			lck_rw_unlock(&info->rw_lock, LCK_RW_TYPE_EXCLUSIVE);
1825 			break;
1826 		default:
1827 			panic("rand()mod4 returned %u (random %u)", mod_rand, rand);
1828 		}
1829 	}
1830 
1831 	assert(current_thread()->kern_promotion_schedpri == 0);
1832 	notify_waiter((struct synch_test_common *)info);
1833 
1834 	thread_terminate_self();
1835 }
1836 
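/*
 * Object states for the reflock-protected cache exercised below:
 * OBJ_STATE_UNUSED       the entry holds no live object and may be recycled,
 * OBJ_STATE_REAL         the entry is fully initialized and safe to read,
 * OBJ_STATE_PLACEHOLDER  the entry is being initialized by the thread that
 *                        holds its obj_refcount reflock.
 */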
1837 #define OBJ_STATE_UNUSED        0
1838 #define OBJ_STATE_REAL          1
1839 #define OBJ_STATE_PLACEHOLDER   2
1840 
1841 #define OBJ_BUFF_SIZE 11
1842 struct obj_cached {
1843 	int obj_id;
1844 	int obj_state;
1845 	struct kern_apfs_reflock *obj_refcount;
1846 	char obj_buff[OBJ_BUFF_SIZE];
1847 };
1848 
1849 #define CACHE_SIZE 2
1850 #define USE_CACHE_ROUNDS 15
1851 
1852 #define REFCOUNT_REFLOCK_ROUNDS 15
1853 
1854 /*
1855  * For the reflock cache test the cache is allocated
1856  * and its pointer is saved in obj_cache.
1857  * The lock for the cache is going to be one of the exclusive
1858  * locks already present in struct info_sleep_inheritor_test.
1859  */
1860 
1861 static struct obj_cached *
1862 alloc_init_cache_entry(void)
1863 {
1864 	struct obj_cached *cache_entry = kalloc_type(struct obj_cached, 1, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1865 	cache_entry->obj_id = 0;
1866 	cache_entry->obj_state = OBJ_STATE_UNUSED;
1867 	cache_entry->obj_refcount = kern_apfs_reflock_alloc_init();
1868 	snprintf(cache_entry->obj_buff, OBJ_BUFF_SIZE, "I am groot");
1869 	return cache_entry;
1870 }
1871 
1872 static void
1873 init_cache(struct info_sleep_inheritor_test *info)
1874 {
1875 	struct obj_cached **obj_cache = kalloc_type(struct obj_cached *, CACHE_SIZE, Z_WAITOK | Z_NOFAIL | Z_ZERO);
1876 
1877 	int i;
1878 	for (i = 0; i < CACHE_SIZE; i++) {
1879 		obj_cache[i] = alloc_init_cache_entry();
1880 	}
1881 
1882 	info->obj_cache = obj_cache;
1883 }
1884 
1885 static void
1886 check_cache_empty(struct info_sleep_inheritor_test *info)
1887 {
1888 	struct obj_cached **obj_cache = info->obj_cache;
1889 
1890 	int i, ret;
1891 	for (i = 0; i < CACHE_SIZE; i++) {
1892 		if (obj_cache[i] != NULL) {
1893 			T_ASSERT(obj_cache[i]->obj_state == OBJ_STATE_UNUSED, "checked OBJ_STATE_UNUSED");
1894 			T_ASSERT(obj_cache[i]->obj_refcount != NULL, "checked obj_refcount");
1895 			ret = memcmp(obj_cache[i]->obj_buff, "I am groot", OBJ_BUFF_SIZE);
1896 			T_ASSERT(ret == 0, "checked buff correctly emptied");
1897 		}
1898 	}
1899 }
1900 
1901 static void
1902 free_cache(struct info_sleep_inheritor_test *info)
1903 {
1904 	struct obj_cached **obj_cache = info->obj_cache;
1905 
1906 	int i;
1907 	for (i = 0; i < CACHE_SIZE; i++) {
1908 		if (obj_cache[i] != NULL) {
1909 			kern_apfs_reflock_free(obj_cache[i]->obj_refcount);
1910 			obj_cache[i]->obj_refcount = NULL;
1911 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1912 			obj_cache[i] = NULL;
1913 		}
1914 	}
1915 
1916 	kfree_type(struct obj_cached *, CACHE_SIZE, obj_cache);
1917 	info->obj_cache = NULL;
1918 }
1919 
1920 static struct obj_cached *
1921 find_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info)
1922 {
1923 	struct obj_cached **obj_cache = info->obj_cache;
1924 	int i;
1925 	for (i = 0; i < CACHE_SIZE; i++) {
1926 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1927 			return obj_cache[i];
1928 		}
1929 	}
1930 	return NULL;
1931 }
1932 
1933 static bool
1934 free_id_in_cache(int obj_id, struct info_sleep_inheritor_test *info, __assert_only struct obj_cached *expected)
1935 {
1936 	struct obj_cached **obj_cache = info->obj_cache;
1937 	int i;
1938 	for (i = 0; i < CACHE_SIZE; i++) {
1939 		if (obj_cache[i] != NULL && obj_cache[i]->obj_id == obj_id) {
1940 			assert(obj_cache[i] == expected);
1941 			kfree_type(struct obj_cached, 1, obj_cache[i]);
1942 			obj_cache[i] = NULL;
1943 			return true;
1944 		}
1945 	}
1946 	return false;
1947 }
1948 
1949 static struct obj_cached *
1950 find_empty_spot_in_cache(struct info_sleep_inheritor_test *info)
1951 {
1952 	struct obj_cached **obj_cache = info->obj_cache;
1953 	int i;
1954 	for (i = 0; i < CACHE_SIZE; i++) {
1955 		if (obj_cache[i] == NULL) {
1956 			obj_cache[i] = alloc_init_cache_entry();
1957 			return obj_cache[i];
1958 		}
1959 		if (obj_cache[i]->obj_state == OBJ_STATE_UNUSED) {
1960 			return obj_cache[i];
1961 		}
1962 	}
1963 	return NULL;
1964 }
1965 
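/*
 * Take a reference on the cache entry for obj_id, allocating or recycling
 * an entry (and initializing its buffer) if needed, and return a pointer to
 * the entry's buffer through *buff.
 * Returns 0 on success, or -1 if every entry in the cache is in use.
 */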
1966 static int
1967 get_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, char **buff)
1968 {
1969 	struct obj_cached *obj = NULL, *obj2 = NULL;
1970 	kern_apfs_reflock_t refcount = NULL;
1971 	bool ret;
1972 	kern_apfs_reflock_out_flags_t out_flags;
1973 
1974 try_again:
1975 	primitive_lock(info);
1976 	if ((obj = find_id_in_cache(obj_id, info)) != NULL) {
1977 		/* Found an allocated object in the cache with the same id */
1978 
1979 		/*
1980 		 * copy the pointer to obj_refcount as obj might
1981 		 * get deallocated after primitive_unlock()
1982 		 */
1983 		refcount = obj->obj_refcount;
1984 		if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
1985 			/*
1986 			 * Got a ref, let's check the state
1987 			 */
1988 			switch (obj->obj_state) {
1989 			case OBJ_STATE_UNUSED:
1990 				goto init;
1991 			case OBJ_STATE_REAL:
1992 				goto done;
1993 			case OBJ_STATE_PLACEHOLDER:
1994 				panic("Thread %p observed OBJ_STATE_PLACEHOLDER %d for obj %d", current_thread(), obj->obj_state, obj_id);
1995 			default:
1996 				panic("Thread %p observed an unknown obj_state %d for obj %d", current_thread(), obj->obj_state, obj_id);
1997 			}
1998 		} else {
1999 			/*
2000 			 * Didn't get a ref.
2001 			 * This means either an obj_put() of the last ref is ongoing
2002 			 * or an init of the object is happening.
2003 			 * In both cases wait for that to finish and retry.
2004 			 * While waiting, the thread that is holding the reflock
2005 			 * will run at a priority at least as high as this thread's.
2006 			 */
2007 			primitive_unlock(info);
2008 			kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2009 			goto try_again;
2010 		}
2011 	} else {
2012 		/* Look for a spot in the cache where we can save the object */
2013 
2014 		if ((obj = find_empty_spot_in_cache(info)) == NULL) {
2015 			/*
2016 			 * Sadly the cache is full, and everything in the cache is
2017 			 * in use.
2018 			 */
2019 			primitive_unlock(info);
2020 			return -1;
2021 		} else {
2022 			/*
2023 			 * copy the pointer to obj_refcount as obj might
2024 			 * get deallocated after primitive_unlock()
2025 			 */
2026 			refcount = obj->obj_refcount;
2027 			if (kern_apfs_reflock_try_get_ref(refcount, KERN_APFS_REFLOCK_IN_WILL_WAIT, &out_flags)) {
2028 				/*
2029 				 * Got a ref on an OBJ_STATE_UNUSED obj.
2030 				 * Recycle it.
2031 				 */
2032 				obj->obj_id = obj_id;
2033 				goto init;
2034 			} else {
2035 				/*
2036 				 * This could happen if the obj_put() has just changed the
2037 				 * state to OBJ_STATE_UNUSED, but has not unlocked the reflock yet.
2038 				 */
2039 				primitive_unlock(info);
2040 				kern_apfs_reflock_wait_for_unlock(refcount, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2041 				goto try_again;
2042 			}
2043 		}
2044 	}
2045 init:
2046 	assert(obj->obj_id == obj_id);
2047 	assert(obj->obj_state == OBJ_STATE_UNUSED);
2048 	/*
2049 	 * We already got a ref on the object, but we need
2050 	 * to initialize it. Mark it as
2051 	 * OBJ_STATE_PLACEHOLDER and get the obj_reflock.
2052 	 * In this way all threads waiting for this init
2053 	 * to finish will push on this thread.
2054 	 */
2055 	ret = kern_apfs_reflock_try_lock(refcount, KERN_APFS_REFLOCK_IN_DEFAULT, NULL);
2056 	assert(ret == true);
2057 	obj->obj_state = OBJ_STATE_PLACEHOLDER;
2058 	primitive_unlock(info);
2059 
2060 	//let's pretend we are populating the obj
2061 	IOSleep(10);
2062 	/*
2063 	 * obj will not be deallocated while I hold a ref.
2064 	 * So it is safe to access it.
2065 	 */
2066 	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am %d", obj_id);
2067 
2068 	primitive_lock(info);
2069 	obj2 = find_id_in_cache(obj_id, info);
2070 	assert(obj == obj2);
2071 	assert(obj->obj_state == OBJ_STATE_PLACEHOLDER);
2072 
2073 	obj->obj_state = OBJ_STATE_REAL;
2074 	kern_apfs_reflock_unlock(refcount);
2075 
2076 done:
2077 	*buff = obj->obj_buff;
2078 	primitive_unlock(info);
2079 	return 0;
2080 }
2081 
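/*
 * Drop the reference taken by get_obj_cache().
 * The thread that drops the last reference takes the reflock, flushes the
 * buffer, moves the entry back to OBJ_STATE_UNUSED and, if free is true,
 * removes the entry from the cache.
 */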
2082 static void
2083 put_obj_cache(int obj_id, struct info_sleep_inheritor_test *info, bool free)
2084 {
2085 	struct obj_cached *obj = NULL, *obj2 = NULL;
2086 	bool ret;
2087 	kern_apfs_reflock_out_flags_t out_flags;
2088 	kern_apfs_reflock_t refcount = NULL;
2089 
2090 	primitive_lock(info);
2091 	obj = find_id_in_cache(obj_id, info);
2092 	primitive_unlock(info);
2093 
2094 	/*
2095 	 * Nobody should have been able to remove obj_id
2096 	 * from the cache.
2097 	 */
2098 	assert(obj != NULL);
2099 	assert(obj->obj_state == OBJ_STATE_REAL);
2100 
2101 	refcount = obj->obj_refcount;
2102 
2103 	/*
2104 	 * This should never fail: the reflock gets locked either
2105 	 * when the state is OBJ_STATE_UNUSED, in order to init the object,
2106 	 * or by a put that dropped the last ref. If the latter had
2107 	 * happened, a subsequent reflock_get_ref() would have had to wait
2108 	 * for the object to transition back to OBJ_STATE_REAL.
2109 	 */
2110 	ret = kern_apfs_reflock_try_put_ref(refcount, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
2111 	assert(ret == true);
2112 	if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == 0) {
2113 		return;
2114 	}
2115 
2116 	/*
2117 	 * Note: nobody at this point will be able to get a ref or a lock on
2118 	 * refcount.
2119 	 * All threads waiting on refcount will push on this thread.
2120 	 */
2121 
2122 	//let's pretend we are flushing the obj somewhere.
2123 	IOSleep(10);
2124 	snprintf(obj->obj_buff, OBJ_BUFF_SIZE, "I am groot");
2125 
2126 	primitive_lock(info);
2127 	obj->obj_state = OBJ_STATE_UNUSED;
2128 	if (free) {
2129 		obj2 = find_id_in_cache(obj_id, info);
2130 		assert(obj == obj2);
2131 
2132 		ret = free_id_in_cache(obj_id, info, obj);
2133 		assert(ret == true);
2134 	}
2135 	primitive_unlock(info);
2136 
2137 	kern_apfs_reflock_unlock(refcount);
2138 
2139 	if (free) {
2140 		kern_apfs_reflock_free(refcount);
2141 	}
2142 }
2143 
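/*
 * Each thread hammers a single obj_id: it takes a ref, checks the buffer
 * contents before and after a sleep, then drops the ref, asking for the
 * entry to be freed on every other round so both the recycle and the
 * reallocation paths of the cache get exercised.
 */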
2144 static void
2145 thread_use_cache(
2146 	void *args,
2147 	__unused wait_result_t wr)
2148 {
2149 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2150 	int my_obj;
2151 
2152 	primitive_lock(info);
2153 	my_obj = ((info->value--) % (CACHE_SIZE + 1)) + 1;
2154 	primitive_unlock(info);
2155 
2156 	T_LOG("Thread %p started and it is going to use obj %d", current_thread(), my_obj);
2157 	/*
2158 	 * This is the string we expect to see
2159 	 * in my_obj's buff.
2160 	 */
2161 	char my_string[OBJ_BUFF_SIZE];
2162 	int my_string_size = snprintf(my_string, OBJ_BUFF_SIZE, "I am %d", my_obj);
2163 
2164 	/*
2165 	 * spin here to start concurrently with the other threads
2166 	 */
2167 	wake_threads(&info->synch);
2168 	wait_threads(&info->synch, info->synch_value);
2169 
2170 	for (int i = 0; i < USE_CACHE_ROUNDS; i++) {
2171 		char *buff;
2172 		while (get_obj_cache(my_obj, info, &buff) == -1) {
2173 			/*
2174 			 * Cache is full, wait.
2175 			 */
2176 			IOSleep(10);
2177 		}
2178 		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
2179 		IOSleep(10);
2180 		T_ASSERT(memcmp(buff, my_string, my_string_size) == 0, "reflock: thread %p obj_id %d value in buff", current_thread(), my_obj);
2181 		put_obj_cache(my_obj, info, (i % 2 == 0));
2182 	}
2183 
2184 	notify_waiter((struct synch_test_common *)info);
2185 	thread_terminate_self();
2186 }
2187 
2188 static void
2189 thread_refcount_reflock(
2190 	void *args,
2191 	__unused wait_result_t wr)
2192 {
2193 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2194 	bool ret;
2195 	kern_apfs_reflock_out_flags_t out_flags;
2196 	kern_apfs_reflock_in_flags_t in_flags;
2197 
2198 	T_LOG("Thread %p started", current_thread());
2199 	/*
2200 	 * spin here to start concurrently with the other threads
2201 	 */
2202 	wake_threads(&info->synch);
2203 	wait_threads(&info->synch, info->synch_value);
2204 
2205 	for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
2206 		in_flags = KERN_APFS_REFLOCK_IN_LOCK_IF_FIRST;
2207 		if ((i % 2) == 0) {
2208 			in_flags |= KERN_APFS_REFLOCK_IN_WILL_WAIT;
2209 		}
2210 		ret = kern_apfs_reflock_try_get_ref(&info->reflock, in_flags, &out_flags);
2211 		if (ret == true) {
2212 			/* got reference, check if we did 0->1 */
2213 			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
2214 				T_ASSERT(info->reflock_protected_status == 0, "status init check");
2215 				info->reflock_protected_status = 1;
2216 				kern_apfs_reflock_unlock(&info->reflock);
2217 			} else {
2218 				T_ASSERT(info->reflock_protected_status == 1, "status set check");
2219 			}
2220 			/* release the reference and check if we did 1->0 */
2221 			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_LOCK_IF_LAST, &out_flags);
2222 			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
2223 			if ((out_flags & KERN_APFS_REFLOCK_OUT_LOCKED) == KERN_APFS_REFLOCK_OUT_LOCKED) {
2224 				T_ASSERT(info->reflock_protected_status == 1, "status set check");
2225 				info->reflock_protected_status = 0;
2226 				kern_apfs_reflock_unlock(&info->reflock);
2227 			}
2228 		} else {
2229 			/* didn't get a reference */
2230 			if ((in_flags & KERN_APFS_REFLOCK_IN_WILL_WAIT) == KERN_APFS_REFLOCK_IN_WILL_WAIT) {
2231 				kern_apfs_reflock_wait_for_unlock(&info->reflock, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2232 			}
2233 		}
2234 	}
2235 
2236 	notify_waiter((struct synch_test_common *)info);
2237 	thread_terminate_self();
2238 }
2239 
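/*
 * One thread takes the reflock (allowing force) and holds it for a while;
 * the other threads take and drop references with KERN_APFS_REFLOCK_IN_FORCE,
 * which is expected to succeed even while the reflock is held.
 */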
2240 static void
2241 thread_force_reflock(
2242 	void *args,
2243 	__unused wait_result_t wr)
2244 {
2245 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2246 	bool ret;
2247 	kern_apfs_reflock_out_flags_t out_flags;
2248 	bool lock = false;
2249 	uint32_t count;
2250 
2251 	T_LOG("Thread %p started", current_thread());
2252 	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
2253 		T_LOG("Thread %p is locker", current_thread());
2254 		lock = true;
2255 		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_ALLOW_FORCE, &count);
2256 		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
2257 		T_ASSERT(count == 0, "refcount value");
2258 	}
2259 	/*
2260 	 * spin here to start concurrently with the other threads
2261 	 */
2262 	wake_threads(&info->synch);
2263 	wait_threads(&info->synch, info->synch_value);
2264 
2265 	if (lock) {
2266 		IOSleep(100);
2267 		kern_apfs_reflock_unlock(&info->reflock);
2268 	} else {
2269 		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
2270 			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
2271 			T_ASSERT(ret == true, "kern_apfs_reflock_try_get_ref success");
2272 			ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_FORCE, &out_flags);
2273 			T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
2274 		}
2275 	}
2276 
2277 	notify_waiter((struct synch_test_common *)info);
2278 	thread_terminate_self();
2279 }
2280 
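/*
 * One thread takes the reflock and flips reflock_protected_status while
 * holding it; the other threads retry plain try_get_ref(), which fails while
 * the reflock is held, so a successful get implies the status is back to 0.
 */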
2281 static void
2282 thread_lock_reflock(
2283 	void *args,
2284 	__unused wait_result_t wr)
2285 {
2286 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2287 	bool ret;
2288 	kern_apfs_reflock_out_flags_t out_flags;
2289 	bool lock = false;
2290 	uint32_t count;
2291 
2292 	T_LOG("Thread %p started", current_thread());
2293 	if (os_atomic_inc_orig(&info->value, relaxed) == 0) {
2294 		T_LOG("Thread %p is locker", current_thread());
2295 		lock = true;
2296 		ret = kern_apfs_reflock_try_lock(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &count);
2297 		T_ASSERT(ret == true, "kern_apfs_reflock_try_lock success");
2298 		T_ASSERT(count == 0, "refcount value");
2299 		info->reflock_protected_status = 1;
2300 	}
2301 	/*
2302 	 * spin here to start concurrently with the other threads
2303 	 */
2304 	wake_threads(&info->synch);
2305 	wait_threads(&info->synch, info->synch_value);
2306 
2307 	if (lock) {
2308 		IOSleep(100);
2309 		info->reflock_protected_status = 0;
2310 		kern_apfs_reflock_unlock(&info->reflock);
2311 	} else {
2312 		for (int i = 0; i < REFCOUNT_REFLOCK_ROUNDS; i++) {
2313 			ret = kern_apfs_reflock_try_get_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
2314 			if (ret == true) {
2315 				T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2316 				ret = kern_apfs_reflock_try_put_ref(&info->reflock, KERN_APFS_REFLOCK_IN_DEFAULT, &out_flags);
2317 				T_ASSERT(ret == true, "kern_apfs_reflock_try_put_ref success");
2318 				break;
2319 			}
2320 		}
2321 	}
2322 
2323 	notify_waiter((struct synch_test_common *)info);
2324 	thread_terminate_self();
2325 }
2326 
2327 static void
2328 test_cache_reflock(struct info_sleep_inheritor_test *info)
2329 {
2330 	info->synch = 0;
2331 	info->synch_value = info->head.nthreads;
2332 
2333 	info->value = info->head.nthreads;
2334 	/*
2335 	 * Use the mtx as cache lock
2336 	 */
2337 	info->prim_type = MTX_LOCK;
2338 
2339 	init_cache(info);
2340 
2341 	start_threads((thread_continue_t)thread_use_cache, (struct synch_test_common *)info, FALSE);
2342 	wait_all_thread((struct synch_test_common *)info);
2343 
2344 	check_cache_empty(info);
2345 	free_cache(info);
2346 }
2347 
2348 static void
2349 test_refcount_reflock(struct info_sleep_inheritor_test *info)
2350 {
2351 	info->synch = 0;
2352 	info->synch_value = info->head.nthreads;
2353 	kern_apfs_reflock_init(&info->reflock);
2354 	info->reflock_protected_status = 0;
2355 
2356 	start_threads((thread_continue_t)thread_refcount_reflock, (struct synch_test_common *)info, FALSE);
2357 	wait_all_thread((struct synch_test_common *)info);
2358 
2359 	kern_apfs_reflock_destroy(&info->reflock);
2360 
2361 	T_ASSERT(info->reflock_protected_status == 0, "unlocked status check");
2362 }
2363 
2364 static void
2365 test_force_reflock(struct info_sleep_inheritor_test *info)
2366 {
2367 	info->synch = 0;
2368 	info->synch_value = info->head.nthreads;
2369 	kern_apfs_reflock_init(&info->reflock);
2370 	info->value = 0;
2371 
2372 	start_threads((thread_continue_t)thread_force_reflock, (struct synch_test_common *)info, FALSE);
2373 	wait_all_thread((struct synch_test_common *)info);
2374 
2375 	kern_apfs_reflock_destroy(&info->reflock);
2376 }
2377 
2378 static void
2379 test_lock_reflock(struct info_sleep_inheritor_test *info)
2380 {
2381 	info->synch = 0;
2382 	info->synch_value = info->head.nthreads;
2383 	kern_apfs_reflock_init(&info->reflock);
2384 	info->value = 0;
2385 
2386 	start_threads((thread_continue_t)thread_lock_reflock, (struct synch_test_common *)info, FALSE);
2387 	wait_all_thread((struct synch_test_common *)info);
2388 
2389 	kern_apfs_reflock_destroy(&info->reflock);
2390 }
2391 
2392 static void
2393 test_sleep_with_wake_all(struct info_sleep_inheritor_test *info, int prim_type)
2394 {
2395 	info->prim_type = prim_type;
2396 	info->synch = 0;
2397 	info->synch_value = info->head.nthreads;
2398 
2399 	info->thread_inheritor = NULL;
2400 
2401 	start_threads((thread_continue_t)thread_just_inheritor_do_work, (struct synch_test_common *)info, TRUE);
2402 	wait_all_thread((struct synch_test_common *)info);
2403 }
2404 
2405 static void
2406 test_sleep_with_wake_one(struct info_sleep_inheritor_test *info, int prim_type)
2407 {
2408 	info->prim_type = prim_type;
2409 
2410 	info->synch = 0;
2411 	info->synch_value = info->head.nthreads;
2412 	info->value = 0;
2413 	info->handoff_failure = 0;
2414 	info->thread_inheritor = NULL;
2415 
2416 	start_threads((thread_continue_t)thread_inheritor_like_mutex, (struct synch_test_common *)info, FALSE);
2417 	wait_all_thread((struct synch_test_common *)info);
2418 
2419 	T_ASSERT(info->value == (int)info->head.nthreads, "value protected by sleep");
2420 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2421 }
2422 
2423 static void
2424 test_change_sleep_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2425 {
2426 	info->prim_type = prim_type;
2427 
2428 	info->thread_inheritor = NULL;
2429 	info->steal_pri = 0;
2430 	info->synch = 0;
2431 	info->synch_value = info->head.nthreads;
2432 
2433 	start_threads((thread_continue_t)thread_steal_work, (struct synch_test_common *)info, FALSE);
2434 	wait_all_thread((struct synch_test_common *)info);
2435 }
2436 
2437 static void
2438 test_no_inheritor(struct info_sleep_inheritor_test *info, int prim_type)
2439 {
2440 	info->prim_type = prim_type;
2441 	info->synch = 0;
2442 	info->synch_value = info->head.nthreads;
2443 
2444 	info->thread_inheritor = NULL;
2445 	info->value = info->head.nthreads;
2446 
2447 	start_threads((thread_continue_t)thread_no_inheritor_work, (struct synch_test_common *)info, FALSE);
2448 	wait_all_thread((struct synch_test_common *)info);
2449 }
2450 
2451 static void
2452 test_rw_lock(struct info_sleep_inheritor_test *info)
2453 {
2454 	info->thread_inheritor = NULL;
2455 	info->value = info->head.nthreads;
2456 	info->synch = 0;
2457 	info->synch_value = info->head.nthreads;
2458 
2459 	start_threads((thread_continue_t)thread_rw_work, (struct synch_test_common *)info, FALSE);
2460 	wait_all_thread((struct synch_test_common *)info);
2461 }
2462 
2463 static void
2464 test_mtx_lock(struct info_sleep_inheritor_test *info)
2465 {
2466 	info->thread_inheritor = NULL;
2467 	info->value = info->head.nthreads;
2468 	info->synch = 0;
2469 	info->synch_value = info->head.nthreads;
2470 
2471 	start_threads((thread_continue_t)thread_mtx_work, (struct synch_test_common *)info, FALSE);
2472 	wait_all_thread((struct synch_test_common *)info);
2473 }
2474 
2475 kern_return_t
2476 ts_kernel_sleep_inheritor_test(void)
2477 {
2478 	struct info_sleep_inheritor_test info = {};
2479 
2480 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2481 
2482 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2483 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2484 	lck_grp_t* lck_grp = lck_grp_alloc_init("test sleep_inheritor", lck_grp_attr);
2485 
2486 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2487 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2488 
2489 	/*
2490 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2491 	 */
2492 	T_LOG("Testing mtx sleep with inheritor and wake_all_with_inheritor");
2493 	test_sleep_with_wake_all(&info, MTX_LOCK);
2494 
2495 	/*
2496 	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
2497 	 */
2498 	T_LOG("Testing rw sleep with inheritor and wake_all_with_inheritor");
2499 	test_sleep_with_wake_all(&info, RW_LOCK);
2500 
2501 	/*
2502 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_one_with_inheritor
2503 	 */
2504 	T_LOG("Testing mtx sleep with inheritor and wake_one_with_inheritor");
2505 	test_sleep_with_wake_one(&info, MTX_LOCK);
2506 
2507 	/*
2508 	 * Testing lck_rw_sleep_with_inheritor and wakeup_one_with_inheritor
2509 	 */
2510 	T_LOG("Testing rw sleep with inheritor and wake_one_with_inheritor");
2511 	test_sleep_with_wake_one(&info, RW_LOCK);
2512 
2513 	/*
2514 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2515 	 * and change_sleep_inheritor
2516 	 */
2517 	T_LOG("Testing change_sleep_inheritor with mtx sleep");
2518 	test_change_sleep_inheritor(&info, MTX_LOCK);
2519 
2520 	/*
2521 	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
2522 	 * and change_sleep_inheritor
2523 	 */
2524 	T_LOG("Testing change_sleep_inheritor with rw sleep");
2525 	test_change_sleep_inheritor(&info, RW_LOCK);
2526 
2527 	/*
2528 	 * Testing lck_mtx_sleep_with_inheritor and wakeup_all_with_inheritor
2529 	 * with inheritor NULL
2530 	 */
2531 	T_LOG("Testing inheritor NULL");
2532 	test_no_inheritor(&info, MTX_LOCK);
2533 
2534 	/*
2535 	 * Testing lck_rw_sleep_with_inheritor and wakeup_all_with_inheritor
2536 	 * with inheritor NULL
2537 	 */
2538 	T_LOG("Testing inheritor NULL");
2539 	test_no_inheritor(&info, RW_LOCK);
2540 
2541 	/*
2542 	 * Testing mtx locking combinations
2543 	 */
2544 	T_LOG("Testing mtx locking combinations");
2545 	test_mtx_lock(&info);
2546 
2547 	/*
2548 	 * Testing rw locking combinations
2549 	 */
2550 	T_LOG("Testing rw locking combinations");
2551 	test_rw_lock(&info);
2552 
2553 	/*
2554 	 * Testing reflock / cond_sleep_with_inheritor
2555 	 */
2556 	T_LOG("Test cache reflock + cond_sleep_with_inheritor");
2557 	test_cache_reflock(&info);
2558 	T_LOG("Test force reflock + cond_sleep_with_inheritor");
2559 	test_force_reflock(&info);
2560 	T_LOG("Test refcount reflock + cond_sleep_with_inheritor");
2561 	test_refcount_reflock(&info);
2562 	T_LOG("Test lock reflock + cond_sleep_with_inheritor");
2563 	test_lock_reflock(&info);
2564 
2565 	destroy_synch_test_common((struct synch_test_common *)&info);
2566 
2567 	lck_attr_free(lck_attr);
2568 	lck_grp_attr_free(lck_grp_attr);
2569 	lck_rw_destroy(&info.rw_lock, lck_grp);
2570 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2571 	lck_grp_free(lck_grp);
2572 
2573 	return KERN_SUCCESS;
2574 }
2575 
2576 static void
2577 thread_gate_aggressive(
2578 	void *args,
2579 	__unused wait_result_t wr)
2580 {
2581 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2582 	uint my_pri = current_thread()->sched_pri;
2583 
2584 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
2585 
2586 	primitive_lock(info);
2587 	if (info->thread_inheritor == NULL) {
2588 		info->thread_inheritor = current_thread();
2589 		primitive_gate_assert(info, GATE_ASSERT_OPEN);
2590 		primitive_gate_close(info);
2591 		exclude_current_waiter((struct synch_test_common *)info);
2592 
2593 		primitive_unlock(info);
2594 
2595 		wait_threads(&info->synch, info->synch_value - 2);
2596 		wait_for_waiters((struct synch_test_common *)info);
2597 		T_LOG("Thread pri %d first to run %p", my_pri, current_thread());
2598 
2599 		primitive_lock(info);
2600 		if (info->thread_inheritor == current_thread()) {
2601 			primitive_gate_open(info);
2602 		}
2603 	} else {
2604 		if (info->steal_pri == 0) {
2605 			info->steal_pri = my_pri;
2606 			info->thread_inheritor = current_thread();
2607 			primitive_gate_steal(info);
2608 			exclude_current_waiter((struct synch_test_common *)info);
2609 
2610 			primitive_unlock(info);
2611 			wait_threads(&info->synch, info->synch_value - 2);
2612 
2613 			T_LOG("Thread pri %d stole push %p", my_pri, current_thread());
2614 			wait_for_waiters((struct synch_test_common *)info);
2615 			T_ASSERT((uint) current_thread()->sched_pri == info->steal_pri, "gate keeper priority current is %d, should be %d", current_thread()->sched_pri, info->steal_pri);
2616 
2617 			primitive_lock(info);
2618 			primitive_gate_open(info);
2619 		} else {
2620 			if (my_pri > info->steal_pri) {
2621 				info->steal_pri = my_pri;
2622 			}
2623 			wake_threads(&info->synch);
2624 			primitive_gate_wait(info);
2625 			exclude_current_waiter((struct synch_test_common *)info);
2626 		}
2627 	}
2628 	primitive_unlock(info);
2629 
2630 	assert(current_thread()->kern_promotion_schedpri == 0);
2631 	notify_waiter((struct synch_test_common *)info);
2632 
2633 	thread_terminate_self();
2634 }
2635 
2636 static void
2637 thread_gate_free(
2638 	void *args,
2639 	__unused wait_result_t wr)
2640 {
2641 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2642 	uint my_pri = current_thread()->sched_pri;
2643 
2644 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
2645 
2646 	primitive_lock(info);
2647 
2648 	if (primitive_gate_try_close(info) == KERN_SUCCESS) {
2649 		primitive_gate_assert(info, GATE_ASSERT_HELD);
2650 		primitive_unlock(info);
2651 
2652 		wait_threads(&info->synch, info->synch_value - 1);
2653 		wait_for_waiters((struct synch_test_common *) info);
2654 
2655 		primitive_lock(info);
2656 		primitive_gate_open(info);
2657 		primitive_gate_free(info);
2658 	} else {
2659 		primitive_gate_assert(info, GATE_ASSERT_CLOSED);
2660 		wake_threads(&info->synch);
2661 		gate_wait_result_t ret = primitive_gate_wait(info);
2662 		T_ASSERT(ret == GATE_OPENED, "open gate");
2663 	}
2664 
2665 	primitive_unlock(info);
2666 
2667 	notify_waiter((struct synch_test_common *)info);
2668 
2669 	thread_terminate_self();
2670 }
2671 
2672 static void
2673 thread_gate_like_mutex(
2674 	void *args,
2675 	__unused wait_result_t wr)
2676 {
2677 	gate_wait_result_t wait;
2678 	kern_return_t ret;
2679 	uint my_pri = current_thread()->sched_pri;
2680 
2681 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2682 
2683 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
2684 
2685 	/*
2686 	 * spin here to start concurrently
2687 	 */
2688 	wake_threads(&info->synch);
2689 	wait_threads(&info->synch, info->synch_value);
2690 
2691 	primitive_lock(info);
2692 
2693 	if (primitive_gate_try_close(info) != KERN_SUCCESS) {
2694 		wait = primitive_gate_wait(info);
2695 		T_ASSERT(wait == GATE_HANDOFF, "gate_wait return");
2696 	}
2697 
2698 	primitive_gate_assert(info, GATE_ASSERT_HELD);
2699 
2700 	primitive_unlock(info);
2701 
2702 	IOSleep(100);
2703 	info->value++;
2704 
2705 	primitive_lock(info);
2706 
2707 	ret = primitive_gate_handoff(info, GATE_HANDOFF_DEFAULT);
2708 	if (ret == KERN_NOT_WAITING) {
2709 		T_ASSERT(info->handoff_failure == 0, "handoff failures");
2710 		primitive_gate_handoff(info, GATE_HANDOFF_OPEN_IF_NO_WAITERS);
2711 		info->handoff_failure++;
2712 	}
2713 
2714 	primitive_unlock(info);
2715 	notify_waiter((struct synch_test_common *)info);
2716 
2717 	thread_terminate_self();
2718 }
2719 
2720 static void
2721 thread_just_one_do_work(
2722 	void *args,
2723 	__unused wait_result_t wr)
2724 {
2725 	struct info_sleep_inheritor_test *info = (struct info_sleep_inheritor_test*) args;
2726 	uint my_pri = current_thread()->sched_pri;
2727 	uint max_pri;
2728 
2729 	T_LOG("Started thread pri %d %p", my_pri, current_thread());
2730 
2731 	primitive_lock(info);
2732 check_again:
2733 	if (info->work_to_do) {
2734 		if (primitive_gate_try_close(info) == KERN_SUCCESS) {
2735 			primitive_gate_assert(info, GATE_ASSERT_HELD);
2736 			primitive_unlock(info);
2737 
2738 			T_LOG("Thread pri %d acquired the gate %p", my_pri, current_thread());
2739 			wait_threads(&info->synch, info->synch_value - 1);
2740 			wait_for_waiters((struct synch_test_common *)info);
2741 			max_pri = get_max_pri((struct synch_test_common *) info);
2742 			T_ASSERT((uint) current_thread()->sched_pri == max_pri, "gate owner priority current is %d, should be %d", current_thread()->sched_pri, max_pri);
2743 			os_atomic_store(&info->synch, 0, relaxed);
2744 
2745 			primitive_lock(info);
2746 			info->work_to_do = FALSE;
2747 			primitive_gate_open(info);
2748 		} else {
2749 			primitive_gate_assert(info, GATE_ASSERT_CLOSED);
2750 			wake_threads(&info->synch);
2751 			primitive_gate_wait(info);
2752 			goto check_again;
2753 		}
2754 	}
2755 	primitive_unlock(info);
2756 
2757 	assert(current_thread()->kern_promotion_schedpri == 0);
2758 	notify_waiter((struct synch_test_common *)info);
2759 	thread_terminate_self();
2760 }
2761 
2762 static void
2763 test_gate_push(struct info_sleep_inheritor_test *info, int prim_type)
2764 {
2765 	info->prim_type = prim_type;
2766 	info->use_alloc_gate = false;
2767 
2768 	primitive_gate_init(info);
2769 	info->work_to_do = TRUE;
2770 	info->synch = 0;
2771 	info->synch_value = NUM_THREADS;
2772 
2773 	start_threads((thread_continue_t)thread_just_one_do_work, (struct synch_test_common *) info, TRUE);
2774 	wait_all_thread((struct synch_test_common *)info);
2775 
2776 	primitive_gate_destroy(info);
2777 }
2778 
2779 static void
2780 test_gate_handoff(struct info_sleep_inheritor_test *info, int prim_type)
2781 {
2782 	info->prim_type = prim_type;
2783 	info->use_alloc_gate = false;
2784 
2785 	primitive_gate_init(info);
2786 
2787 	info->synch = 0;
2788 	info->synch_value = NUM_THREADS;
2789 	info->value = 0;
2790 	info->handoff_failure = 0;
2791 
2792 	start_threads((thread_continue_t)thread_gate_like_mutex, (struct synch_test_common *)info, false);
2793 	wait_all_thread((struct synch_test_common *)info);
2794 
2795 	T_ASSERT(info->value == NUM_THREADS, "value protected by gate");
2796 	T_ASSERT(info->handoff_failure == 1, "handoff failures");
2797 
2798 	primitive_gate_destroy(info);
2799 }
2800 
2801 static void
2802 test_gate_steal(struct info_sleep_inheritor_test *info, int prim_type)
2803 {
2804 	info->prim_type = prim_type;
2805 	info->use_alloc_gate = false;
2806 
2807 	primitive_gate_init(info);
2808 
2809 	info->synch = 0;
2810 	info->synch_value = NUM_THREADS;
2811 	info->thread_inheritor = NULL;
2812 	info->steal_pri = 0;
2813 
2814 	start_threads((thread_continue_t)thread_gate_aggressive, (struct synch_test_common *)info, FALSE);
2815 	wait_all_thread((struct synch_test_common *)info);
2816 
2817 	primitive_gate_destroy(info);
2818 }
2819 
2820 static void
2821 test_gate_alloc_free(struct info_sleep_inheritor_test *info, int prim_type)
2822 {
2823 	(void)info;
2824 	(void) prim_type;
2825 	info->prim_type = prim_type;
2826 	info->use_alloc_gate = true;
2827 
2828 	primitive_gate_alloc(info);
2829 
2830 	info->synch = 0;
2831 	info->synch_value = NUM_THREADS;
2832 
2833 	start_threads((thread_continue_t)thread_gate_free, (struct synch_test_common *)info, FALSE);
2834 	wait_all_thread((struct synch_test_common *)info);
2835 
2836 	T_ASSERT(info->alloc_gate == NULL, "gate free");
2837 	info->use_alloc_gate = false;
2838 }
2839 
2840 kern_return_t
2841 ts_kernel_gate_test(void)
2842 {
2843 	struct info_sleep_inheritor_test info = {};
2844 
2845 	T_LOG("Testing gate primitive");
2846 
2847 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREADS);
2848 
2849 	lck_attr_t* lck_attr = lck_attr_alloc_init();
2850 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
2851 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
2852 
2853 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
2854 	lck_rw_init(&info.rw_lock, lck_grp, lck_attr);
2855 
2856 	/*
2857 	 * Testing the priority inherited by the keeper
2858 	 * lck_mtx_gate_try_close, lck_mtx_gate_open, lck_mtx_gate_wait
2859 	 */
2860 	T_LOG("Testing gate push, mtx");
2861 	test_gate_push(&info, MTX_LOCK);
2862 
2863 	T_LOG("Testing gate push, rw");
2864 	test_gate_push(&info, RW_LOCK);
2865 
2866 	/*
2867 	 * Testing the handoff
2868 	 * lck_mtx_gate_wait, lck_mtx_gate_handoff
2869 	 */
2870 	T_LOG("Testing gate handoff, mtx");
2871 	test_gate_handoff(&info, MTX_LOCK);
2872 
2873 	T_LOG("Testing gate handoff, rw");
2874 	test_gate_handoff(&info, RW_LOCK);
2875 
2876 	/*
2877 	 * Testing the steal
2878 	 * lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_steal, lck_mtx_gate_handoff
2879 	 */
2880 	T_LOG("Testing gate steal, mtx");
2881 	test_gate_steal(&info, MTX_LOCK);
2882 
2883 	T_LOG("Testing gate steal, rw");
2884 	test_gate_steal(&info, RW_LOCK);
2885 
2886 	/*
2887 	 * Testing the alloc/free
2888 	 * lck_mtx_gate_alloc_init, lck_mtx_gate_close, lck_mtx_gate_wait, lck_mtx_gate_free
2889 	 */
2890 	T_LOG("Testing gate alloc/free, mtx");
2891 	test_gate_alloc_free(&info, MTX_LOCK);
2892 
2893 	T_LOG("Testing gate alloc/free, rw");
2894 	test_gate_alloc_free(&info, RW_LOCK);
2895 
2896 	destroy_synch_test_common((struct synch_test_common *)&info);
2897 
2898 	lck_attr_free(lck_attr);
2899 	lck_grp_attr_free(lck_grp_attr);
2900 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
2901 	lck_grp_free(lck_grp);
2902 
2903 	return KERN_SUCCESS;
2904 }
2905 
2906 #define NUM_THREAD_CHAIN 6
2907 
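/*
 * State for the turnstile chain tests: NUM_THREAD_CHAIN threads block on one
 * another in a chain, through gates and/or sleep_with_inheritor, so that the
 * priority of the highest waiter is pushed through the whole chain up to the
 * thread at its head.
 */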
2908 struct turnstile_chain_test {
2909 	struct synch_test_common head;
2910 	lck_mtx_t mtx_lock;
2911 	int synch_value;
2912 	int synch;
2913 	int synch2;
2914 	gate_t gates[NUM_THREAD_CHAIN];
2915 };
2916 
2917 static void
2918 thread_sleep_gate_chain_work(
2919 	void *args,
2920 	__unused wait_result_t wr)
2921 {
2922 	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
2923 	thread_t self = current_thread();
2924 	uint my_pri = self->sched_pri;
2925 	uint max_pri;
2926 	uint i;
2927 	thread_t inheritor = NULL, woken_up;
2928 	event_t wait_event, wake_event;
2929 	kern_return_t ret;
2930 
2931 	T_LOG("Started thread pri %d %p", my_pri, self);
2932 
2933 	/*
2934 	 * Need to use the thread ids, wait for all of them to be populated
2935 	 */
2936 
2937 	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
2938 		IOSleep(10);
2939 	}
2940 
2941 	max_pri = get_max_pri((struct synch_test_common *) info);
2942 
2943 	for (i = 0; i < info->head.nthreads; i = i + 2) {
2944 		// even threads will close a gate
2945 		if (info->head.threads[i] == self) {
2946 			lck_mtx_lock(&info->mtx_lock);
2947 			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
2948 			lck_mtx_unlock(&info->mtx_lock);
2949 			break;
2950 		}
2951 	}
2952 
2953 	wake_threads(&info->synch2);
2954 	wait_threads(&info->synch2, info->synch_value);
2955 
2956 	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
2957 		wait_threads(&info->synch, info->synch_value - 1);
2958 		wait_for_waiters((struct synch_test_common *)info);
2959 
2960 		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2961 
2962 		lck_mtx_lock(&info->mtx_lock);
2963 		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
2964 		lck_mtx_unlock(&info->mtx_lock);
2965 	} else {
2966 		wait_event = NULL;
2967 		wake_event = NULL;
2968 		for (i = 0; i < info->head.nthreads; i++) {
2969 			if (info->head.threads[i] == self) {
2970 				inheritor = info->head.threads[i - 1];
2971 				wait_event = (event_t) &info->head.threads[i - 1];
2972 				wake_event = (event_t) &info->head.threads[i];
2973 				break;
2974 			}
2975 		}
2976 		assert(wait_event != NULL);
2977 
2978 		lck_mtx_lock(&info->mtx_lock);
2979 		wake_threads(&info->synch);
2980 
2981 		if (i % 2 != 0) {
2982 			lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2983 			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
2984 
2985 			ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
2986 			if (ret == KERN_SUCCESS) {
2987 				T_ASSERT(i != (info->head.nthreads - 1), "thread id");
2988 				T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
2989 			} else {
2990 				T_ASSERT(i == (info->head.nthreads - 1), "thread id");
2991 			}
2992 
2993 			// i am still the inheritor, wake all to drop inheritership
2994 			ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
2995 			T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
2996 		} else {
2997 			// I previously closed a gate
2998 			lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
2999 			T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
3000 
3001 			lck_mtx_lock(&info->mtx_lock);
3002 			lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
3003 			lck_mtx_unlock(&info->mtx_lock);
3004 		}
3005 	}
3006 
3007 	assert(current_thread()->kern_promotion_schedpri == 0);
3008 	notify_waiter((struct synch_test_common *)info);
3009 
3010 	thread_terminate_self();
3011 }
3012 
3013 static void
3014 thread_gate_chain_work(
3015 	void *args,
3016 	__unused wait_result_t wr)
3017 {
3018 	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
3019 	thread_t self = current_thread();
3020 	uint my_pri = self->sched_pri;
3021 	uint max_pri;
3022 	uint i;
3023 	T_LOG("Started thread pri %d %p", my_pri, self);
3024 
3025 
3026 	/*
3027 	 * Need to use the thread ids, wait for all of them to be populated
3028 	 */
3029 	while (os_atomic_load(&info->head.threads[info->head.nthreads - 1], acquire) == NULL) {
3030 		IOSleep(10);
3031 	}
3032 
3033 	max_pri = get_max_pri((struct synch_test_common *) info);
3034 
3035 	for (i = 0; i < info->head.nthreads; i++) {
3036 		if (info->head.threads[i] == self) {
3037 			lck_mtx_lock(&info->mtx_lock);
3038 			lck_mtx_gate_close(&info->mtx_lock, &info->gates[i]);
3039 			lck_mtx_unlock(&info->mtx_lock);
3040 			break;
3041 		}
3042 	}
3043 	assert(i != info->head.nthreads);
3044 
3045 	wake_threads(&info->synch2);
3046 	wait_threads(&info->synch2, info->synch_value);
3047 
3048 	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
3049 		wait_threads(&info->synch, info->synch_value - 1);
3050 
3051 		wait_for_waiters((struct synch_test_common *)info);
3052 
3053 		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
3054 
3055 		lck_mtx_lock(&info->mtx_lock);
3056 		lck_mtx_gate_open(&info->mtx_lock, &info->gates[0]);
3057 		lck_mtx_unlock(&info->mtx_lock);
3058 	} else {
3059 		lck_mtx_lock(&info->mtx_lock);
3060 		wake_threads(&info->synch);
3061 		lck_mtx_gate_wait(&info->mtx_lock, &info->gates[i - 1], LCK_SLEEP_UNLOCK, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
3062 
3063 		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
3064 
3065 		lck_mtx_lock(&info->mtx_lock);
3066 		lck_mtx_gate_open(&info->mtx_lock, &info->gates[i]);
3067 		lck_mtx_unlock(&info->mtx_lock);
3068 	}
3069 
3070 	assert(current_thread()->kern_promotion_schedpri == 0);
3071 	notify_waiter((struct synch_test_common *)info);
3072 
3073 	thread_terminate_self();
3074 }
3075 
3076 static void
3077 thread_sleep_chain_work(
3078 	void *args,
3079 	__unused wait_result_t wr)
3080 {
3081 	struct turnstile_chain_test *info = (struct turnstile_chain_test*) args;
3082 	thread_t self = current_thread();
3083 	uint my_pri = self->sched_pri;
3084 	uint max_pri;
3085 	event_t wait_event, wake_event;
3086 	uint i;
3087 	thread_t inheritor = NULL, woken_up = NULL;
3088 	kern_return_t ret;
3089 
3090 	T_LOG("Started thread pri %d %p", my_pri, self);
3091 
3092 	/*
3093 	 * Need to use the threads ids, wait for all of them to be populated
3094 	 */
3095 	 * Need to use the thread ids, wait for all of them to be populated
3096 		IOSleep(10);
3097 	}
3098 
3099 	max_pri = get_max_pri((struct synch_test_common *) info);
3100 
3101 	if (self == os_atomic_load(&info->head.threads[0], acquire)) {
3102 		wait_threads(&info->synch, info->synch_value - 1);
3103 
3104 		wait_for_waiters((struct synch_test_common *)info);
3105 
3106 		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
3107 
3108 		ret = wakeup_one_with_inheritor((event_t) &info->head.threads[0], THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
3109 		T_ASSERT(ret == KERN_SUCCESS, "wakeup_one_with_inheritor woke next");
3110 		T_ASSERT(woken_up == info->head.threads[1], "thread woken up");
3111 
3112 		// i am still the inheritor, wake all to drop inheritership
3113 		ret = wakeup_all_with_inheritor((event_t) &info->head.threads[0], LCK_WAKE_DEFAULT);
3114 		T_ASSERT(ret == KERN_NOT_WAITING, "waiters on event");
3115 	} else {
3116 		wait_event = NULL;
3117 		wake_event = NULL;
3118 		for (i = 0; i < info->head.nthreads; i++) {
3119 			if (info->head.threads[i] == self) {
3120 				inheritor = info->head.threads[i - 1];
3121 				wait_event = (event_t) &info->head.threads[i - 1];
3122 				wake_event = (event_t) &info->head.threads[i];
3123 				break;
3124 			}
3125 		}
3126 
3127 		assert(wait_event != NULL);
3128 		lck_mtx_lock(&info->mtx_lock);
3129 		wake_threads(&info->synch);
3130 
3131 		lck_mtx_sleep_with_inheritor(&info->mtx_lock, LCK_SLEEP_UNLOCK, wait_event, inheritor, THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
3132 
3133 		T_ASSERT((uint) self->sched_pri == max_pri, "sleep_inheritor inheritor priority current is %d, should be %d", self->sched_pri, max_pri);
3134 
3135 		ret = wakeup_one_with_inheritor(wake_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &woken_up);
3136 		if (ret == KERN_SUCCESS) {
3137 			T_ASSERT(i != (info->head.nthreads - 1), "thread id");
3138 			T_ASSERT(woken_up == info->head.threads[i + 1], "wakeup_one_with_inheritor woke next");
3139 		} else {
3140 			T_ASSERT(i == (info->head.nthreads - 1), "thread id");
3141 		}
3142 
3143 		// I am still the inheritor; wake all waiters to drop inheritorship
3144 		ret = wakeup_all_with_inheritor(wake_event, LCK_WAKE_DEFAULT);
3145 		T_ASSERT(ret == KERN_NOT_WAITING, "no waiters left on event");
3146 	}
3147 
3148 	assert(current_thread()->kern_promotion_schedpri == 0);
3149 	notify_waiter((struct synch_test_common *)info);
3150 
3151 	thread_terminate_self();
3152 }
3153 
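/*
 * Drives thread_sleep_chain_work on all test threads.
 */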
3154 static void
3155 test_sleep_chain(struct turnstile_chain_test *info)
3156 {
3157 	info->synch = 0;
3158 	info->synch_value = info->head.nthreads;
3159 
3160 	start_threads((thread_continue_t)thread_sleep_chain_work, (struct synch_test_common *)info, FALSE);
3161 	wait_all_thread((struct synch_test_common *)info);
3162 }
3163 
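/*
 * Drives thread_gate_chain_work (defined above) on all test threads; the
 * workers hand off through the per-thread gates set up by the caller.
 */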
3164 static void
3165 test_gate_chain(struct turnstile_chain_test *info)
3166 {
3167 	info->synch = 0;
3168 	info->synch2 = 0;
3169 	info->synch_value = info->head.nthreads;
3170 
3171 	start_threads((thread_continue_t)thread_gate_chain_work, (struct synch_test_common *)info, FALSE);
3172 	wait_all_thread((struct synch_test_common *)info);
3173 }
3174 
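/*
 * Drives thread_sleep_gate_chain_work (defined above) on all test threads,
 * mixing sleep_with_inheritor waits and gate handoffs in the same chain.
 */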
3175 static void
3176 test_sleep_gate_chain(struct turnstile_chain_test *info)
3177 {
3178 	info->synch = 0;
3179 	info->synch2 = 0;
3180 	info->synch_value = info->head.nthreads;
3181 
3182 	start_threads((thread_continue_t)thread_sleep_gate_chain_work, (struct synch_test_common *)info, FALSE);
3183 	wait_all_thread((struct synch_test_common *)info);
3184 }
3185 
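/*
 * POST entry point for the turnstile chain tests: initializes the shared
 * mutex and one gate per thread, runs the sleep chain, gate chain and
 * mixed sleep/gate chain scenarios, then destroys the gates, the mutex
 * and the lock group.
 */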
3186 kern_return_t
3187 ts_kernel_turnstile_chain_test(void)
3188 {
3189 	struct turnstile_chain_test info = {};
3190 	int i;
3191 
3192 	init_synch_test_common((struct synch_test_common *)&info, NUM_THREAD_CHAIN);
3193 	lck_attr_t* lck_attr = lck_attr_alloc_init();
3194 	lck_grp_attr_t* lck_grp_attr = lck_grp_attr_alloc_init();
3195 	lck_grp_t* lck_grp = lck_grp_alloc_init("test gate", lck_grp_attr);
3196 
3197 	lck_mtx_init(&info.mtx_lock, lck_grp, lck_attr);
3198 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3199 		lck_mtx_gate_init(&info.mtx_lock, &info.gates[i]);
3200 	}
3201 
3202 	T_LOG("Testing sleep chain, lck");
3203 	test_sleep_chain(&info);
3204 
3205 	T_LOG("Testing gate chain, lck");
3206 	test_gate_chain(&info);
3207 
3208 	T_LOG("Testing sleep and gate chain, lck");
3209 	test_sleep_gate_chain(&info);
3210 
3211 	destroy_synch_test_common((struct synch_test_common *)&info);
3212 	for (i = 0; i < NUM_THREAD_CHAIN; i++) {
3213 		lck_mtx_gate_destroy(&info.mtx_lock, &info.gates[i]);
3214 	}
3215 	lck_attr_free(lck_attr);
3216 	lck_grp_attr_free(lck_grp_attr);
3217 	lck_mtx_destroy(&info.mtx_lock, lck_grp);
3218 	lck_grp_free(lck_grp);
3219 
3220 	return KERN_SUCCESS;
3221 }
3222 
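/*
 * Checks the timingsafe_bcmp() contract: the result is 0 when the buffers
 * match (or the length is zero) and 1 when they differ, regardless of
 * where the difference lies, e.g. timingsafe_bcmp("foo", "foo", 3) == 0
 * while timingsafe_bcmp("foo", "bar", 3) == 1.
 */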
3223 kern_return_t
3224 ts_kernel_timingsafe_bcmp_test(void)
3225 {
3226 	int i, buf_size;
3227 	char *buf = NULL;
3228 
3229 	// empty
3230 	T_ASSERT(timingsafe_bcmp(NULL, NULL, 0) == 0, NULL);
3231 	T_ASSERT(timingsafe_bcmp("foo", "foo", 0) == 0, NULL);
3232 	T_ASSERT(timingsafe_bcmp("foo", "bar", 0) == 0, NULL);
3233 
3234 	// equal
3235 	T_ASSERT(timingsafe_bcmp("foo", "foo", strlen("foo")) == 0, NULL);
3236 
3237 	// unequal
3238 	T_ASSERT(timingsafe_bcmp("foo", "bar", strlen("foo")) == 1, NULL);
3239 	T_ASSERT(timingsafe_bcmp("foo", "goo", strlen("foo")) == 1, NULL);
3240 	T_ASSERT(timingsafe_bcmp("foo", "fpo", strlen("foo")) == 1, NULL);
3241 	T_ASSERT(timingsafe_bcmp("foo", "fop", strlen("foo")) == 1, NULL);
3242 
3243 	// all possible bitwise differences
3244 	for (i = 1; i < 256; i += 1) {
3245 		unsigned char a = 0;
3246 		unsigned char b = (unsigned char)i;
3247 
3248 		T_ASSERT(timingsafe_bcmp(&a, &b, sizeof(a)) == 1, NULL);
3249 	}
3250 
3251 	// large
3252 	buf_size = 1024 * 16;
3253 	buf = kalloc_data(buf_size, Z_WAITOK);
3254 	T_EXPECT_NOTNULL(buf, "kalloc of buf");
3255 
3256 	read_random(buf, buf_size);
3257 	T_ASSERT(timingsafe_bcmp(buf, buf, buf_size) == 0, NULL);
3258 	T_ASSERT(timingsafe_bcmp(buf, buf + 1, buf_size - 1) == 1, NULL);
3259 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 1, NULL);
3260 
3261 	memcpy(buf + 128, buf, 128);
3262 	T_ASSERT(timingsafe_bcmp(buf, buf + 128, 128) == 0, NULL);
3263 
3264 	kfree_data(buf, buf_size);
3265 
3266 	return KERN_SUCCESS;
3267 }
3268 
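/*
 * Logs short- and char-sized values through the %hx and %hhx length
 * modifiers; the test passes as long as the printf call completes.
 */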
3269 kern_return_t
3270 kprintf_hhx_test(void)
3271 {
3272 	printf("POST hhx test %hx%hx%hx%hx %hhx%hhx%hhx%hhx - %llx",
3273 	    (unsigned short)0xfeed, (unsigned short)0xface,
3274 	    (unsigned short)0xabad, (unsigned short)0xcafe,
3275 	    (unsigned char)'h', (unsigned char)'h', (unsigned char)'x',
3276 	    (unsigned char)'!',
3277 	    0xfeedfaceULL);
3278 	T_PASS("kprintf_hhx_test passed");
3279 	return KERN_SUCCESS;
3280 }
3281