xref: /xnu-11215/tests/ktrace/kdebug_tests.c (revision 8d741a5d)
1 // Copyright (c) 2020-2022 Apple Inc.  All rights reserved.
2 
3 #include <darwintest.h>
4 #include <darwintest_utils.h>
5 #include <dispatch/dispatch.h>
6 #include <inttypes.h>
7 #include <ktrace/session.h>
8 #include <ktrace/private.h>
9 #include <kperf/kperf.h>
10 #include <mach/clock_types.h>
11 #include <mach/dyld_kernel.h>
12 #include <mach/host_info.h>
13 #include <mach/mach.h>
14 #include <mach/mach_init.h>
15 #include <mach/task.h>
16 #include <os/assumes.h>
17 #include <stdlib.h>
18 #include <sys/kdebug.h>
19 #include <sys/kdebug_signpost.h>
20 #include <sys/resource_private.h>
21 #include <sys/sysctl.h>
22 #include <stdint.h>
23 #include <TargetConditionals.h>
24 
25 #include "ktrace_helpers.h"
26 #include "test_utils.h"
27 #include "ktrace_meta.h"
28 
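/*
 * Scenario selectors passed to assert_kdebug_test() (see ktrace_helpers.h),
 * which asks an in-kernel test facility to emit events on the test's behalf.
 */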
29 #define KDBG_TEST_MACROS         1
30 #define KDBG_TEST_OLD_TIMES      2
31 #define KDBG_TEST_FUTURE_TIMES   3
32 #define KDBG_TEST_IOP_SYNC_FLUSH 4
33 
34 #pragma mark kdebug syscalls
35 
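// An arbitrary event ID for events emitted by these tests, unlikely to
// collide with other tracepoints.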
36 #define TRACE_DEBUGID (0xfedfed00U)
37 
38 T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events",
39 		T_META_TAG_VM_PREFERRED)
40 {
41 	start_controlling_ktrace();
42 
43 	ktrace_session_t s = ktrace_session_create();
44 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
45 	ktrace_set_collection_interval(s, 200);
46 
47 	__block int events_seen = 0;
48 	ktrace_events_single(s, TRACE_DEBUGID, ^void (struct trace_point *tp) {
49 		events_seen++;
50 		T_PASS("saw traced event");
51 
52 		if (ktrace_is_kernel_64_bit(s)) {
53 			T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedfacefeedface),
54 					"argument 1 of traced event is correct");
55 		} else {
56 			T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedface),
57 					"argument 1 of traced event is correct");
58 		}
59 		T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of traced event is correct");
60 		T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of traced event is correct");
61 		T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of traced event is correct");
62 
63 		ktrace_end(s, 1);
64 	});
65 
66 	ktrace_set_completion_handler(s, ^{
67 		T_EXPECT_GE(events_seen, 1, NULL);
68 		ktrace_session_destroy(s);
69 		T_END;
70 	});
71 
72 	ktrace_filter_pid(s, getpid());
73 
74 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
75 	T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 0xfeedfacefeedface, 2,
76 			3, 4), NULL);
77 	ktrace_end(s, 0);
78 
79 	dispatch_main();
80 }
81 
82 #if __LP64__
83 #define IS_64BIT true
84 #else // __LP64__
85 #define IS_64BIT false
86 #endif // !__LP64__
87 
88 #define STRING_SIZE (1024)
89 
90 T_DECL(kdebug_trace_string_syscall,
91 		"test that kdebug_trace_string(2) emits correct events",
92 		T_META_ENABLED(IS_64BIT),
93 		T_META_TAG_VM_PREFERRED)
94 {
95 	start_controlling_ktrace();
96 
97 	ktrace_session_t s = ktrace_session_create();
98 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
99 	ktrace_set_collection_interval(s, 200);
100 	ktrace_filter_pid(s, getpid());
101 
102 	char *traced_string = calloc(1, STRING_SIZE);
103 	T_QUIET; T_WITH_ERRNO;
104 	T_ASSERT_NOTNULL(traced_string, "allocated memory for string");
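	// Fill the whole STRING_SIZE allocation with a repeating alphabet,
	// leaving the final byte for the NUL terminator.  (sizeof(traced_string)
	// would only measure the pointer.)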
105 	for (size_t i = 0; i < STRING_SIZE; i++) {
106 		traced_string[i] = 'a' + (i % 26);
107 	}
108 	traced_string[STRING_SIZE - 1] = '\0';
109 	size_t traced_len = strlen(traced_string);
110 	T_QUIET; T_ASSERT_EQ(traced_len, (size_t)STRING_SIZE - 1,
111 			"traced string should be filled");
112 
113 	ktrace_events_single(s, TRACE_DEBUGID,
114 			^void (struct trace_point * __unused tp) {
115 		// Do nothing -- just ensure the event is filtered in.
116 	});
117 
118 	__block unsigned int string_cpu = 0;
119 	__block bool tracing_string = false;
120 	char *observed_string = calloc(1, PATH_MAX);
121 	T_QUIET; T_WITH_ERRNO;
122 	T_ASSERT_NOTNULL(observed_string, "allocated memory for observed string");
123 	__block size_t string_offset = 0;
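	/*
	 * Reassemble the traced string from TRACE_STRING_GLOBAL events: the
	 * start event carries the tagged debugid in arg1 and the first 16 bytes
	 * of the string in arg3 and arg4, and each continuation event on the
	 * same CPU carries another 32 bytes in arg1 through arg4.
	 */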
124 	ktrace_events_single(s, TRACE_STRING_GLOBAL, ^(struct trace_point *tp){
125 		if (tp->debugid & DBG_FUNC_START && tp->arg1 == TRACE_DEBUGID) {
126 			tracing_string = true;
127 			string_cpu = tp->cpuid;
128 			memcpy(observed_string + string_offset, &tp->arg3,
129 					sizeof(uint64_t) * 2);
130 			string_offset += sizeof(uint64_t) * 2;
131 		} else if (tracing_string && string_cpu == tp->cpuid) {
132 			memcpy(observed_string + string_offset, &tp->arg1,
133 					sizeof(uint64_t) * 4);
134 			string_offset += sizeof(uint64_t) * 4;
135 			if (tp->debugid & DBG_FUNC_END) {
136 				ktrace_end(s, 1);
137 			}
138 		}
139 	});
140 
141 	ktrace_set_completion_handler(s, ^{
142 		T_EXPECT_TRUE(tracing_string, "found string in trace");
143 		size_t observed_len = strlen(observed_string);
144 		T_EXPECT_EQ(traced_len, observed_len, "string lengths should be equal");
145 		if (traced_len == observed_len) {
146 			T_EXPECT_EQ_STR(traced_string, observed_string,
147 					"observed correct string");
148 		}
149 		ktrace_session_destroy(s);
150 		T_END;
151 	});
152 
153 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
154 	uint64_t str_id = kdebug_trace_string(TRACE_DEBUGID, 0, traced_string);
155 	T_WITH_ERRNO; T_ASSERT_NE(str_id, (uint64_t)0, "kdebug_trace_string(2)");
156 	ktrace_end(s, 0);
157 
158 	dispatch_main();
159 }
160 
161 #define SIGNPOST_SINGLE_CODE (0x10U)
162 #define SIGNPOST_PAIRED_CODE (0x20U)
163 
164 T_DECL(kdebug_signpost_syscall,
165 		"test that kdebug_signpost(2) emits correct events",
166 		T_META_TAG_VM_PREFERRED)
167 {
168 	start_controlling_ktrace();
169 
170 	ktrace_session_t s = ktrace_session_create();
171 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
172 
173 	__block int single_seen = 0;
174 	__block int paired_seen = 0;
175 
176 	/* make sure to get enough events for the KDBUFWAIT to trigger */
177 	// ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
178 	ktrace_events_single(s,
179 	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
180 	    ^(struct trace_point *tp) {
181 		single_seen++;
182 		T_PASS("single signpost is traced");
183 
184 		T_EXPECT_EQ(tp->arg1, 1ULL, "argument 1 of single signpost is correct");
185 		T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of single signpost is correct");
186 		T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of single signpost is correct");
187 		T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of single signpost is correct");
188 	});
189 
190 	ktrace_events_single_paired(s,
191 	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
192 	    ^(struct trace_point *start, struct trace_point *end) {
193 		paired_seen++;
194 		T_PASS("paired signposts are traced");
195 
196 		T_EXPECT_EQ(start->arg1, 5ULL, "argument 1 of start signpost is correct");
197 		T_EXPECT_EQ(start->arg2, 6ULL, "argument 2 of start signpost is correct");
198 		T_EXPECT_EQ(start->arg3, 7ULL, "argument 3 of start signpost is correct");
199 		T_EXPECT_EQ(start->arg4, 8ULL, "argument 4 of start signpost is correct");
200 
201 		T_EXPECT_EQ(end->arg1, 9ULL, "argument 1 of end signpost is correct");
202 		T_EXPECT_EQ(end->arg2, 10ULL, "argument 2 of end signpost is correct");
203 		T_EXPECT_EQ(end->arg3, 11ULL, "argument 3 of end signpost is correct");
204 		T_EXPECT_EQ(end->arg4, 12ULL, "argument 4 of end signpost is correct");
205 
206 		T_EXPECT_EQ(single_seen, 1, "signposts are traced in the correct order");
207 
208 		ktrace_end(s, 1);
209 	});
210 
211 	ktrace_set_completion_handler(s, ^(void) {
212 		T_QUIET; T_EXPECT_NE(single_seen, 0,
213 		"did not see single tracepoint before timeout");
214 		T_QUIET; T_EXPECT_NE(paired_seen, 0,
215 		"did not see paired tracepoint before timeout");
216 		ktrace_session_destroy(s);
217 		T_END;
218 	});
219 
220 	ktrace_filter_pid(s, getpid());
221 
222 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
223 	    "started tracing");
224 
225 #pragma clang diagnostic push
226 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
227 	T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE, 1, 2, 3, 4),
228 	    "emitted single signpost");
229 	T_EXPECT_POSIX_SUCCESS(
230 		kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8),
231 		"emitted start signpost");
232 	T_EXPECT_POSIX_SUCCESS(
233 		kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12),
234 		"emitted end signpost");
235 #pragma clang diagnostic pop
236 	ktrace_end(s, 0);
237 
238 	dispatch_main();
239 }
240 
241 T_DECL(syscall_tracing,
242 		"ensure that syscall arguments are traced properly",
243 		T_META_TAG_VM_PREFERRED)
244 {
245 	ktrace_session_t s = ktrace_session_create();
246 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
247 
248 	__block bool seen = false;
249 
250 	ktrace_filter_pid(s, getpid());
251 
252 	static const int telemetry_syscall_no = 451;
253 	static const uint64_t arg1 = 0xfeedfacefeedface;
254 
255 	ktrace_events_single(s, BSDDBG_CODE(DBG_BSD_EXCP_SC, telemetry_syscall_no),
256 			^(struct trace_point *evt){
257 		if (KDBG_EXTRACT_CODE(evt->debugid) != telemetry_syscall_no || seen) {
258 			return;
259 		}
260 
261 		seen = true;
262 		if (ktrace_is_kernel_64_bit(s)) {
263 			T_EXPECT_EQ(evt->arg1, arg1,
264 					"argument 1 of syscall event is correct");
265 		} else {
266 			T_EXPECT_EQ(evt->arg1, (uint64_t)(uint32_t)(arg1),
267 					"argument 1 of syscall event is correct");
268 		}
269 
270 		ktrace_end(s, 1);
271 	});
272 
273 	ktrace_set_completion_handler(s, ^{
274 		T_ASSERT_TRUE(seen,
275 				"should have seen a syscall event for telemetry(2)");
276 		ktrace_session_destroy(s);
277 		T_END;
278 	});
279 
280 	int error = ktrace_start(s, dispatch_get_main_queue());
281 	T_ASSERT_POSIX_ZERO(error, "started tracing");
282 
283 	/*
284 	 * telemetry(2) has a 64-bit argument that will definitely be traced, and
285 	 * is unlikely to be used elsewhere by this process.
286 	 */
287 	extern int __telemetry(uint64_t cmd, uint64_t deadline, uint64_t interval,
288 			uint64_t leeway, uint64_t arg4, uint64_t arg5);
289 	(void)__telemetry(arg1, 0, 0, 0, 0, 0);
290 
291 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
292 			dispatch_get_main_queue(), ^{
293 		T_LOG("ending test due to timeout");
294 		ktrace_end(s, 0);
295 	});
296 
297 	dispatch_main();
298 }
299 
300 #pragma mark kdebug behaviors
301 
302 #define WRAPPING_EVENTS_COUNT     (150000)
303 #define TRACE_ITERATIONS          (5000)
304 #define WRAPPING_EVENTS_THRESHOLD (100)
305 
306 T_DECL(wrapping,
307     "ensure that wrapping emits a lost-events tracepoint and no events prior to the wrap",
308     T_META_CHECK_LEAKS(false),
309 	T_META_TAG_VM_PREFERRED)
310 {
311 	kbufinfo_t buf_info;
312 	int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
313 	int current_secs = wait_wrapping_secs;
314 
315 	start_controlling_ktrace();
316 
317 	/* use sysctls manually to bypass libktrace assumptions */
318 
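	/*
	 * The kdebug sysctls share the { CTL_KERN, KERN_KDEBUG, <op>, <arg> }
	 * MIB layout: KERN_KDSETBUF sizes the buffers, KERN_KDSETUP allocates
	 * them, and KERN_KDENABLE turns tracing on or off.
	 */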
319 	int mib[4] = { CTL_KERN, KERN_KDEBUG };
320 	mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
321 	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");
322 
323 	mib[2] = KERN_KDSETUP; mib[3] = 0;
324 	size_t needed = 0;
325 	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
326 	    "KERN_KDSETUP");
327 
328 	mib[2] = KERN_KDENABLE; mib[3] = 1;
329 	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");
330 
331 	/* wrapping is on by default */
332 
333 	/* wait until wrapped */
334 	T_LOG("waiting for trace to wrap");
335 	mib[2] = KERN_KDGETBUF;
336 	needed = sizeof(buf_info);
337 	do {
338 		sleep(1);
339 		for (int i = 0; i < TRACE_ITERATIONS; i++) {
340 			T_QUIET;
341 			T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
342 		}
343 		T_QUIET;
344 		T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
345 		    NULL);
346 	} while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);
347 
348 	T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
349 	    "trace wrapped (after %d seconds within %d second timeout)",
350 	    wait_wrapping_secs - current_secs, wait_wrapping_secs);
351 
352 	ktrace_session_t s = ktrace_session_create();
353 	T_QUIET; T_ASSERT_NOTNULL(s, NULL);
354 	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);
355 
356 	__block int events = 0;
357 
358 	ktrace_events_all(s, ^(struct trace_point *tp) {
359 		if (events == 0) {
360 		        T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
361 		        "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
362 		        tp->debugid,
363 		        ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
364 		} else {
365 		        T_QUIET;
366 		        T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
367 		        "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
368 		        tp->debugid,
369 		        ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
370 		}
371 
372 		events++;
373 		if (events > WRAPPING_EVENTS_THRESHOLD) {
374 		        ktrace_end(s, 1);
375 		}
376 	});
377 
378 	ktrace_set_completion_handler(s, ^{
379 		ktrace_session_destroy(s);
380 		T_END;
381 	});
382 
383 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
384 	    "started tracing");
385 
386 	dispatch_main();
387 }
388 
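/*
 * Assert that kdebug buffers are configured and that the enabled state (the
 * inverse of kbufinfo_t's nolog field) matches `enable`.
 */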
389 static void
390 _assert_tracing_state(bool enable, const char *msg)
391 {
392 	kbufinfo_t bufinfo = { 0 };
393 	T_QUIET;
394 	T_ASSERT_POSIX_SUCCESS(sysctl(
395 		    (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF }, 3,
396 		    &bufinfo, &(size_t){ sizeof(bufinfo) }, NULL, 0),
397 	    "get kdebug buffer info");
398 	T_QUIET;
399 	T_ASSERT_NE(bufinfo.nkdbufs, 0, "tracing should be configured");
400 	T_ASSERT_NE(bufinfo.nolog, enable, "%s: tracing should%s be enabled",
401 			msg, enable ? "" : "n't");
402 }
403 
404 #define DRAIN_TIMEOUT_NS (1 * NSEC_PER_SEC)
405 
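/*
 * Repeatedly read the raw trace buffers with KERN_KDREADTR, which reports
 * the number of events read back through the size pointer, until an event
 * with the given debugid appears or DRAIN_TIMEOUT_NS elapses.
 */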
406 static void
407 _drain_until_event(uint32_t debugid)
408 {
409 	static kd_buf events[256] = { 0 };
410 	uint64_t start_time_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
411 	unsigned int reads = 0;
412 	while (true) {
413 		size_t events_size = sizeof(events);
414 		T_QUIET;
415 		T_ASSERT_POSIX_SUCCESS(sysctl(
416 				(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDREADTR, }, 3,
417 				events, &events_size, NULL, 0), "reading trace data");
418 		reads += 1;
419 		size_t events_count = events_size;
420 		for (size_t i = 0; i < events_count; i++) {
421 			if (events[i].debugid == debugid) {
422 				T_LOG("draining found event 0x%x", debugid);
423 				return;
424 			}
425 		}
426 		uint64_t cur_time_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
427 		if (cur_time_ns - start_time_ns > DRAIN_TIMEOUT_NS) {
428 			T_ASSERT_FAIL("timed out after %f seconds waiting for 0x%x,"
429 					" after %u reads",
430 					(double)(cur_time_ns - start_time_ns) / 1e9, debugid,
431 					reads);
432 		}
433 	}
434 }
435 
436 T_DECL(disabling_event_match,
437     "ensure that ktrace is disabled when an event disable matcher fires",
438 	T_META_TAG_VM_PREFERRED)
439 {
440 	start_controlling_ktrace();
441 
442 	T_SETUPBEGIN;
443 	ktrace_session_t s = ktrace_session_create();
444 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
445 	ktrace_events_single(s, TRACE_DEBUGID,
446 			^(struct trace_point *tp __unused) {});
447 	int error = ktrace_configure(s);
448 	T_QUIET; T_ASSERT_POSIX_ZERO(error, "configured session");
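	/*
	 * KERN_KDSET_EDM takes a pair of entries; judging by the checks below,
	 * the first is the match value and the second is a mask applied to the
	 * debugid and arguments before comparison.
	 */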
449 	kd_event_matcher matchers[2] = { {
450 		.kem_debugid = TRACE_DEBUGID,
451 		.kem_args[0] = 0xff,
452 	}, {
453 		.kem_debugid = UINT32_MAX,
454 		.kem_args[0] = 0xfff,
455 	} };
456 	size_t matchers_size = sizeof(matchers);
457 	T_ASSERT_POSIX_SUCCESS(sysctl(
458 		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSET_EDM, }, 3,
459 		&matchers, &matchers_size, NULL, 0), "set event disable matcher");
460 	size_t size = 0;
461 	T_ASSERT_POSIX_SUCCESS(sysctl(
462 		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDEFLAGS, KDBG_MATCH_DISABLE, }, 4,
463 		NULL, &size, NULL, 0), "enabled event disable matching");
464 	T_ASSERT_POSIX_SUCCESS(sysctl(
465 		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDENABLE, 1, }, 4,
466 		NULL, NULL, NULL, 0), "enabled tracing");
467 	_assert_tracing_state(true, "after enabling trace");
468 	T_SETUPEND;
469 
470 	kdebug_trace(TRACE_DEBUGID + 8, 0xff, 0, 0, 0);
471 	_drain_until_event(TRACE_DEBUGID + 8);
472 	_assert_tracing_state(true, "with wrong debugid");
473 	kdebug_trace(TRACE_DEBUGID, 0, 0, 0, 0);
474 	_drain_until_event(TRACE_DEBUGID);
475 	_assert_tracing_state(true, "with wrong argument");
476 	kdebug_trace(TRACE_DEBUGID, 0xff, 0, 0, 0);
477 	_drain_until_event(TRACE_DEBUGID);
478 	_assert_tracing_state(false, "after disabling event");
479 }
480 
481 T_DECL(reject_old_events,
482     "ensure that kdebug rejects events from before tracing began",
483     T_META_CHECK_LEAKS(false),
484 	T_META_TAG_VM_PREFERRED)
485 {
486 	__block uint64_t event_horizon_ts;
487 
488 	start_controlling_ktrace();
489 
490 	ktrace_session_t s = ktrace_session_create();
491 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
492 	ktrace_set_collection_interval(s, 100);
493 
494 	__block int events = 0;
495 	ktrace_events_single(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 1),
496 	    ^(struct trace_point *tp) {
497 		events++;
498 		T_EXPECT_GT(tp->timestamp, event_horizon_ts,
499 		"events in trace should be from after tracing began");
500 	});
501 
502 	ktrace_set_completion_handler(s, ^{
503 		T_EXPECT_EQ(events, 2, "should see only two events");
504 		ktrace_session_destroy(s);
505 		T_END;
506 	});
507 
508 	event_horizon_ts = mach_absolute_time();
509 
510 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
511 	/* first, try an old event at the beginning of trace */
512 	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "induce old event at beginning");
513 	/* after a good event has been traced, old events should be rejected */
514 	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "induce old event to be rejected");
515 	ktrace_end(s, 0);
516 
517 	dispatch_main();
518 }
519 
520 #define ORDERING_TIMEOUT_SEC 5
521 
522 T_DECL(ascending_time_order,
523     "ensure that kdebug events are in ascending order based on time",
524     T_META_CHECK_LEAKS(false), XNU_T_META_SOC_SPECIFIC, T_META_TAG_VM_NOT_ELIGIBLE)
525 {
526 	__block uint64_t prev_ts = 0;
527 	__block uint32_t prev_debugid = 0;
528 	__block unsigned int prev_cpu = 0;
529 	__block bool in_order = true;
530 
531 	start_controlling_ktrace();
532 
533 	ktrace_session_t s = ktrace_session_create();
534 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
535 
536 	ktrace_events_all(s, ^(struct trace_point *tp) {
537 		if (tp->timestamp < prev_ts) {
538 		        in_order = false;
539 		        T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
540 		        prev_ts, prev_debugid, prev_cpu);
541 		        T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
542 		        tp->timestamp, tp->debugid, tp->cpuid);
543 		        ktrace_end(s, 1);
544 		}
545 	});
546 
547 	ktrace_set_completion_handler(s, ^{
548 		ktrace_session_destroy(s);
549 		T_EXPECT_TRUE(in_order, "event timestamps were in-order");
550 		T_END;
551 	});
552 
553 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
554 	    "started tracing");
555 
556 	/* try to inject old timestamps into trace */
557 	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "inject old time");
558 
559 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
560 	    dispatch_get_main_queue(), ^{
561 		T_LOG("ending test after timeout");
562 		ktrace_end(s, 1);
563 	});
564 
565 	dispatch_main();
566 }
567 
568 #pragma mark dyld tracing
569 
570 __attribute__((aligned(8)))
571 static const char map_uuid[16] = "map UUID";
572 
573 __attribute__((aligned(8)))
574 static const char unmap_uuid[16] = "unmap UUID";
575 
576 __attribute__((aligned(8)))
577 static const char sc_uuid[16] = "shared UUID";
578 
579 static fsid_t map_fsid = { .val = { 42, 43 } };
580 static fsid_t unmap_fsid = { .val = { 44, 45 } };
581 static fsid_t sc_fsid = { .val = { 46, 47 } };
582 
583 static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
584 static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
585 static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };
586 
587 #define MAP_LOAD_ADDR   0xabadcafe
588 #define UNMAP_LOAD_ADDR 0xfeedface
589 #define SC_LOAD_ADDR    0xfedfaced
590 
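/*
 * Check one dyld UUID event against the expected image info.  On 64-bit
 * kernels the info spans two events (orders 0 and 1); on 32-bit kernels it
 * spans three, since each event argument holds only 32 bits.
 */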
591 __unused
592 static void
593 expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
594     uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
595     int order)
596 {
597 #if defined(__LP64__) || defined(__arm64__)
598 	if (order == 0) {
599 		uint64_t uuid[2];
600 		uint64_t load_addr;
601 		fsid_t fsid;
602 
603 		uuid[0] = (uint64_t)tp->arg1;
604 		uuid[1] = (uint64_t)tp->arg2;
605 		load_addr = (uint64_t)tp->arg3;
606 		fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
607 		fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);
608 
609 		T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
610 		T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
611 		T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
612 		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
613 		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
614 	} else if (order == 1) {
615 		fsobj_id_t fsobjid;
616 
617 		fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
618 		fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);
619 
620 		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
621 		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
622 		    exp_fsobjid->fid_generation, NULL);
623 	} else {
624 		T_ASSERT_FAIL("unrecognized order of events %d", order);
625 	}
626 #else /* defined(__LP64__) */
627 	if (order == 0) {
628 		uint32_t uuid[4];
629 
630 		uuid[0] = (uint32_t)tp->arg1;
631 		uuid[1] = (uint32_t)tp->arg2;
632 		uuid[2] = (uint32_t)tp->arg3;
633 		uuid[3] = (uint32_t)tp->arg4;
634 
635 		T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
636 		T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
637 		T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
638 		T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
639 	} else if (order == 1) {
640 		uint32_t load_addr;
641 		fsid_t fsid;
642 		fsobj_id_t fsobjid;
643 
644 		load_addr = (uint32_t)tp->arg1;
645 		fsid.val[0] = (int32_t)tp->arg2;
646 		fsid.val[1] = (int32_t)tp->arg3;
647 		fsobjid.fid_objno = (uint32_t)tp->arg4;
648 
649 		T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
650 		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
651 		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
652 		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
653 	} else if (order == 2) {
654 		fsobj_id_t fsobjid;
655 
656 		fsobjid.fid_generation = tp->arg1;
657 
658 		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
659 		    exp_fsobjid->fid_generation, NULL);
660 	} else {
661 		T_ASSERT_FAIL("unrecognized order of events %d", order);
662 	}
663 #endif /* defined(__LP64__) */
664 }
665 
666 #if defined(__LP64__) || defined(__arm64__)
667 #define DYLD_CODE_OFFSET (0)
668 #define DYLD_EVENTS      (2)
669 #else
670 #define DYLD_CODE_OFFSET (2)
671 #define DYLD_EVENTS      (3)
672 #endif
673 
674 static void
675 expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
676     const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
677     fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
678 {
679 	for (int i = 0; i < DYLD_EVENTS; i++) {
680 		ktrace_events_single(s, KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
681 		    base_code + DYLD_CODE_OFFSET + (unsigned int)i),
682 		    ^(struct trace_point *tp) {
683 			T_LOG("checking %s event %c", name, 'A' + i);
684 			expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
685 			exp_fsid, exp_fsobjid, i);
686 			*saw_events |= (1U << i);
687 		});
688 	}
689 }
690 
691 T_DECL(dyld_events, "test that dyld registering libraries emits events",
692 		T_META_TAG_VM_PREFERRED)
693 {
694 	dyld_kernel_image_info_t info;
695 
696 	/*
697 	 * Use pointers instead of __block variables in order to use these variables
698 	 * in the completion block below _and_ pass pointers to them to the
699 	 * expect_dyld_events function.
700 	 */
701 	uint8_t saw_events[3] = { 0 };
702 	uint8_t *saw_mapping = &(saw_events[0]);
703 	uint8_t *saw_unmapping = &(saw_events[1]);
704 	uint8_t *saw_shared_cache = &(saw_events[2]);
705 
706 	start_controlling_ktrace();
707 
708 	ktrace_session_t s = ktrace_session_create();
709 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
710 
711 	T_QUIET;
712 	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
713 	    "filtered to current process");
714 
715 	expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
716 	    MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
717 	expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
718 	    UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
719 	expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
720 	    sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);
721 
722 	ktrace_set_completion_handler(s, ^{
723 		ktrace_session_destroy(s);
724 
725 		T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
726 		T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
727 		T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
728 		T_END;
729 	});
730 
731 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
732 
733 	info.load_addr = MAP_LOAD_ADDR;
734 	memcpy(info.uuid, map_uuid, sizeof(info.uuid));
735 	info.fsid = map_fsid;
736 	info.fsobjid = map_fsobjid;
737 	T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
738 	    &info, 1), "registered dyld image info");
739 
740 	info.load_addr = UNMAP_LOAD_ADDR;
741 	memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
742 	info.fsid = unmap_fsid;
743 	info.fsobjid = unmap_fsobjid;
744 	T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
745 	    &info, 1), "unregistered dyld image info");
746 
747 	info.load_addr = SC_LOAD_ADDR;
748 	memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
749 	info.fsid = sc_fsid;
750 	info.fsobjid = sc_fsobjid;
751 	T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
752 		    mach_task_self(), info, FALSE, FALSE),
753 	    "registered dyld shared cache image info");
754 
755 	ktrace_end(s, 0);
756 
757 	dispatch_main();
758 }
759 
760 #pragma mark kdebug kernel macros
761 
762 #define EXP_KERNEL_EVENTS 5U
763 
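/*
 * Event codes emitted by the in-kernel KDBG_TEST_MACROS scenario, grouped by
 * which flavor of tracing macro (development-only, release, filtered, or
 * process-filter-exempt) emits them.
 */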
764 static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
765 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
766 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
767 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
768 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
769 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
770 };
771 
772 static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
773 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
774 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
775 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
776 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
777 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
778 };
779 
780 static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
781 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
782 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
783 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
784 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
785 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
786 };
787 
788 static const uint32_t noprocfilt_evts[EXP_KERNEL_EVENTS] = {
789 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 15),
790 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 16),
791 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 17),
792 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 18),
793 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 19),
794 };
795 
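/*
 * Check a test event's arguments: the nth matching event is expected to
 * carry the values 1 through n-1 in its leading argument slots and zeroes in
 * the rest.
 */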
796 static void
797 expect_event(struct trace_point *tp, const char *name, unsigned int *events,
798     const uint32_t *event_ids, size_t event_ids_len)
799 {
800 	unsigned int event_idx = *events;
801 	bool event_found = false;
802 	size_t i;
803 	for (i = 0; i < event_ids_len; i++) {
804 		if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
805 			T_LOG("found %s event 0x%x", name, tp->debugid);
806 			event_found = true;
807 		}
808 	}
809 
810 	if (!event_found) {
811 		return;
812 	}
813 
814 	*events += 1;
815 	for (i = 0; i < event_idx; i++) {
816 		T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)i + 1,
817 		    NULL);
818 	}
819 	for (; i < 4; i++) {
820 		T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)0, NULL);
821 	}
822 }
823 
824 static void
825 expect_release_event(struct trace_point *tp, unsigned int *events)
826 {
827 	expect_event(tp, "release", events, rel_evts,
828 	    sizeof(rel_evts) / sizeof(rel_evts[0]));
829 }
830 
831 static void
832 expect_development_event(struct trace_point *tp, unsigned int *events)
833 {
834 	expect_event(tp, "dev", events, dev_evts, sizeof(dev_evts) / sizeof(dev_evts[0]));
835 }
836 
837 static void
838 expect_filtered_event(struct trace_point *tp, unsigned int *events)
839 {
840 	expect_event(tp, "filtered", events, filt_evts,
841 	    sizeof(filt_evts) / sizeof(filt_evts[0]));
842 }
843 
844 static void
845 expect_noprocfilt_event(struct trace_point *tp, unsigned int *events)
846 {
847 	expect_event(tp, "noprocfilt", events, noprocfilt_evts,
848 	    sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]));
849 }
850 
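/*
 * Run the kernel's KDBG_TEST_MACROS scenario and report how many
 * development, release, filtered, and process-filter-exempt events were seen
 * to the caller's block.
 */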
851 static void
852 expect_kdbg_test_events(ktrace_session_t s, bool use_all_callback,
853     void (^cb)(unsigned int dev_seen, unsigned int rel_seen,
854     unsigned int filt_seen, unsigned int noprocfilt_seen))
855 {
856 	__block unsigned int dev_seen = 0;
857 	__block unsigned int rel_seen = 0;
858 	__block unsigned int filt_seen = 0;
859 	__block unsigned int noprocfilt_seen = 0;
860 
861 	void (^evtcb)(struct trace_point *tp) = ^(struct trace_point *tp) {
862 		expect_development_event(tp, &dev_seen);
863 		expect_release_event(tp, &rel_seen);
864 		expect_filtered_event(tp, &filt_seen);
865 		expect_noprocfilt_event(tp, &noprocfilt_seen);
866 	};
867 
868 	if (use_all_callback) {
869 		ktrace_events_all(s, evtcb);
870 	} else {
871 		ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
872 		    KDBG_EVENTID(DBG_BSD + 1, 0, 0), evtcb);
873 	}
874 
875 	ktrace_set_completion_handler(s, ^{
876 		ktrace_session_destroy(s);
877 		cb(dev_seen, rel_seen, filt_seen, noprocfilt_seen);
878 		T_END;
879 	});
880 
881 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
882 	assert_kdebug_test(KDBG_TEST_MACROS, "check test macros");
883 
884 	ktrace_end(s, 0);
885 }
886 
887 T_DECL(kernel_events, "ensure kernel macros work", T_META_TAG_VM_PREFERRED)
888 {
889 	start_controlling_ktrace();
890 
891 	ktrace_session_t s = ktrace_session_create();
892 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
893 
894 	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
895 	    "filtered events to current process");
896 
897 	expect_kdbg_test_events(s, false,
898 	    ^(unsigned int dev_seen, unsigned int rel_seen,
899 	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
900 		/*
901 		 * Development-only events are only filtered if running on an embedded
902 		 * OS.
903 		 */
904 		unsigned int dev_exp;
905 #if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
906 		dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
907 #else
908 		dev_exp = EXP_KERNEL_EVENTS;
909 #endif
910 
911 		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
912 		"release and development events seen");
913 		T_EXPECT_EQ(dev_seen, dev_exp, "development-only events %sseen",
914 		dev_exp ? "" : "not ");
915 		T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
916 		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
917 		"process filter-agnostic events seen");
918 	});
919 
920 	dispatch_main();
921 }
922 
923 T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work",
924 		T_META_TAG_VM_PREFERRED)
925 {
926 	start_controlling_ktrace();
927 
928 	ktrace_session_t s = ktrace_session_create();
929 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
930 
931 	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
932 	    "filtered events to current process");
933 
934 	expect_kdbg_test_events(s, true,
935 	    ^(unsigned int dev_seen, unsigned int rel_seen,
936 	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
937 		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
938 #if defined(__arm64__)
939 		T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
940 		NULL);
941 #else
942 		T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS,
943 		"development-only events seen");
944 #endif /* defined(__arm64__) */
945 		T_EXPECT_EQ(filt_seen, 0U, "no filter-only events seen");
946 		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
947 		"process filter-agnostic events seen");
948 	});
949 
950 	dispatch_main();
951 }
952 
953 T_DECL(kernel_events_noprocfilt,
954     "ensure that the no process filter kernel macros work",
955 	T_META_TAG_VM_PREFERRED)
956 {
957 	start_controlling_ktrace();
958 
959 	ktrace_session_t s = ktrace_session_create();
960 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
961 
962 	/*
963 	 * Only allow launchd events through.
964 	 */
965 	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, 1), "filtered events to launchd");
966 	for (size_t i = 0; i < sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]); i++) {
967 		T_QUIET;
968 		T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s,
969 		    noprocfilt_evts[i]),
970 		    "ignored process filter for noprocfilt event");
971 	}
972 
973 	expect_kdbg_test_events(s, false,
974 	    ^(unsigned int dev_seen, unsigned int rel_seen,
975 	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
976 		T_EXPECT_EQ(rel_seen, 0U, "release and development events not seen");
977 		T_EXPECT_EQ(dev_seen, 0U, "development-only events not seen");
978 		T_EXPECT_EQ(filt_seen, 0U, "filter-only events not seen");
979 
980 		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
981 		"process filter-agnostic events seen");
982 	});
983 
984 	dispatch_main();
985 }
986 
987 static volatile bool continue_abuse = true;
988 
989 #define STRESS_DEBUGID (0xfeedfac0)
990 #define ABUSE_SECS (2)
991 #define TIMER_NS (100 * NSEC_PER_USEC)
992 /*
993  * Use the quantum as the gap threshold.
994  */
995 #define GAP_THRESHOLD_NS (10 * NSEC_PER_MSEC)
996 
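// Spin emitting kdebug events as fast as possible until the test clears
// continue_abuse.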
997 static void *
998 kdebug_abuser_thread(void *ctx)
999 {
1000 	unsigned int id = (unsigned int)(uintptr_t)ctx;
1001 	uint64_t i = 0;
1002 	while (continue_abuse) {
1003 		kdebug_trace(STRESS_DEBUGID, id, i, 0, 0);
1004 		i++;
1005 	}
1006 
1007 	return NULL;
1008 }
1009 
1010 T_DECL(stress, "emit events on all but one CPU with a small buffer",
1011     T_META_CHECK_LEAKS(false),
1012 	T_META_TAG_VM_PREFERRED)
1013 {
1014 	start_controlling_ktrace();
1015 
1016 	T_SETUPBEGIN;
1017 	ktrace_session_t s = ktrace_session_create();
1018 	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");
1019 
1020 	/* Let's not waste any time with pleasantries. */
1021 	ktrace_set_uuid_map_enabled(s, KTRACE_FEATURE_DISABLED);
1022 
1023 	/* Ouch. */
1024 	ktrace_events_all(s, ^(__unused struct trace_point *tp) {});
1025 	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
1026 	(void)atexit_b(^{ kperf_reset(); });
1027 	(void)kperf_action_count_set(1);
1028 	(void)kperf_timer_count_set(1);
1029 	int kperror = kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS));
1030 	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_period_set %llu ns",
1031 	    TIMER_NS);
1032 	kperror = kperf_timer_action_set(0, 1);
1033 	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_action_set");
1034 	kperror = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO |
1035 	    KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK |
1036 	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO |
1037 	    KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH |
1038 	    KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM |
1039 	    KPERF_SAMPLER_TH_INSTRS_CYCLES);
1040 	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_action_samplers_set");
1041 	/* You monster... */
1042 
1043 	/* The coup-de-grace. */
1044 	ktrace_set_buffer_size(s, 10);
1045 
1046 	char filepath_arr[MAXPATHLEN] = "";
1047 	strlcpy(filepath_arr, dt_tmpdir(), sizeof(filepath_arr));
1048 	strlcat(filepath_arr, "/stress.ktrace", sizeof(filepath_arr));
1049 	char *filepath = filepath_arr;
1050 
1051 	int ncpus = 0;
1052 	size_t ncpus_size = sizeof(ncpus);
1053 	int ret = sysctlbyname("hw.logicalcpu_max", &ncpus, &ncpus_size, NULL, 0);
1054 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"hw.logicalcpu_max\")");
1055 	T_QUIET; T_ASSERT_GT(ncpus, 0, "realistic number of CPUs");
1056 
1057 	pthread_t *threads = calloc((unsigned int)ncpus - 1, sizeof(pthread_t));
1058 	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
1059 	    ncpus - 1);
1060 
1061 	ktrace_set_completion_handler(s, ^{
1062 		T_SETUPBEGIN;
1063 		ktrace_session_destroy(s);
1064 
1065 		T_LOG("trace ended, searching for gaps");
1066 
1067 		ktrace_session_t sread = ktrace_session_create();
1068 		T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(sread, "ktrace_session_create");
1069 
1070 		int error = ktrace_set_file(sread, filepath);
1071 		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_set_file %s", filepath);
1072 
1073 		ktrace_file_t f = ktrace_file_open(filepath, false);
1074 		T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(f, "ktrace_file_open %s",
1075 		filepath);
1076 		uint64_t first_timestamp = 0;
1077 		error = ktrace_file_earliest_timestamp(f, &first_timestamp);
1078 		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_file_earliest_timestamp");
1079 
1080 		uint64_t last_timestamp = 0;
1081 		(void)ktrace_file_latest_timestamp(f, &last_timestamp);
1082 
1083 		__block uint64_t prev_timestamp = 0;
1084 		__block uint64_t nevents = 0;
1085 		ktrace_events_all(sread, ^(struct trace_point *tp) {
1086 			nevents++;
1087 			uint64_t delta_ns = 0;
1088 			T_QUIET; T_EXPECT_GE(tp->timestamp, prev_timestamp,
1089 			"timestamps are monotonically increasing");
1090 			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
1091 			tp->timestamp - prev_timestamp, &delta_ns);
1092 			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
1093 			if (prev_timestamp && delta_ns > GAP_THRESHOLD_NS) {
1094 			        if (tp->debugname) {
1095 			                T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)",
1096 			                (double)delta_ns / 1e9, prev_timestamp,
1097 			                tp->timestamp, tp->cpuid, tp->debugname, tp->debugid);
1098 				} else {
1099 			                T_LOG("gap: %gs at %llu - %llu on %d: %#x",
1100 			                (double)delta_ns / 1e9, prev_timestamp,
1101 			                tp->timestamp, tp->cpuid, tp->debugid);
1102 				}
1103 
1104 			        /*
1105 			         * These gaps are ok -- they appear after CPUs are brought back
1106 			         * up.
1107 			         */
1108 #define INTERRUPT (0x1050000)
1109 #define PERF_CPU_IDLE (0x27001000)
1110 #define INTC_HANDLER (0x5000004)
1111 #define DECR_TRAP (0x1090000)
1112 			        uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK;
1113 			        if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE &&
1114 			        eventid != INTC_HANDLER && eventid != DECR_TRAP) {
1115 			                unsigned int lost_events = TRACE_LOST_EVENTS;
1116 			                T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events,
1117 			                "gaps should end with lost events");
1118 				}
1119 			}
1120 
1121 			prev_timestamp = tp->timestamp;
1122 		});
1123 		ktrace_events_single(sread, TRACE_LOST_EVENTS, ^(struct trace_point *tp){
1124 			T_LOG("lost: %llu on %d (%llu)", tp->timestamp, tp->cpuid, tp->arg1);
1125 		});
1126 
1127 		__block uint64_t last_write = 0;
1128 		ktrace_events_single_paired(sread, TRACE_WRITING_EVENTS,
1129 		^(struct trace_point *start, struct trace_point *end) {
1130 			uint64_t delta_ns;
1131 			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
1132 			start->timestamp - last_write, &delta_ns);
1133 			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
1134 
1135 			uint64_t dur_ns;
1136 			converror = ktrace_convert_timestamp_to_nanoseconds(sread,
1137 			end->timestamp - start->timestamp, &dur_ns);
1138 			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
1139 
1140 			T_LOG("write: %llu (+%gs): %gus on %d: %llu events", start->timestamp,
1141 			(double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1);
1142 			last_write = end->timestamp;
1143 		});
1144 		ktrace_set_completion_handler(sread, ^{
1145 			uint64_t duration_ns = 0;
1146 			if (last_timestamp) {
1147 			        int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
1148 			        last_timestamp - first_timestamp, &duration_ns);
1149 			        T_QUIET; T_ASSERT_POSIX_ZERO(converror,
1150 			        "convert timestamp to ns");
1151 			        T_LOG("file was %gs long, %llu events: %g events/msec/cpu",
1152 			        (double)duration_ns / 1e9, nevents,
1153 			        (double)nevents / ((double)duration_ns / 1e6) / ncpus);
1154 			}
1155 			(void)unlink(filepath);
1156 			ktrace_session_destroy(sread);
1157 			T_END;
1158 		});
1159 
1160 		int starterror = ktrace_start(sread, dispatch_get_main_queue());
1161 		T_QUIET; T_ASSERT_POSIX_ZERO(starterror, "ktrace_start read session");
1162 
1163 		T_SETUPEND;
1164 	});
1165 
1166 /* Just kidding... for now. */
1167 #if 0
1168 	kperror = kperf_sample_set(1);
1169 	T_ASSERT_POSIX_SUCCESS(kperror,
1170 	    "started kperf timer sampling every %llu ns", TIMER_NS);
1171 #endif
1172 
1173 	for (int i = 0; i < (ncpus - 1); i++) {
1174 		int error = pthread_create(&threads[i], NULL, kdebug_abuser_thread,
1175 		    (void *)(uintptr_t)i);
1176 		T_QUIET; T_ASSERT_POSIX_ZERO(error,
1177 		    "pthread_create abuser thread %d", i);
1178 	}
1179 
1180 	int error = ktrace_start_writing_file(s, filepath,
1181 	    ktrace_compression_none, NULL, NULL);
1182 	T_ASSERT_POSIX_ZERO(error, "started writing ktrace to %s", filepath);
1183 
1184 	T_SETUPEND;
1185 
1186 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ABUSE_SECS * NSEC_PER_SEC),
1187 	    dispatch_get_main_queue(), ^{
1188 		T_LOG("ending trace");
1189 		ktrace_end(s, 1);
1190 
1191 		continue_abuse = false;
1192 		for (int i = 0; i < (ncpus - 1); i++) {
1193 		        int joinerror = pthread_join(threads[i], NULL);
1194 		        T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d",
1195 		        i);
1196 		}
1197 	});
1198 
1199 	dispatch_main();
1200 }
1201 
1202 #define ROUND_TRIP_PERIOD UINT64_C(10 * 1000)
1203 #define ROUND_TRIPS_THRESHOLD UINT64_C(25)
1204 #define ROUND_TRIPS_TIMEOUT_SECS (2 * 60)
1205 #define COLLECTION_INTERVAL_MS 100
1206 
1207 /*
1208  * Test a sustained tracing session, involving multiple round-trips to the
1209  * kernel.
1210  *
1211  * Trace all events, and every `ROUND_TRIP_PERIOD` events, emit an event that's
1212  * unlikely to be emitted elsewhere.  Look for this event, too, and make sure we
1213  * see as many of them as we emitted.
1214  *
1215  * After seeing `ROUND_TRIPS_THRESHOLD` of the unlikely events, end tracing.
1216  * In the failure mode, we won't see any of these, so set a timeout of
1217  * `ROUND_TRIPS_TIMEOUT_SECS` to prevent hanging, waiting for events that we'll
1218  * never see.
1219  */
1220 T_DECL(round_trips,
1221     "test sustained tracing with multiple round-trips through the kernel",
1222 	T_META_TAG_VM_PREFERRED)
1223 {
1224 	start_controlling_ktrace();
1225 
1226 	ktrace_session_t s = ktrace_session_create();
1227 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1228 
1229 	/*
1230 	 * Set a small buffer and collection interval to increase the number of
1231 	 * round-trips.
1232 	 */
1233 	ktrace_set_buffer_size(s, 50);
1234 	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);
1235 
1236 	__block uint64_t events = 0;
1237 	__block uint64_t emitted = 0;
1238 	__block uint64_t seen = 0;
1239 	ktrace_events_all(s, ^(__unused struct trace_point *tp) {
1240 		events++;
1241 		if (events % ROUND_TRIP_PERIOD == 0) {
1242 		        T_LOG("emitting round-trip event %" PRIu64, emitted);
1243 		        kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0);
1244 		        emitted++;
1245 		}
1246 	});
1247 
1248 	ktrace_events_single(s, TRACE_DEBUGID, ^(__unused struct trace_point *tp) {
1249 		T_LOG("saw round-trip event after %" PRIu64 " events", events);
1250 		seen++;
1251 		if (seen >= ROUND_TRIPS_THRESHOLD) {
1252 		        T_LOG("ending trace after seeing %" PRIu64 " events, "
1253 		        "emitting %" PRIu64, seen, emitted);
1254 		        ktrace_end(s, 1);
1255 		}
1256 	});
1257 
1258 	ktrace_set_completion_handler(s, ^{
1259 		T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD,
1260 		"emitted %" PRIu64 " round-trip events", emitted);
1261 		T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD,
1262 		"saw %" PRIu64 " round-trip events", seen);
1263 		ktrace_session_destroy(s);
1264 		T_END;
1265 	});
1266 
1267 	int error = ktrace_start(s, dispatch_get_main_queue());
1268 	T_ASSERT_POSIX_ZERO(error, "started tracing");
1269 
1270 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
1271 	    ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(),
1272 	    ^{
1273 		T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS);
1274 		ktrace_end(s, 0);
1275 	});
1276 
1277 	dispatch_main();
1278 }
1279 
1280 #define HEARTBEAT_INTERVAL_SECS 1
1281 #define HEARTBEAT_COUNT 10
1282 
1283 /*
1284  * Ensure we see events periodically, checking for recent events on a
1285  * heart-beat.
1286  */
1287 T_DECL(event_coverage, "ensure events appear up to the end of tracing",
1288 		T_META_TAG_VM_PREFERRED)
1289 {
1290 	start_controlling_ktrace();
1291 
1292 	ktrace_session_t s = ktrace_session_create();
1293 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1294 
1295 	__block uint64_t current_timestamp = 0;
1296 	__block uint64_t events = 0;
1297 	ktrace_events_all(s, ^(struct trace_point *tp) {
1298 		current_timestamp = tp->timestamp;
1299 		events++;
1300 	});
1301 
1302 	ktrace_set_buffer_size(s, 20);
1303 	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);
1304 
1305 	__block uint64_t last_timestamp = 0;
1306 	__block uint64_t last_events = 0;
1307 	__block unsigned int heartbeats = 0;
1308 
1309 	ktrace_set_completion_handler(s, ^{
1310 		ktrace_session_destroy(s);
1311 		T_QUIET; T_EXPECT_GT(events, 0ULL, "should have seen some events");
1312 		T_END;
1313 	});
1314 
1315 	dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
1316 	    0, 0, dispatch_get_main_queue());
1317 	dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
1318 	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC),
1319 	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0);
1320 	dispatch_source_set_cancel_handler(timer, ^{
1321 		dispatch_release(timer);
1322 	});
1323 
1324 	dispatch_source_set_event_handler(timer, ^{
1325 		heartbeats++;
1326 
1327 		T_LOG("heartbeat %u at time %lld, seen %" PRIu64 " events, "
1328 		"current event time %lld", heartbeats, mach_absolute_time(),
1329 		events, current_timestamp);
1330 
1331 		if (current_timestamp > 0) {
1332 		        T_EXPECT_GT(current_timestamp, last_timestamp,
1333 		        "event timestamps should be increasing");
1334 		        T_QUIET; T_EXPECT_GT(events, last_events,
1335 		        "number of events should be increasing");
1336 		}
1337 
1338 		last_timestamp = current_timestamp;
1339 		last_events = events;
1340 
1341 		kdebug_trace(TRACE_DEBUGID, 0, 0, 0, 0);
1342 
1343 		if (heartbeats >= HEARTBEAT_COUNT) {
1344 		        T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT);
1345 		        ktrace_end(s, 0);
1346 		}
1347 	});
1348 
1349 	int error = ktrace_start(s, dispatch_get_main_queue());
1350 	T_ASSERT_POSIX_ZERO(error, "started tracing");
1351 
1352 	dispatch_activate(timer);
1353 
1354 	dispatch_main();
1355 }
1356 
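// Read back the number of event buffers currently allocated by kdebug.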
1357 static unsigned int
1358 get_nevents(void)
1359 {
1360 	kbufinfo_t bufinfo = { 0 };
1361 	T_QUIET;
1362 	T_ASSERT_POSIX_SUCCESS(sysctl(
1363 		    (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF }, 3,
1364 		    &bufinfo, &(size_t){ sizeof(bufinfo) }, NULL, 0),
1365 	    "get kdebug buffer size");
1366 
1367 	return (unsigned int)bufinfo.nkdbufs;
1368 }
1369 
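/*
 * Request a kdebug buffer of `nevents` events, let the kernel allocate it,
 * read back how many events were actually reserved, and tear the buffers
 * down again.
 */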
1370 static unsigned int
1371 set_nevents(unsigned int nevents)
1372 {
1373 	T_QUIET;
1374 	T_ASSERT_POSIX_SUCCESS(sysctl(
1375 		    (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, (int)nevents }, 4,
1376 		    NULL, 0, NULL, 0), "set kdebug buffer size");
1377 
1378 	T_QUIET;
1379 	T_ASSERT_POSIX_SUCCESS(sysctl(
1380 		    (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETUP, (int)nevents }, 4,
1381 		    NULL, 0, NULL, 0), "setup kdebug buffers");
1382 
1383 	unsigned int nevents_allocated = get_nevents();
1384 
1385 	T_QUIET;
1386 	T_ASSERT_POSIX_SUCCESS(sysctl(
1387 		    (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE }, 3,
1388 		    NULL, 0, NULL, 0),
1389 	    "remove kdebug buffers");
1390 
1391 	return nevents_allocated;
1392 }
1393 
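// Convert an event count to MiB, assuming each kd_buf event occupies 64
// bytes.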
1394 static unsigned int
1395 _mb_of_events(unsigned int event_count)
1396 {
1397 	return (unsigned int)(((uint64_t)event_count * 64) >> 20);
1398 }
1399 
1400 T_DECL(set_buffer_size, "ensure large buffer sizes can be set",
1401 		XNU_T_META_SOC_SPECIFIC, T_META_TAG_VM_NOT_ELIGIBLE)
1402 {
1403 	T_SETUPBEGIN;
1404 	uint64_t memsize = 0;
1405 	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &memsize,
1406 	    &(size_t){ sizeof(memsize) }, NULL, 0), "sysctl hw.memsize");
1407 	T_SETUPEND;
1408 
1409 #if TARGET_OS_IPHONE
1410 	if (memsize >= (8ULL << 30)) {
1411 		T_SKIP("skipping on iOS device with memory >= 8GB, rdar://79403304");
1412 	}
1413 #endif // TARGET_OS_IPHONE
1414 
1415 	start_controlling_ktrace();
1416 
1417 	// Try to allocate up to one-eighth of available memory towards
1418 	// tracing.
1419 	uint64_t maxevents_u64 = memsize / 8 / sizeof(kd_buf);
1420 	if (maxevents_u64 > UINT32_MAX) {
1421 		maxevents_u64 = UINT32_MAX;
1422 	}
1423 	unsigned int maxevents = (unsigned int)maxevents_u64;
1424 
1425 	// Use hexadecimal so differing values don't get treated as unique failure signatures.
1426 	unsigned int minevents = set_nevents(0);
1427 	T_ASSERT_GT(minevents, 0, "saw non-zero minimum event count of %#x",
1428 	    minevents);
1429 
1430 	unsigned int step = ((maxevents - minevents - 1) / 4);
1431 	T_ASSERT_GT(step, 0, "stepping by %#x events (%#x MiB)", step,
1432 	    _mb_of_events(step));
1433 
1434 	for (unsigned int i = minevents + step; i < maxevents; i += step) {
1435 		unsigned int actualevents = set_nevents(i);
1436 		T_ASSERT_GE(actualevents, i - minevents,
1437 		    "%#x events (%#x MiB) in kernel when %#x (%#x MiB) requested",
1438 		    actualevents, _mb_of_events(actualevents), i, _mb_of_events(i));
1439 	}
1440 }
1441 
1442 static void *
1443 donothing(__unused void *arg)
1444 {
1445 	return NULL;
1446 }
1447 
1448 T_DECL(long_names, "ensure long command names are reported",
1449 		T_META_TAG_VM_PREFERRED)
1450 {
1451 	start_controlling_ktrace();
1452 
1453 	char longname[] = "thisisaverylongprocessname!";
1454 	char *longname_ptr = longname;
1455 	static_assert(sizeof(longname) > 16,
1456 	    "the name should be longer than MAXCOMLEN");
1457 
1458 	int ret = sysctlbyname("kern.procname", NULL, NULL, longname,
1459 	    sizeof(longname));
1460 	T_ASSERT_POSIX_SUCCESS(ret,
1461 	    "use sysctl kern.procname to lengthen the name");
1462 
1463 	ktrace_session_t ktsess = ktrace_session_create();
1464 
1465 	/*
1466 	 * 32-bit kernels can only trace 16 bytes of the string in their event
1467 	 * arguments.
1468 	 */
1469 	if (!ktrace_is_kernel_64_bit(ktsess)) {
1470 		longname[16] = '\0';
1471 	}
1472 
1473 	ktrace_filter_pid(ktsess, getpid());
1474 
1475 	__block bool saw_newthread = false;
1476 	ktrace_events_single(ktsess, TRACE_STRING_NEWTHREAD,
1477 	    ^(struct trace_point *tp) {
1478 		if (ktrace_get_pid_for_thread(ktsess, tp->threadid) ==
1479 		    getpid()) {
1480 			saw_newthread = true;
1481 
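			// TRACE_STRING_NEWTHREAD carries the process name packed into
			// the four event arguments; reassemble it one argument at a time.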
1482 			char argname[32] = {};
1483 			strncat(argname, (char *)&tp->arg1, sizeof(tp->arg1));
1484 			strncat(argname, (char *)&tp->arg2, sizeof(tp->arg2));
1485 			strncat(argname, (char *)&tp->arg3, sizeof(tp->arg3));
1486 			strncat(argname, (char *)&tp->arg4, sizeof(tp->arg4));
1487 
1488 			T_EXPECT_EQ_STR((char *)argname, longname_ptr,
1489 			    "process name of new thread should be long");
1490 
1491 			ktrace_end(ktsess, 1);
1492 		}
1493 	});
1494 
1495 	ktrace_set_completion_handler(ktsess, ^{
1496 		ktrace_session_destroy(ktsess);
1497 		T_EXPECT_TRUE(saw_newthread,
1498 		    "should have seen the new thread");
1499 		T_END;
1500 	});
1501 
1502 	int error = ktrace_start(ktsess, dispatch_get_main_queue());
1503 	T_ASSERT_POSIX_ZERO(error, "started tracing");
1504 
1505 	pthread_t thread = NULL;
1506 	error = pthread_create(&thread, NULL, donothing, NULL);
1507 	T_ASSERT_POSIX_ZERO(error, "create new thread");
1508 
1509 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
1510 	    dispatch_get_main_queue(), ^{
1511 		ktrace_end(ktsess, 0);
1512 	});
1513 
1514 	error = pthread_join(thread, NULL);
1515 	T_ASSERT_POSIX_ZERO(error, "join to thread");
1516 
1517 	dispatch_main();
1518 }
1519 
1520 T_DECL(continuous_time, "make sure continuous time status can be queried",
1521 	T_META_RUN_CONCURRENTLY(true), T_META_TAG_VM_PREFERRED)
1522 {
1523 	bool cont_time = kdebug_using_continuous_time();
1524 	T_ASSERT_FALSE(cont_time, "should not be using continuous time yet");
1525 }
1526 
1527 T_DECL(lookup_long_paths, "lookup long path names",
1528 		T_META_TAG_VM_PREFERRED)
1529 {
1530 	start_controlling_ktrace();
1531 
1532 	int ret = chdir("/tmp");
1533 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to /tmp");
1534 	const char *dir = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/";
1535 	int i = 0;
1536 	do {
1537 		ret = mkdir(dir, S_IRUSR | S_IWUSR | S_IXUSR);
1538 		if (ret >= 0 || errno != EEXIST) {
1539 			T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mkdir of %d nested directory",
1540 			    i);
1541 		}
1542 		ret = chdir(dir);
1543 		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to %d nested directory", i);
1544 	} while (i++ < 40);
1545 
1546 	ktrace_session_t s = ktrace_session_create();
1547 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1548 	ktrace_set_collection_interval(s, 250);
1549 	ktrace_filter_pid(s, getpid());
1550 	ktrace_events_single(s, VFS_LOOKUP, ^(struct trace_point *tp __unused){});
1551 	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
1552 
1553 	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test", 0);
1554 
1555 	ktrace_set_completion_handler(s, ^{
1556 		dispatch_release(q);
1557 		T_END;
1558 	});
1559 
1560 	int error = ktrace_start(s, q);
1561 	T_ASSERT_POSIX_ZERO(error, "started tracing");
1562 
1563 	int fd = open("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", O_RDWR | O_CREAT, 0600);
1564 	T_ASSERT_POSIX_SUCCESS(fd, "opened file at %d directories deep", i);
1565 
1566 	sleep(5);
1567 
1568 	T_LOG("ending tracing");
1569 	ktrace_end(s, 0);
1570 	dispatch_main();
1571 }
1572 
1573 #pragma mark - boot tracing
1574 
1575 static void
1576 expect_kernel_task_tracing(void)
1577 {
1578 	unsigned int state = 0;
1579 	size_t state_size = sizeof(state);
1580 	int ret = sysctlbyname("ktrace.state", &state, &state_size, NULL, 0);
1581 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl(ktrace.state)");
1582 	T_ASSERT_EQ(state, 1U, "state is foreground");
1583 
1584 	char configured_by[1024] = "";
1585 	size_t configured_by_size = sizeof(configured_by);
1586 	ret = sysctlbyname("ktrace.configured_by", &configured_by,
1587 	    &configured_by_size, NULL, 0);
1588 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl(ktrace.configured_by)");
1589 	T_ASSERT_EQ_STR(configured_by, "kernel_task", "configured by kernel_task");
1590 }
1591 
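// Startup subsystems expected to emit TRACE_INFO_STRING events during early
// boot, in the order they initialize; the handler below matches them
// sequentially.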
static const char *expected_subsystems[] = {
	"tunables", "locks", "kprintf", "pmap_steal", "kmem", "zalloc",
	/* "percpu", only has a startup phase on Intel */
	"codesigning", "oslog", "early_boot",
};
#define EXPECTED_SUBSYSTEMS_LEN \
		(sizeof(expected_subsystems) / sizeof(expected_subsystems[0]))

T_DECL(early_boot_tracing, "ensure early boot strings are present",
	T_META_BOOTARGS_SET("trace=1000000"), XNU_T_META_SOC_SPECIFIC, T_META_TAG_VM_NOT_ELIGIBLE)
{
	T_ATEND(reset_ktrace);

	expect_kernel_task_tracing();

	T_SETUPBEGIN;
	ktrace_session_t s = ktrace_session_create();
	ktrace_set_collection_interval(s, 250);
	int error = ktrace_set_use_existing(s);
	T_ASSERT_POSIX_ZERO(error, "use existing trace buffer");

#if defined(__x86_64__)
#define FIRST_EVENT_STRING "i386_init"
#else /* defined(__x86_64__) */
#define FIRST_EVENT_STRING "kernel_startup_bootstrap"
#endif /* !defined(__x86_64__) */

	__block bool seen_event = false;
	__block size_t cur_subsystem = 0;
	ktrace_events_single(s, TRACE_INFO_STRING, ^(struct trace_point *tp) {
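		// The string payload is packed into the four trace arguments:
		// 4 bytes each on a 32-bit kernel and 8 bytes each on a 64-bit
		// one, so each event carries at most 16 or 32 characters.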
		char early_str[33] = "";
		size_t argsize = ktrace_is_kernel_64_bit(s) ? 8 : 4;
		memcpy(early_str, &tp->arg1, argsize);
		memcpy(early_str + argsize, &tp->arg2, argsize);
		memcpy(early_str + argsize * 2, &tp->arg3, argsize);
		memcpy(early_str + argsize * 3, &tp->arg4, argsize);

		if (!seen_event) {
			T_LOG("found first string event with args: "
			    "0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64,
			    tp->arg1, tp->arg2, tp->arg3, tp->arg4);
			char expect_str[33] = FIRST_EVENT_STRING;
			if (!ktrace_is_kernel_64_bit(s)) {
				// Only the first 16 bytes of the string will be traced.
				expect_str[16] = '\0';
			}

			T_EXPECT_EQ_STR(early_str, expect_str,
			    "first event in boot trace should be the bootstrap message");
		}
		seen_event = true;

		if (strcmp(early_str, expected_subsystems[cur_subsystem]) == 0) {
			T_LOG("found log for subsystem `%s'",
					expected_subsystems[cur_subsystem]);
			cur_subsystem++;
		} else {
			T_LOG("saw extra log for subsystem `%s'", early_str);
		}

		if (cur_subsystem == EXPECTED_SUBSYSTEMS_LEN) {
			T_LOG("ending after seeing all expected logs");
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(seen_event, "should see an early boot string event");
		T_EXPECT_EQ(cur_subsystem, EXPECTED_SUBSYSTEMS_LEN,
				"should see logs from all subsystems");
		if (cur_subsystem != EXPECTED_SUBSYSTEMS_LEN) {
			T_LOG("missing log for %s", expected_subsystems[cur_subsystem]);
		}
		T_END;
	});

	error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	T_SETUPEND;

	dispatch_main();
}

// Allocating ~4TB should be clamped to some lower number.
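// (64 billion events at 64 bytes per event record would be about 4 TB,
// assuming the usual kd_buf size; the kernel should clamp the request rather
// than fail to boot.)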
T_DECL(early_boot_tracing_too_large,
    "ensure early boot tracing can allocate up to a clamped size",
	T_META_BOOTARGS_SET("trace=64000000000"), XNU_T_META_SOC_SPECIFIC, T_META_TAG_VM_NOT_ELIGIBLE)
{
	expect_kernel_task_tracing();
	T_EXPECT_NE(get_nevents(), 0, "allocated some events");
}

// Not SoC-specific because the typefilter parsing logic is generic.
T_DECL(typefilter_boot_arg, "ensure typefilter is set up correctly at boot",
	T_META_BOOTARGS_SET("trace=100000 trace_typefilter=S0x0c00,C0xfe"),
	T_META_TAG_VM_PREFERRED)
{
	T_ATEND(reset_ktrace);

	T_SETUPBEGIN;
	ktrace_config_t config = ktrace_config_create_current();
	T_QUIET; T_WITH_ERRNO;
	T_ASSERT_NOTNULL(config, "create config from current system");
	T_SETUPEND;

	T_LOG("ktrace configuration:");
	ktrace_config_print_description(config, stdout);

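	// The typefilter is a bitmap with one bit per 16-bit class-subclass
	// code: S0x0c00 sets the single bit at 0x0c00, while C0xfe sets all
	// 256 bits from 0xfe00 through 0xfeff (0xfeed lies in that range),
	// so checking the containing byte is sufficient here.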
	uint8_t *typefilt = ktrace_config_kdebug_get_typefilter(config);
	T_ASSERT_NOTNULL(typefilt, "typefilter is active");
	T_EXPECT_TRUE(typefilt[0x0c00 / 8],
			"specified subclass is set in typefilter");
	T_MAYFAIL; // rdar://63625062 (UTD converts commas in boot-args to spaces)
	T_EXPECT_TRUE(typefilt[0xfeed / 8],
			"specified class is set in typefilter");

	ktrace_config_destroy(config);
}

#pragma mark - events present

static int recvd_sigchild = 0;
static void
sighandler(int sig)
{
	if (sig != SIGCHLD) {
		T_ASSERT_FAIL("unexpected signal: %d", sig);
	}
	recvd_sigchild = 1;
}

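// Custom debugid emitted by this test to mark that the spawned child has
// exited and been reaped, so the trace session knows when to stop.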
#define END_EVENT (0xfeedfac0)

T_DECL(instrs_and_cycles_on_proc_exit,
		"instructions and cycles should be traced on process exit",
		T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1),
		T_META_TAG_VM_NOT_ELIGIBLE)
{
	T_SETUPBEGIN;
	start_controlling_ktrace();
	int error;
	struct rusage_info_v4 *rusage = calloc(1, sizeof(*rusage));
	char *args[] = { "ls", "-l", NULL, };
	int status;
	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test",
			DISPATCH_QUEUE_SERIAL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(signal(SIGCHLD, sighandler),
			"register signal handler");

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");
	ktrace_set_collection_interval(s, 100);

	__block pid_t pid;
	__block bool seen_event = false;
	__block uint64_t proc_instrs = 0;
	__block uint64_t proc_cycles = 0;
	__block uint64_t proc_sys_time = 0;
	__block uint64_t proc_usr_time = 0;
	error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_PROC_EXIT,
			^(ktrace_event_t tp){
		if (tp->pid == pid) {
			seen_event = true;
			proc_instrs = tp->arg1;
			proc_cycles = tp->arg2;
			proc_sys_time = tp->arg3;
			proc_usr_time = tp->arg4;
			ktrace_end(s, 1);
		}
	});
	T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event");
	error = ktrace_events_single(s, END_EVENT, ^(ktrace_event_t __unused tp){
		T_LOG("saw ending event, stopping trace session");
		ktrace_end(s, 0);
	});
	T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event");
	ktrace_set_completion_handler(s, ^{
		// TODO Check for equality once rdar://61948669 is fixed.
		T_ASSERT_GE(proc_instrs, rusage->ri_instructions,
				"trace event instrs are >= rusage instrs");
		T_ASSERT_GE(proc_cycles, rusage->ri_cycles,
				"trace event cycles are >= rusage cycles");
		T_ASSERT_GE(proc_sys_time, rusage->ri_system_time,
				"trace event sys time is >= rusage sys time");
		T_ASSERT_GE(proc_usr_time, rusage->ri_user_time,
				"trace event usr time is >= rusage usr time");
		T_EXPECT_TRUE(seen_event, "should see the proc exit trace event");

		free(rusage);
		ktrace_session_destroy(s);
		dispatch_release(q);
		T_END;
	});
	error = ktrace_start(s, q);
	T_ASSERT_POSIX_ZERO(error, "start tracing");
	T_SETUPEND;

	extern char **environ;
	status = posix_spawnp(&pid, args[0], NULL, NULL, args, environ);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(status, "spawn process");
	if (status == 0) {
		while (!recvd_sigchild) {
			pause();
		}
		error = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t)rusage);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "rusage");
		error = waitpid(pid, &status, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(error, "waitpid");
		kdebug_trace(END_EVENT, 0, 0, 0, 0);
	}
	dispatch_main();
}

#define NO_OF_THREADS 2

struct thread_counters_info {
	struct thsc_cpi counts;
	uint64_t cpu_time;
	uint64_t thread_id;
};
typedef struct thread_counters_info *tc_info_t;

static void *
get_thread_counters(void *ptr)
{
	extern uint64_t __thread_selfusage(void);
	extern uint64_t __thread_selfid(void);
	tc_info_t tc_info = (tc_info_t)ptr;
	tc_info->thread_id = __thread_selfid();
	// Log to bump the thread's instruction and cycle counts before
	// sampling them.
	T_LOG("printing %llu\n", tc_info->thread_id);
	tc_info->cpu_time = __thread_selfusage();
	(void)thread_selfcounts(THSC_CPI, &tc_info->counts, sizeof(tc_info->counts));
	return NULL;
}

T_DECL(instrs_and_cycles_on_thread_exit,
		"instructions and cycles should be traced on thread exit",
		T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1),
		T_META_TAG_VM_NOT_ELIGIBLE)
{
	T_SETUPBEGIN;
	start_controlling_ktrace();

	int error;
	pthread_t *threads = calloc((unsigned int)(NO_OF_THREADS),
			sizeof(pthread_t));
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
	    NO_OF_THREADS);
	tc_info_t tc_infos = calloc((unsigned int)(NO_OF_THREADS),
			sizeof(struct thread_counters_info));
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(tc_infos,
			"calloc(%d thread counters)", NO_OF_THREADS);

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");
	ktrace_filter_pid(s, getpid());

	__block int nevents = 0;
	error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_THR_EXIT,
			^(ktrace_event_t tp) {
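		// arg1 and arg2 carry the exiting thread's instruction and
		// cycle counts; arg3 and arg4 carry its CPU time, which
		// together should cover what __thread_selfusage() reported.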
		for (int i = 0; i < NO_OF_THREADS; i++) {
			if (tp->threadid == tc_infos[i].thread_id) {
				nevents++;
				uint64_t cpu_time = tp->arg3 + tp->arg4;
				/*
				 * Since the counts were sampled before the
				 * thread exited, the counts in the exit event
				 * should be at least as large as those from
				 * thread_selfcounts().
				 */
				T_ASSERT_GE(tp->arg1, tc_infos[i].counts.tcpi_instructions,
					"trace event instrs are >= thread's instrs");
				T_ASSERT_GE(tp->arg2, tc_infos[i].counts.tcpi_cycles,
					"trace event cycles are >= thread's cycles");
				T_ASSERT_GE(cpu_time, tc_infos[i].cpu_time,
					"trace event cpu time is >= thread's cpu time");
			}
			if (nevents == NO_OF_THREADS) {
				ktrace_end(s, 1);
			}
		}
	});
	T_QUIET; T_ASSERT_POSIX_ZERO(error, "trace single event");
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_EQ(NO_OF_THREADS, nevents, "saw %d thread exit trace events",
				NO_OF_THREADS);
		free(tc_infos);
		free(threads);
		ktrace_session_destroy(s);
		T_END;
	});
	error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "start tracing");

	for (int i = 0; i < NO_OF_THREADS; i++) {
		error = pthread_create(&threads[i], NULL, get_thread_counters,
				(void *)&tc_infos[i]);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create thread %d", i);
	}
	T_SETUPEND;
	for (int i = 0; i < NO_OF_THREADS; i++) {
		error = pthread_join(threads[i], NULL);
		T_QUIET; T_EXPECT_POSIX_ZERO(error, "pthread_join thread %d", i);
	}

	dispatch_main();
}
1899