/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <libkern/libkern.h>
#include <libkern/coreanalytics/coreanalytics.h>
#include <mach/mach_types.h>
#include <sys/errno.h>
#include <sys/kauth.h>
#include <sys/proc_internal.h>
#include <sys/stackshot.h>
#include <sys/sysproto.h>
#include <sys/sysctl.h>
#include <pexpert/device_tree.h>
#include <pexpert/pexpert.h>
#include <os/log.h>
#include <vm/vm_kern_xnu.h>
#include <IOKit/IOBSD.h>

extern uint32_t stackshot_estimate_adj;
EXPERIMENT_FACTOR_UINT(_kern, stackshot_estimate_adj, &stackshot_estimate_adj, 0, 100,
    "adjust stackshot estimates up by this percentage");

extern unsigned int stackshot_single_thread;

#if DEVELOPMENT || DEBUG
SYSCTL_UINT(_kern, OID_AUTO, stackshot_single_thread, CTLFLAG_RW | CTLFLAG_LOCKED, &stackshot_single_thread, 1, "Single-threaded stackshots");
#else
SYSCTL_UINT(_kern, OID_AUTO, stackshot_single_thread, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &stackshot_single_thread, 1, "Single-threaded stackshots");
#endif

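/* Coalesce entitlement reports for this long before flushing a batch to CoreAnalytics. */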
#define SSHOT_ANALYTICS_PERIOD_HOURS 1

enum stackshot_report_setting {
    STACKSHOT_REPORT_NONE = 0,
    STACKSHOT_REPORT_NO_ENT = 1, /* report if missing entitlement */
    STACKSHOT_REPORT_ALL = 2, /* always report */
};

#if XNU_TARGET_OS_XR
#define STACKSHOT_ENTITLEMENT_REPORT STACKSHOT_REPORT_ALL
#define STACKSHOT_ENTITLEMENT_REFUSE true
#else
#define STACKSHOT_ENTITLEMENT_REPORT STACKSHOT_REPORT_ALL
#define STACKSHOT_ENTITLEMENT_REFUSE false
#endif
/*
 * Controls for the stackshot entitlement; changeable with boot-args:
 *   sshot-entitlement-report=0|1|2  (send CoreAnalytics when called without the entitlement (1), or always (2))
 *   sshot-entitlement-refuse=0|1    (fail calls made without the entitlement)
 * This only affects requests from userspace.
 *
 * For reporting, we only report a given command once.
 */
SECURITY_READ_ONLY_LATE(uint8_t) stackshot_entitlement_report = STACKSHOT_ENTITLEMENT_REPORT;
SECURITY_READ_ONLY_LATE(bool) stackshot_entitlement_refuse = STACKSHOT_ENTITLEMENT_REFUSE;

#define STACKSHOT_ENTITLEMENT "com.apple.private.stackshot"
#define STACKSHOT_STATS_ENTITLEMENT "com.apple.private.stackshot.stats"
#define SSHOT_ENTITLEMENT_BOOTARG_REPORT "sshot-entitlement-report"
#define SSHOT_ENTITLEMENT_BOOTARG_FAIL "sshot-entitlement-refuse"

/* use single printable characters; these are in order of the stackshot syscall's checks */
enum stackshot_progress {
    STACKSHOT_NOT_ROOT = 'R',
    STACKSHOT_NOT_ENTITLED = 'E',
    STACKSHOT_PERMITTED = 'P',
    STACKSHOT_ATTEMPTED = 'A',
    STACKSHOT_SUCCEEDED = 'S',
};

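/*
 * CoreAnalytics event sent for each distinct (entitlement, progress, process)
 * combination; sshot_count is the number of occurrences coalesced into the event.
 */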
CA_EVENT(stackshot_entitlement_report,
    CA_INT, sshot_count,
    CA_BOOL, sshot_refused,
    CA_BOOL, sshot_have_entitlement,
    CA_BOOL, sshot_fromtest,
    CA_STATIC_STRING(2), sshot_progress,
    CA_STATIC_STRING(CA_PROCNAME_LEN), sshot_pcomm,
    CA_STATIC_STRING(33), sshot_pname);

static thread_call_t sshot_entitlement_thread_call;

#define SSHOT_ENTITLEMENT_RECENT 16 /* track 16 recent violators */
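/*
 * Small LRU table of recent reporters: matching events bump ser_count in
 * place; when the table is full, the entry with the oldest ser_lastev is
 * evicted (and sent if it has a non-zero count) to make room.
 */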
struct stackshot_entitlement_report {
    uint64_t ser_lastev;
    uint32_t ser_count;
    command_t ser_pcomm;
    proc_name_t ser_pname;
    bool ser_have_entitlement;
    char ser_progress; /* from enum stackshot_progress */
#if DEVELOPMENT || DEBUG
    bool ser_test;
#endif
};
static LCK_GRP_DECLARE(sshot_report_lck_grp, "stackshot_entitlement_report");
static LCK_MTX_DECLARE(sshot_report_lck, &sshot_report_lck_grp);
static struct stackshot_entitlement_report *sshot_report_recent[SSHOT_ENTITLEMENT_RECENT];
static bool sshot_report_batch_scheduled = false;
#if DEVELOPMENT || DEBUG
static uint32_t sshot_report_test_events = 0;
static uint64_t sshot_report_test_counts = 0;
#endif

static void
stackshot_entitlement_send_report(const struct stackshot_entitlement_report *ser)
{
    ca_event_t ca_event = CA_EVENT_ALLOCATE(stackshot_entitlement_report);
    CA_EVENT_TYPE(stackshot_entitlement_report) * ser_event = ca_event->data;
    ser_event->sshot_count = ser->ser_count;
    ser_event->sshot_refused = stackshot_entitlement_refuse;
#if DEVELOPMENT || DEBUG
    ser_event->sshot_fromtest = ser->ser_test;
#else
    ser_event->sshot_fromtest = false;
#endif
    ser_event->sshot_have_entitlement = ser->ser_have_entitlement;
    ser_event->sshot_progress[0] = ser->ser_progress;
    ser_event->sshot_progress[1] = '\0';
    static_assert(sizeof(ser_event->sshot_pcomm) == sizeof(ser->ser_pcomm), "correct sshot_pcomm/ser_pcomm sizing");
    strlcpy(ser_event->sshot_pcomm, ser->ser_pcomm, sizeof(ser->ser_pcomm));
    static_assert(sizeof(ser_event->sshot_pname) == sizeof(ser->ser_pname), "correct sshot_pname/ser_pname sizing");
    strlcpy(ser_event->sshot_pname, ser->ser_pname, sizeof(ser->ser_pname));
    CA_EVENT_SEND(ca_event);
}

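/* Arm the one-shot thread call to flush the table one analytics period from now. */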
static void
sshot_entitlement_schedule_batch(void)
{
    static const uint64_t analytics_period_ns = SSHOT_ANALYTICS_PERIOD_HOURS * 60 * 60 * NSEC_PER_SEC;
    uint64_t analytics_period_absolutetime;
    nanoseconds_to_absolutetime(analytics_period_ns, &analytics_period_absolutetime);

    thread_call_enter_delayed(sshot_entitlement_thread_call, analytics_period_absolutetime + mach_absolute_time());
}

__attribute__((always_inline))
static void
sshot_entitlement_copy_for_send(const struct stackshot_entitlement_report *src,
    struct stackshot_entitlement_report *dst)
{
    bcopy(src, dst, sizeof(*src));
#if DEVELOPMENT || DEBUG
    if (src->ser_test) {
        sshot_report_test_events++;
        sshot_report_test_counts += src->ser_count;
    }
#endif
}

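/*
 * testval for stackshot_entitlement_do_report(): NORMAL marks a real event;
 * non-zero TEST values mark test events and perturb ser_pcomm so each test
 * event lands in its own table slot; TEST_OVERFLOW also exercises the
 * ser_count overflow path.
 */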
#define SSHOT_ENTITLEMENT_REPORT_NORMAL 0
#define SSHOT_ENTITLEMENT_REPORT_TEST(x) ((int)((x) ?: 1)) // always non-zero
#define SSHOT_ENTITLEMENT_REPORT_TEST_OVERFLOW SSHOT_ENTITLEMENT_REPORT_TEST(-1)

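/*
 * Record one stackshot entitlement event for the current process: find or
 * install a table entry, bump its count, and schedule the batch send if one
 * isn't already pending. Entries are flushed early on eviction or on count
 * overflow. CoreAnalytics sends happen after the lock is dropped.
 */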
static void
stackshot_entitlement_do_report(bool have_entitlement, enum stackshot_progress progress, int testval)
{
#pragma unused(testval)
#if DEVELOPMENT || DEBUG
    const bool from_test = (testval != SSHOT_ENTITLEMENT_REPORT_NORMAL);
#endif
    const struct proc *p = current_proc();
    struct stackshot_entitlement_report *ser = kalloc_data(sizeof(*ser), Z_WAITOK | Z_NOFAIL);
    struct stackshot_entitlement_report *tofree = NULL;
    struct stackshot_entitlement_report myser = {0};
    struct stackshot_entitlement_report oldser = {0};
    bool send_myser = false;
    bool send_oldser = false;

    myser.ser_count = 0;
    myser.ser_have_entitlement = have_entitlement;
    myser.ser_progress = (char)progress;
    static_assert(sizeof(p->p_comm) == sizeof(myser.ser_pcomm), "correct p_comm/ser_pcomm sizing");
    strlcpy(myser.ser_pcomm, p->p_comm, sizeof(myser.ser_pcomm));
    static_assert(sizeof(p->p_name) == sizeof(myser.ser_pname), "correct p_name/ser_pname sizing");
    strlcpy(myser.ser_pname, p->p_name, sizeof(myser.ser_pname));
#if DEVELOPMENT || DEBUG
    myser.ser_test = from_test;
    if (testval && (myser.ser_pcomm[0] != 0)) {
        myser.ser_pcomm[0] += (testval - 1);
    }
#endif
    lck_mtx_lock(&sshot_report_lck);
    // Search the table, looking for a match or a NULL slot. While we search, track
    // the slot with the oldest use time as an eviction candidate, for LRU behavior.

    struct stackshot_entitlement_report **tslot = NULL;
    bool match = false;
    for (int i = 0; i < SSHOT_ENTITLEMENT_RECENT; i++) {
        struct stackshot_entitlement_report **curp = &sshot_report_recent[i];
        struct stackshot_entitlement_report *cur = *curp;

        if (cur == NULL) {
            tslot = curp;
            break;
        }
        if (cur->ser_have_entitlement == myser.ser_have_entitlement &&
            cur->ser_progress == myser.ser_progress &&
            strncmp(cur->ser_pcomm, myser.ser_pcomm, sizeof(cur->ser_pcomm)) == 0 &&
            strncmp(cur->ser_pname, myser.ser_pname, sizeof(cur->ser_pname)) == 0) {
            match = true;
            tslot = curp;
            break;
        }
        // not a match; track the slot with the oldest event to evict
        if (tslot == NULL ||
            ((*tslot)->ser_lastev > cur->ser_lastev)) {
            tslot = curp;
        }
    }
    // Either we have:
    //   a match,
    //   no match and an empty (NULL) slot, or
    //   no match, a full table, and tslot points at the entry with the oldest last-event time.
    struct stackshot_entitlement_report *cur = NULL; // the entry to bump the count of
    if (match) {
        cur = *tslot;
        tofree = ser;
    } else {
        struct stackshot_entitlement_report *old = *tslot;
        if (old != NULL && old->ser_count > 0) {
            sshot_entitlement_copy_for_send(old, &oldser);
            send_oldser = true;
        }
        // fill it in and install it
        bcopy(&myser, ser, sizeof(*ser));
        cur = *tslot = ser;
        tofree = old; // if there's an old one, free it after we drop the lock
    }
    // Now that we have an installed structure, bump the count.
    uint32_t ncount;
    uint32_t toadd = 1;
#if DEVELOPMENT || DEBUG
    if (testval == SSHOT_ENTITLEMENT_REPORT_TEST_OVERFLOW) {
        toadd = UINT32_MAX;
    }
#endif
    if (os_add_overflow(cur->ser_count, toadd, &ncount)) {
        // overflow; send the existing structure
        sshot_entitlement_copy_for_send(cur, &myser);
        send_myser = true;
        ncount = toadd;
    }
    cur->ser_lastev = mach_absolute_time();
    cur->ser_count = ncount;
#if DEVELOPMENT || DEBUG
    cur->ser_test = from_test;
#endif
    // see if we need to schedule the background task
    const bool batch_is_scheduled = sshot_report_batch_scheduled;
    if (!batch_is_scheduled) {
        sshot_report_batch_scheduled = true;
    }
    lck_mtx_unlock(&sshot_report_lck);
    //
    // We just bumped a counter in the structure, so schedule an analytics
    // dump in an hour if one isn't already scheduled.
    //
    // The flag gets cleared when the batch clears out the data, making the
    // next event reschedule immediately.
    //
    if (!batch_is_scheduled) {
        sshot_entitlement_schedule_batch();
    }

    if (tofree != NULL) {
        kfree_data(tofree, sizeof(*tofree));
    }
    if (send_myser) {
        stackshot_entitlement_send_report(&myser);
    }
    if (send_oldser) {
        stackshot_entitlement_send_report(&oldser);
    }
}

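/*
 * Thread-call target (also called directly from the test sysctl): copy out
 * every entry with a non-zero count, zero the counts under the lock, then
 * send the CoreAnalytics events after dropping it.
 */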
static void
sshot_entitlement_send_batch(void *arg0, void *arg1)
{
#pragma unused(arg0, arg1)
    struct stackshot_entitlement_report *ser = kalloc_data(sizeof(*ser) * SSHOT_ENTITLEMENT_RECENT, Z_WAITOK | Z_NOFAIL);
    size_t count = 0;
    // Walk through the array and, for each entry with a non-zero count:
    // * copy it into our local array for reporting, and
    // * zero its count.
    lck_mtx_lock(&sshot_report_lck);
    for (size_t i = 0; i < SSHOT_ENTITLEMENT_RECENT; i++) {
        struct stackshot_entitlement_report *cur = sshot_report_recent[i];
        if (cur == NULL || cur->ser_count == 0) {
            continue;
        }
        sshot_entitlement_copy_for_send(cur, &ser[count]);
        count++;
        cur->ser_count = 0;
    }
    sshot_report_batch_scheduled = false;
    lck_mtx_unlock(&sshot_report_lck);
    for (size_t i = 0; i < count; i++) {
        stackshot_entitlement_send_report(&ser[i]);
    }
    kfree_data(ser, sizeof(*ser) * SSHOT_ENTITLEMENT_RECENT);
}

#if DEVELOPMENT || DEBUG
/*
 * Manual trigger of a set of entitlement reports and the associated batch
 * processing for testing on dev/debug kernels.
 */
static int
sysctl_stackshot_entitlement_test SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, val = 0;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || !req->newptr) {
        return error;
    }
    static LCK_MTX_DECLARE(sshot_report_test_lck, &sshot_report_lck_grp);
    static bool sshot_report_test_active;
    // avoid multiple active tests
    lck_mtx_lock(&sshot_report_test_lck);
    if (sshot_report_test_active) {
        lck_mtx_unlock(&sshot_report_test_lck);
        return EBUSY;
    }
    sshot_report_test_active = true;
    lck_mtx_unlock(&sshot_report_test_lck);

    sshot_entitlement_send_batch(NULL, NULL); // flush out existing data
    sshot_report_test_events = 0;
    sshot_report_test_counts = 0;

    // fill with test events
    for (int idx = 0; idx < SSHOT_ENTITLEMENT_RECENT; idx++) {
        stackshot_entitlement_do_report(false, STACKSHOT_NOT_ENTITLED, SSHOT_ENTITLEMENT_REPORT_TEST(idx + 1));
    }
    sshot_entitlement_send_batch(NULL, NULL);
    const uint32_t post_batch = sshot_report_test_events;
    const uint64_t post_batch_counts = sshot_report_test_counts;

    // overflow test
    stackshot_entitlement_do_report(false, STACKSHOT_NOT_ENTITLED, SSHOT_ENTITLEMENT_REPORT_TEST_OVERFLOW);
    stackshot_entitlement_do_report(false, STACKSHOT_NOT_ENTITLED, SSHOT_ENTITLEMENT_REPORT_TEST_OVERFLOW);
    sshot_entitlement_send_batch(NULL, NULL);
    const uint32_t post_overflow = sshot_report_test_events - post_batch;
    const uint64_t post_overflow_counts = sshot_report_test_counts - post_batch_counts;

    os_log_error(OS_LOG_DEFAULT, "sysctl_stackshot_entitlement_test: made %d events, %d events sent, %d counts (both should == events)",
        SSHOT_ENTITLEMENT_RECENT, post_batch, (int)post_batch_counts);
    os_log_error(OS_LOG_DEFAULT, "sysctl_stackshot_entitlement_test: overflow, %d events sent (expect 2), %llx counts (expect %llx)",
        post_overflow, (long long)post_overflow_counts, 2 * (long long)UINT32_MAX);

    lck_mtx_lock(&sshot_report_test_lck);
    sshot_report_test_active = false;
    lck_mtx_unlock(&sshot_report_test_lck);

    if (post_batch != SSHOT_ENTITLEMENT_RECENT ||
        post_batch_counts != SSHOT_ENTITLEMENT_RECENT ||
        post_overflow != 2 ||
        post_overflow_counts != 2 * (long long)UINT32_MAX) {
        os_log_error(OS_LOG_DEFAULT, "sysctl_stackshot_entitlement_test: failed");
        return EDEVERR;
    }

    os_log_error(OS_LOG_DEFAULT, "sysctl_stackshot_entitlement_test: success");
    return 0;
}
SYSCTL_PROC(_debug, OID_AUTO, stackshot_entitlement_send_batch,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0,
    &sysctl_stackshot_entitlement_test, "I", "");

/* Return current entitlement enforcement state. */
static int
sysctl_stackshot_entitlement_status SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int return_value = ((stackshot_entitlement_report & 0xf) | (stackshot_entitlement_refuse ? 0x10 : 0));
    return SYSCTL_OUT(req, &return_value, sizeof(return_value));
}
SYSCTL_PROC(_kern, OID_AUTO, stackshot_entitlement_status,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0,
    &sysctl_stackshot_entitlement_status, "I", "");

#endif /* DEVELOPMENT || DEBUG */

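/*
 * Boot-time setup: apply the entitlement boot-args and allocate the
 * batch-send thread call.
 */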
__startup_func
static void
atboot_stackshot_entitlement(void)
{
    uint32_t boot_arg;
    if (PE_parse_boot_argn(SSHOT_ENTITLEMENT_BOOTARG_REPORT, &boot_arg, sizeof(boot_arg))) {
        /* clamp to valid values */
        boot_arg = (boot_arg <= STACKSHOT_REPORT_ALL ? boot_arg : STACKSHOT_REPORT_ALL);
        stackshot_entitlement_report = (uint8_t)boot_arg;
    }
    if (PE_parse_boot_argn(SSHOT_ENTITLEMENT_BOOTARG_FAIL, &boot_arg, sizeof(boot_arg))) {
        stackshot_entitlement_refuse = (boot_arg != 0);
    }
    sshot_entitlement_thread_call = thread_call_allocate_with_options(
        sshot_entitlement_send_batch, NULL, THREAD_CALL_PRIORITY_LOW, THREAD_CALL_OPTIONS_ONCE);
}
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, atboot_stackshot_entitlement);

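/*
 * kern.stackshot_stats: returns a stackshot_stats_t (last start/end times,
 * total count, and cumulative duration). Readable by root, or by processes
 * holding the stats entitlement.
 */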
static int
sysctl_stackshot_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    stackshot_stats_t stats;
    proc_t self = current_proc();

    /* root processes and non-root processes with the STATS entitlement can read this */
    if (suser(kauth_cred_get(), &self->p_acflag) != 0 &&
        !IOCurrentTaskHasEntitlement(STACKSHOT_STATS_ENTITLEMENT)) {
        return EPERM;
    }

    if (req->newptr != USER_ADDR_NULL) {
        return EPERM;
    }
    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = sizeof(stats);
        return 0;
    }
    extern void stackshot_get_timing(uint64_t *last_abs_start, uint64_t *last_abs_end, uint64_t *count, uint64_t *total_duration);
    stackshot_get_timing(&stats.ss_last_start, &stats.ss_last_end, &stats.ss_count, &stats.ss_duration);

    return SYSCTL_OUT(req, &stats, MIN(sizeof(stats), req->oldlen));
}

SYSCTL_PROC(_kern, OID_AUTO, stackshot_stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED |
    CTLFLAG_KERN,
    NULL, 0, sysctl_stackshot_stats, "S,stackshot_stats",
    "Get stackshot statistics");

/*
 * Stackshot system calls
 */

#if CONFIG_TELEMETRY
extern kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
#endif /* CONFIG_TELEMETRY */
extern kern_return_t kern_stack_snapshot_with_reason(char* reason);
extern kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user);

static int
stackshot_kern_return_to_bsd_error(kern_return_t kr)
{
    switch (kr) {
    case KERN_SUCCESS:
        return 0;
    case KERN_RESOURCE_SHORTAGE:
        /* could not allocate memory, or stackshot is actually bigger than
         * SANE_TRACEBUF_SIZE */
        return ENOMEM;
    case KERN_INSUFFICIENT_BUFFER_SIZE:
    case KERN_NO_SPACE:
        /* ran out of buffer to write the stackshot. Normally this error
         * causes a larger buffer to be allocated in-kernel, rather than
         * being returned to the user. */
        return ENOSPC;
    case KERN_NO_ACCESS:
        return EPERM;
    case KERN_MEMORY_PRESENT:
        return EEXIST;
    case KERN_NOT_SUPPORTED:
        return ENOTSUP;
    case KERN_NOT_IN_SET:
        /* requested existing buffer, but there isn't one. */
        return ENOENT;
    case KERN_ABORTED:
        /* kdp did not report an error, but also did not produce any data */
        return EINTR;
    case KERN_FAILURE:
        /* stackshot came across inconsistent data and needed to bail out */
        return EBUSY;
    case KERN_OPERATION_TIMED_OUT:
        /* debugger synchronization timed out */
        return ETIMEDOUT;
    default:
        return EINVAL;
    }
}

/*
 * stack_snapshot_with_config: Obtains a coherent set of stack traces for specified threads on the system,
 *     tracing both kernel and user stacks where available. Allocates a buffer from the
 *     kernel and maps the buffer into the calling task's address space.
 *
 * Inputs:  uap->stackshot_config_version - version of the stackshot config that is being passed
 *          uap->stackshot_config - pointer to the stackshot config
 *          uap->stackshot_config_size - size of the stackshot config being passed
 * Outputs: EINVAL if there is a problem with the arguments
 *          EFAULT if we failed to copy in the arguments successfully
 *          EPERM if the caller is not privileged
 *          ENOTSUP if the caller is passing a version of arguments that is not supported by the kernel
 *          (indicates libsyscall:kernel mismatch) or if the caller is requesting unsupported flags
 *          ENOENT if the caller is requesting an existing buffer that doesn't exist or if the
 *          requested PID isn't found
 *          ENOMEM if the kernel is unable to allocate enough memory to serve the request
 *          ENOSPC if there isn't enough space in the caller's address space to remap the buffer
 *          ESRCH if the target PID isn't found
 *          Returns 0 on success
 */
int
stack_snapshot_with_config(struct proc *p, struct stack_snapshot_with_config_args *uap, __unused int *retval)
{
    int error = 0;
    kern_return_t kr;
    const uint8_t report = stackshot_entitlement_report;
    const bool refuse = stackshot_entitlement_refuse;
    enum stackshot_progress progress = STACKSHOT_NOT_ROOT;
    bool has_entitlement = true;

    if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
        goto err;
    }
    progress = STACKSHOT_NOT_ENTITLED;

    if ((report != STACKSHOT_REPORT_NONE || refuse) &&
        !IOCurrentTaskHasEntitlement(STACKSHOT_ENTITLEMENT)) {
        has_entitlement = false;
        if (refuse) {
            error = EPERM;
            goto err;
        }
    }
    progress = STACKSHOT_PERMITTED;

    if ((void*)uap->stackshot_config == NULL) {
        error = EINVAL;
        goto err;
    }

    switch (uap->stackshot_config_version) {
    case STACKSHOT_CONFIG_TYPE:
        if (uap->stackshot_config_size != sizeof(stackshot_config_t)) {
            error = EINVAL;
            break;
        }
        stackshot_config_t config;
        error = copyin(uap->stackshot_config, &config, sizeof(stackshot_config_t));
        if (error != 0) {
            error = EFAULT;
            break;
        }
        kr = kern_stack_snapshot_internal(uap->stackshot_config_version, &config, sizeof(stackshot_config_t), TRUE);
        error = stackshot_kern_return_to_bsd_error(kr);
        progress = (error == 0) ? STACKSHOT_SUCCEEDED : STACKSHOT_ATTEMPTED;
        break;
    default:
        error = ENOTSUP;
        break;
    }
err:
    if (report == STACKSHOT_REPORT_ALL || (report == STACKSHOT_REPORT_NO_ENT && !has_entitlement)) {
        stackshot_entitlement_do_report(has_entitlement, progress, SSHOT_ENTITLEMENT_REPORT_NORMAL);
    }
    return error;
}

#if CONFIG_TELEMETRY
/*
 * microstackshot: Catch-all system call for microstackshot-related operations, including
 *     enabling/disabling both global and windowed microstackshots as well
 *     as retrieving windowed or global stackshots and the boot profile.
 * Inputs:  uap->tracebuf - address of the user space destination buffer
 *          uap->tracebuf_size - size of the user space trace buffer
 *          uap->flags - various flags
 * Outputs: EPERM if the caller is not privileged
 *          EINVAL if the supplied mss_args is NULL, mss_args.tracebuf is NULL or mss_args.tracebuf_size is not sane
 *          ENOMEM if we don't have enough memory to satisfy the request
 *          *retval contains the number of bytes traced, if successful,
 *          and -1 otherwise.
 */
int
microstackshot(struct proc *p, struct microstackshot_args *uap, int32_t *retval)
{
    int error = 0;
    kern_return_t kr;

    if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
        return error;
    }

    kr = stack_microstackshot(uap->tracebuf, uap->tracebuf_size, uap->flags, retval);
    return stackshot_kern_return_to_bsd_error(kr);
}
#endif /* CONFIG_TELEMETRY */

/*
 * kern_stack_snapshot_with_reason: Obtains a coherent set of stack traces for specified threads on the system,
 *     tracing both kernel and user stacks where available. Allocates a buffer from the
 *     kernel and stores the address of this buffer.
 *
 * Inputs:  reason - the reason for triggering a stackshot (unused at the moment, but in the
 *          future will be saved in the stackshot)
 * Outputs: EINVAL/ENOTSUP if there is a problem with the arguments
 *          EPERM if the caller doesn't pass at least one KERNEL stackshot flag
 *          ENOMEM if the kernel is unable to allocate enough memory to serve the request
 *          ESRCH if the target PID isn't found
 *          Returns 0 on success
 */
int
kern_stack_snapshot_with_reason(__unused char *reason)
{
    stackshot_config_t config;
    kern_return_t kr;

    config.sc_pid = -1;
    config.sc_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_IN_KERNEL_BUFFER |
        STACKSHOT_KCDATA_FORMAT | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_THREAD_WAITINFO |
        STACKSHOT_NO_IO_STATS | STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);
    config.sc_delta_timestamp = 0;
    config.sc_out_buffer_addr = 0;
    config.sc_out_size_addr = 0;

    kr = kern_stack_snapshot_internal(STACKSHOT_CONFIG_TYPE, &config, sizeof(stackshot_config_t), FALSE);
    return stackshot_kern_return_to_bsd_error(kr);
}

#if DEBUG || DEVELOPMENT
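/*
 * Test: take a kernel-initiated stackshot into an 8MB buffer pre-filled with
 * a 0xAA pattern, to verify that a stackshot into dirty target memory still
 * produces data.
 */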
static int
stackshot_dirty_buffer_test(__unused int64_t in, int64_t *out)
{
    uint64_t ss_flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_NO_IO_STATS | STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY | STACKSHOT_THREAD_WAITINFO | STACKSHOT_INCLUDE_DRIVER_THREADS_IN_KERNEL;
    unsigned ss_bytes = 0;
    vm_offset_t buf = 0;
    kern_return_t kr;

    // 8MB buffer
    kr = kmem_alloc(kernel_map, &buf, 8 * 1024 * 1024, KMA_ZERO | KMA_DATA, VM_KERN_MEMORY_DIAG);
    if (kr != KERN_SUCCESS) {
        printf("stackshot_dirty_buffer_test: kmem_alloc returned %d\n", kr);
        goto err;
    }
    // scribble pattern into buffer for easy identification
    memset((char *)buf, 0xAA, 8 * 1024 * 1024);

    kr = stack_snapshot_from_kernel(0, (char *)buf, 8 * 1024 * 1024, ss_flags, 0, 0, &ss_bytes);
    if (kr != KERN_SUCCESS) {
        printf("stackshot_dirty_buffer_test: stackshot returned %d\n", kr);
        goto err;
    }

    if (ss_bytes == 0) {
        *out = -2;
        printf("stackshot_dirty_buffer_test: stackshot was empty but did not fail\n");
        goto end;
    }

    printf("stackshot_dirty_buffer_test: captured %u bytes\n", ss_bytes);

err:
    *out = (kr == KERN_SUCCESS) ? 1 : -1;
end:
    if (buf != 0) {
        kmem_free(kernel_map, buf, 8 * 1024 * 1024);
    }
    return KERN_SUCCESS;
}
SYSCTL_TEST_REGISTER(stackshot_dirty_buffer, stackshot_dirty_buffer_test);

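/*
 * Test: exercise the kernel-initiated stackshot entry points: direct
 * stack_snapshot_from_kernel() into a preallocated buffer (in == 1), or
 * kern_stack_snapshot_with_reason() into the in-kernel buffer (in == 2).
 */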
static int
stackshot_kernel_initiator_test(int64_t in, int64_t *out)
{
    kern_return_t kr = KERN_SUCCESS;
    vm_offset_t buf = 0;
    uint64_t ss_flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_NO_IO_STATS | STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY | STACKSHOT_THREAD_WAITINFO | STACKSHOT_INCLUDE_DRIVER_THREADS_IN_KERNEL;
    unsigned ss_bytes = 0;
    if (in == 1) {
        kr = kmem_alloc(kernel_map, &buf, 8 * 1024 * 1024, KMA_ZERO | KMA_DATA, VM_KERN_MEMORY_DIAG);
        if (kr != KERN_SUCCESS) {
            printf("stackshot_kernel_initiator_test: kmem_alloc returned %d\n", kr);
            goto err;
        }

        kr = stack_snapshot_from_kernel(0, (char *)buf, 8 * 1024 * 1024, ss_flags, 0, 0, &ss_bytes);
        if (kr != KERN_SUCCESS) {
            printf("stackshot_kernel_initiator_test: stackshot returned %d\n", kr);
            goto err;
        }

        if (ss_bytes == 0) {
            *out = -2;
            printf("stackshot_kernel_initiator_test: stackshot was empty but did not fail\n");
            goto end;
        }
    } else if (in == 2) {
        kr = kern_stack_snapshot_with_reason("");
        if (kr != KERN_SUCCESS) {
            printf("stackshot_kernel_initiator_test: kern_stack_snapshot_with_reason failed: %d\n", kr);
            goto err;
        }
    } else {
        return KERN_NOT_SUPPORTED;
    }

err:
    *out = (kr == KERN_SUCCESS) ? 1 : -1;
end:
    if (buf != 0) {
        kmem_free(kernel_map, buf, 8 * 1024 * 1024);
    }
    return KERN_SUCCESS;
}
SYSCTL_TEST_REGISTER(stackshot_kernel_initiator, stackshot_kernel_initiator_test);
#endif /* DEBUG || DEVELOPMENT */