1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_assert.h>
58 #include <mach_kdp.h>
59 #include <kdp/kdp.h>
60 #include <kdp/kdp_core.h>
61 #include <kdp/kdp_internal.h>
62 #include <kdp/kdp_callout.h>
63 #include <kern/cpu_number.h>
64 #include <kern/kalloc.h>
65 #include <kern/percpu.h>
66 #include <kern/spl.h>
67 #include <kern/thread.h>
68 #include <kern/assert.h>
69 #include <kern/sched_prim.h>
70 #include <kern/socd_client.h>
71 #include <kern/misc_protos.h>
72 #include <kern/clock.h>
73 #include <kern/telemetry.h>
74 #include <kern/ecc.h>
75 #include <kern/kern_stackshot.h>
76 #include <kern/kern_cdata.h>
77 #include <kern/zalloc_internal.h>
78 #include <kern/iotrace.h>
79 #include <pexpert/device_tree.h>
80 #include <vm/vm_kern_xnu.h>
81 #include <vm/vm_map.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_compressor_xnu.h>
84 #include <stdarg.h>
85 #include <stdatomic.h>
86 #include <sys/pgo.h>
87 #include <console/serial_protos.h>
88 #include <IOKit/IOBSD.h>
89
90 #if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
91 #include <kdp/kdp_udp.h>
92 #endif
93 #include <kern/processor.h>
94
95 #if defined(__i386__) || defined(__x86_64__)
96 #include <IOKit/IOBSD.h>
97
98 #include <i386/cpu_threads.h>
99 #include <i386/pmCPU.h>
100 #include <i386/lbr.h>
101 #endif
102
103 #include <IOKit/IOPlatformExpert.h>
104 #include <machine/machine_cpu.h>
105 #include <machine/pal_routines.h>
106
107 #include <sys/kdebug.h>
108 #include <libkern/OSKextLibPrivate.h>
109 #include <libkern/OSAtomic.h>
110 #include <libkern/kernel_mach_header.h>
111 #include <libkern/section_keywords.h>
112 #include <uuid/uuid.h>
113 #include <mach_debug/zone_info.h>
114 #include <mach/resource_monitors.h>
115 #include <machine/machine_routines.h>
116 #include <sys/proc_require.h>
117
118 #include <os/log_private.h>
119
120 #include <kern/ext_paniclog.h>
121
122 #if defined(__arm64__)
123 #include <pexpert/pexpert.h> /* For gPanicBase */
124 #include <arm/caches_internal.h>
125 #include <arm/misc_protos.h>
126 extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info;
127 #endif
128
129 #include <san/kcov.h>
130
131 #if CONFIG_XNUPOST
132 #include <tests/xnupost.h>
133 extern int vsnprintf(char *, size_t, const char *, va_list);
134 #endif
135
136 #if CONFIG_CSR
137 #include <sys/csr.h>
138 #endif
139
140 #if CONFIG_EXCLAVES
141 #include <xnuproxy/panic.h>
142 #include "exclaves_panic.h"
143 #endif
144
145 #if CONFIG_SPTM
146 #include <arm64/sptm/sptm.h>
147 #include <arm64/sptm/pmap/pmap_data.h>
148 #endif /* CONFIG_SPTM */
149
150 extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize );
151 extern void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize );
152
153 unsigned int halt_in_debugger = 0;
154 unsigned int current_debugger = 0;
155 unsigned int active_debugger = 0;
156 SECURITY_READ_ONLY_LATE(unsigned int) panicDebugging = FALSE;
157 unsigned int kernel_debugger_entry_count = 0;
158
159 #if DEVELOPMENT || DEBUG
160 unsigned int panic_test_failure_mode = PANIC_TEST_FAILURE_MODE_BADPTR;
161 unsigned int panic_test_action_count = 1;
162 unsigned int panic_test_case = PANIC_TEST_CASE_DISABLED;
163 #endif
164
165 #if defined(__arm64__)
166 struct additional_panic_data_buffer *panic_data_buffers = NULL;
167 #endif
168
169 #if defined(__arm64__)
170 /*
171 * Magic number; this should be identical to the armv7 encoding for trap.
172 */
173 #define TRAP_DEBUGGER __asm__ volatile(".long 0xe7ffdeff")
174 #elif defined (__x86_64__)
175 #define TRAP_DEBUGGER __asm__("int3")
176 #else
177 #error No TRAP_DEBUGGER for this architecture
178 #endif
179
180 #if defined(__i386__) || defined(__x86_64__)
181 #define panic_stop() pmCPUHalt(PM_HALT_PANIC)
182 #else
183 #define panic_stop() panic_spin_forever()
184 #endif
185
186 #if defined(__arm64__) && (DEVELOPMENT || DEBUG)
187 /*
188 * More than enough for any typical format string passed to panic();
189 * anything longer will be truncated but that's better than nothing.
190 */
191 #define EARLY_PANIC_BUFLEN 256
192 #endif
193
194 struct debugger_state {
195 uint64_t db_panic_options;
196 debugger_op db_current_op;
197 boolean_t db_proceed_on_sync_failure;
198 const char *db_message;
199 const char *db_panic_str;
200 va_list *db_panic_args;
201 void *db_panic_data_ptr;
202 unsigned long db_panic_caller;
203 const char *db_panic_initiator;
204 /* incremented whenever we panic or call Debugger (current CPU panic level) */
205 uint32_t db_entry_count;
206 kern_return_t db_op_return;
207 };
208 static struct debugger_state PERCPU_DATA(debugger_state);
209
210 /* __pure2 is correct if this function is called with preemption disabled */
211 static inline __pure2 struct debugger_state *
212 current_debugger_state(void)
213 {
214 return PERCPU_GET(debugger_state);
215 }
216
217 #define CPUDEBUGGEROP current_debugger_state()->db_current_op
218 #define CPUDEBUGGERMSG current_debugger_state()->db_message
219 #define CPUPANICSTR current_debugger_state()->db_panic_str
220 #define CPUPANICARGS current_debugger_state()->db_panic_args
221 #define CPUPANICOPTS current_debugger_state()->db_panic_options
222 #define CPUPANICDATAPTR current_debugger_state()->db_panic_data_ptr
223 #define CPUDEBUGGERSYNC current_debugger_state()->db_proceed_on_sync_failure
224 #define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count
225 #define CPUDEBUGGERRET current_debugger_state()->db_op_return
226 #define CPUPANICCALLER current_debugger_state()->db_panic_caller
227 #define CPUPANICINITIATOR current_debugger_state()->db_panic_initiator
228
229
230 /*
231 * Usage:
232 * panic_test_action_count is interpreted in the context of the other flags, e.g. for IO errors it means "succeed this many times, then fail" and for nesting it means "panic this many times, then succeed"
233 * panic_test_failure_mode is a bit map of things to do
234 * panic_test_case is what sort of test we are injecting
235 *
236 * For more details see definitions in debugger.h
237 *
238 * Note that not all combinations are sensible, but some actions can be combined, e.g.
239 * - BADPTR+SPIN with action count = 3 will cause panic->panic->spin
240 * - BADPTR with action count = 2 will cause 2 nested panics (in addition to the initial panic)
241 * - IO_ERR with action 15 will cause 14 successful IOs, then fail on the next one
242 */
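/*
 * Example (illustrative, using only the knobs above): panic_test_case =
 * PANIC_TEST_CASE_RECURPANIC_ENTRY with panic_test_failure_mode =
 * PANIC_TEST_FAILURE_MODE_BADPTR and panic_test_action_count = 1 makes the
 * next debugger/panic entry dereference a bad pointer, injecting exactly one
 * nested panic on top of the original one.
 */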
243 #if DEVELOPMENT || DEBUG
244 #define INJECT_NESTED_PANIC_IF_REQUESTED(requested) \
245 MACRO_BEGIN \
246 if ((panic_test_case & requested) && panic_test_action_count) { \
247 panic_test_action_count--; \
248 volatile int *panic_test_badpointer = (int *)4; \
249 if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_SPIN) && (!panic_test_action_count)) { printf("inject spin...\n"); while(panic_test_badpointer); } \
250 if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_BADPTR) && (panic_test_action_count+1)) { printf("inject badptr...\n"); *panic_test_badpointer = 0; } \
251 if ((panic_test_failure_mode & PANIC_TEST_FAILURE_MODE_PANIC) && (panic_test_action_count+1)) { printf("inject panic...\n"); panic("nested panic level %d", panic_test_action_count); } \
252 } \
253 MACRO_END
254
255 #endif /* DEVELOPMENT || DEBUG */
256
257 debugger_op debugger_current_op = DBOP_NONE;
258 const char *debugger_panic_str = NULL;
259 va_list *debugger_panic_args = NULL;
260 void *debugger_panic_data = NULL;
261 uint64_t debugger_panic_options = 0;
262 const char *debugger_message = NULL;
263 unsigned long debugger_panic_caller = 0;
264 const char *debugger_panic_initiator = "";
265
266 void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args,
267 unsigned int reason, void *ctx, uint64_t panic_options_mask, void *panic_data,
268 unsigned long panic_caller, const char *panic_initiator) __dead2 __printflike(1, 0);
269 static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags);
270 void panic_spin_forever(void) __dead2;
271 void panic_stackshot_release_lock(void);
272 extern void PE_panic_hook(const char*);
273 extern int sync_internal(void);
274
275 #define NESTEDDEBUGGERENTRYMAX 5
276 static TUNABLE(unsigned int, max_debugger_entry_count, "nested_panic_max",
277 NESTEDDEBUGGERENTRYMAX);
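/*
 * With the default nested_panic_max of NESTEDDEBUGGERENTRYMAX (5), the fifth
 * debugger/panic entry on a CPU hits the Stage 1 bailout in
 * check_and_handle_nested_panic() below; further nesting falls through to the
 * Stage 2 and Stage 3 bailouts.
 */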
278
279 SECURITY_READ_ONLY_LATE(bool) awl_scratch_reg_supported = false;
280 static bool PERCPU_DATA(hv_entry_detected); // = false
281 static void awl_set_scratch_reg_hv_bit(void);
282 void awl_mark_hv_entry(void);
283 static bool awl_pm_state_change_cbk(void *param, enum cpu_event event, unsigned int cpu_or_cluster);
284
285 #if !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING
286 static boolean_t device_corefile_valid_on_ephemeral(void);
287 #endif /* !XNU_TARGET_OS_OSX & CONFIG_KDP_INTERACTIVE_DEBUGGING */
288
289 #if defined(__arm64__)
290 #define DEBUG_BUF_SIZE (4096)
291
292 /* debug_buf is directly linked with iBoot panic region for arm targets */
293 char *debug_buf_base = NULL;
294 char *debug_buf_ptr = NULL;
295 unsigned int debug_buf_size = 0;
296
297 SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE;
298 #else /* defined(__arm64__) */
299 #define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data))
300 /* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */
301 static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements");
302
303 char debug_buf[DEBUG_BUF_SIZE];
304 struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf;
305 char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data));
306 char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data));
307
308 /*
309 * We don't include the size of the panic header in the length of the data we actually write.
310 * On co-processor platforms, we lose sizeof(struct macos_panic_header) bytes from the end
311 * of the log because we only support writing (3*PAGESIZE) bytes.
312 */
313 unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));
314
315 boolean_t extended_debug_log_enabled = FALSE;
316 #endif /* defined(__arm64__) */
317
318 #if defined(XNU_TARGET_OS_OSX)
319 #define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace"
320 #else
321 #define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace"
322 #endif
323
324 static inline void debug_fatal_panic_begin(void);
325
326 /* Debugger state */
327 atomic_int debugger_cpu = DEBUGGER_NO_CPU;
328 boolean_t debugger_allcpus_halted = FALSE;
329 boolean_t debugger_safe_to_return = TRUE;
330 unsigned int debugger_context = 0;
331
332 static char model_name[64];
333 unsigned char *kernel_uuid;
334
335 boolean_t kernelcache_uuid_valid = FALSE;
336 uuid_t kernelcache_uuid;
337 uuid_string_t kernelcache_uuid_string;
338
339 boolean_t pageablekc_uuid_valid = FALSE;
340 uuid_t pageablekc_uuid;
341 uuid_string_t pageablekc_uuid_string;
342
343 boolean_t auxkc_uuid_valid = FALSE;
344 uuid_t auxkc_uuid;
345 uuid_string_t auxkc_uuid_string;
346
347
348 /*
349 * By default we treat Debugger() the same as calls to panic(), unless
350 * we have debug boot-args present and DB_KERN_DUMP_ON_NMI is *NOT* set.
351 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
352 *
353 * Return from Debugger() is currently only implemented on x86
354 */
355 static boolean_t debugger_is_panic = TRUE;
356
357 TUNABLE(unsigned int, debug_boot_arg, "debug", 0);
358
359 TUNABLE_DEV_WRITEABLE(unsigned int, verbose_panic_flow_logging, "verbose_panic_flow_logging", 0);
360
361 char kernel_uuid_string[37]; /* uuid_string_t */
362 char kernelcache_uuid_string[37]; /* uuid_string_t */
363 char panic_disk_error_description[512];
364 size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);
365
366 extern unsigned int write_trace_on_panic;
367 int kext_assertions_enable =
368 #if DEBUG || DEVELOPMENT
369 TRUE;
370 #else
371 FALSE;
372 #endif
373
374 #if (DEVELOPMENT || DEBUG)
375 uint64_t xnu_platform_stall_value = PLATFORM_STALL_XNU_DISABLE;
376 #endif
377
378 /*
379 * Maintain the physically-contiguous carveouts for the carveout bootargs.
380 */
381 TUNABLE_WRITEABLE(boolean_t, phys_carveout_core, "phys_carveout_core", 1);
382
383 TUNABLE(uint32_t, phys_carveout_mb, "phys_carveout_mb", 0);
384 SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0;
385 SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0;
386 SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0;
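/*
 * Example (sketch): booting a debug-enabled device with "phys_carveout_mb=64"
 * makes phys_carveout_init() below reserve a 64MB physically contiguous
 * region and publish it through phys_carveout (VA), phys_carveout_pa (PA)
 * and phys_carveout_size.
 */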
387
388
389 #if CONFIG_SPTM && (DEVELOPMENT || DEBUG)
390 /**
391 * Extra debug state which is set when panic lockdown is initiated.
392 * This information is intended to help when debugging issues with the panic
393 * path.
394 */
395 struct panic_lockdown_initiator_state debug_panic_lockdown_initiator_state;
396 #endif /* CONFIG_SPTM && (DEVELOPMENT || DEBUG) */
397
398 /*
399 * Returns whether kernel debugging is expected to be restricted
400 * on the device at present, based on CSR or other platform restrictions.
401 */
402 boolean_t
403 kernel_debugging_restricted(void)
404 {
405 #if XNU_TARGET_OS_OSX
406 #if CONFIG_CSR
407 if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
408 return TRUE;
409 }
410 #endif /* CONFIG_CSR */
411 return FALSE;
412 #else /* XNU_TARGET_OS_OSX */
413 return FALSE;
414 #endif /* XNU_TARGET_OS_OSX */
415 }
416
417 __startup_func
418 static void
419 panic_init(void)
420 {
421 unsigned long uuidlen = 0;
422 void *uuid;
423
424 uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
425 if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
426 kernel_uuid = uuid;
427 uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
428 }
429
430 /*
431 * Take the value of the debug boot-arg into account
432 */
433 #if MACH_KDP
434 if (!kernel_debugging_restricted() && debug_boot_arg) {
435 if (debug_boot_arg & DB_HALT) {
436 halt_in_debugger = 1;
437 }
438
439 #if defined(__arm64__)
440 if (debug_boot_arg & DB_NMI) {
441 panicDebugging = TRUE;
442 }
443 #else
444 panicDebugging = TRUE;
445 #endif /* defined(__arm64__) */
446 }
447
448 #if defined(__arm64__)
449 char kdpname[80];
450
451 kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
452 #endif /* defined(__arm64__) */
453
454 #endif /* MACH_KDP */
455
456 #if defined (__x86_64__)
457 /*
458 * By default we treat Debugger() the same as calls to panic(), unless
459 * we have debug boot-args present and DB_KERN_DUMP_ON_NMI is *NOT* set.
460 * If DB_KERN_DUMP_ON_NMI is *NOT* set, return from Debugger() is supported.
461 * This is because writing an on-device corefile is a destructive operation.
462 *
463 * Return from Debugger() is currently only implemented on x86
464 */
465 if (PE_i_can_has_debugger(NULL) && !(debug_boot_arg & DB_KERN_DUMP_ON_NMI)) {
466 debugger_is_panic = FALSE;
467 }
468 #endif
469 }
470 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init);
471
472 #if defined (__x86_64__)
473 void
474 extended_debug_log_init(void)
475 {
476 assert(coprocessor_paniclog_flush);
477 /*
478 * Allocate an extended panic log buffer that has space for the panic
479 * stackshot at the end. Update the debug buf pointers appropriately
480 * to point at this new buffer.
481 *
482 * iBoot pre-initializes the panic region with the NULL character. We set this here
483 * so we can accurately calculate the CRC for the region without needing to flush the
484 * full region over SMC.
485 */
486 char *new_debug_buf = kalloc_data(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO);
487
488 panic_info = (struct macos_panic_header *)new_debug_buf;
489 debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data));
490 debug_buf_size = (EXTENDED_DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data));
491
492 extended_debug_log_enabled = TRUE;
493
494 /*
495 * Insert a compiler barrier so we don't free the other panic stackshot buffer
496 * until after we've marked the new one as available
497 */
498 __compiler_barrier();
499 kmem_free(kernel_map, panic_stackshot_buf, panic_stackshot_buf_len);
500 panic_stackshot_buf = 0;
501 panic_stackshot_buf_len = 0;
502 }
503 #endif /* defined (__x86_64__) */
504
505 void
506 debug_log_init(void)
507 {
508 #if defined(__arm64__)
509 if (!gPanicBase) {
510 printf("debug_log_init: Error!! gPanicBase is still not initialized\n");
511 return;
512 }
513 /* Shift debug buf start location and size by the length of the panic header */
514 debug_buf_base = (char *)gPanicBase + sizeof(struct embedded_panic_header);
515 debug_buf_ptr = debug_buf_base;
516 debug_buf_size = gPanicSize - sizeof(struct embedded_panic_header);
517
518 #if CONFIG_EXT_PANICLOG
519 ext_paniclog_init();
520 #endif
521 #else
522 kern_return_t kr = KERN_SUCCESS;
523 bzero(panic_info, DEBUG_BUF_SIZE);
524
525 assert(debug_buf_base != NULL);
526 assert(debug_buf_ptr != NULL);
527 assert(debug_buf_size != 0);
528
529 /*
530 * We allocate a buffer to store a panic time stackshot. If we later discover that this is a
531 * system that supports flushing a stackshot via an extended debug log (see above), we'll free this memory
532 * as it's not necessary on this platform. This information won't be available until the IOPlatform has come
533 * up.
534 */
535 kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE,
536 KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG);
537 assert(kr == KERN_SUCCESS);
538 if (kr == KERN_SUCCESS) {
539 panic_stackshot_buf_len = PANIC_STACKSHOT_BUFSIZE;
540 }
541 #endif
542 }
543
544 void
545 phys_carveout_init(void)
546 {
547 if (!PE_i_can_has_debugger(NULL)) {
548 return;
549 }
550
551 #if __arm__ || __arm64__
552 #if DEVELOPMENT || DEBUG
553 #endif /* DEVELOPMENT || DEBUG */
554 #endif /* __arm__ || __arm64__ */
555
556 struct carveout {
557 const char *name;
558 vm_offset_t *va;
559 uint32_t requested_size;
560 uintptr_t *pa;
561 size_t *allocated_size;
562 uint64_t present;
563 } carveouts[] = {
564 {
565 "phys_carveout",
566 &phys_carveout,
567 phys_carveout_mb,
568 &phys_carveout_pa,
569 &phys_carveout_size,
570 phys_carveout_mb != 0,
571 }
572 };
573
574 for (int i = 0; i < (sizeof(carveouts) / sizeof(struct carveout)); i++) {
575 if (carveouts[i].present) {
576 size_t temp_carveout_size = 0;
577 if (os_mul_overflow(carveouts[i].requested_size, 1024 * 1024, &temp_carveout_size)) {
578 panic("%s_mb size overflowed (%uMB)",
579 carveouts[i].name, carveouts[i].requested_size);
580 return;
581 }
582
583 kmem_alloc_contig(kernel_map, carveouts[i].va,
584 temp_carveout_size, PAGE_MASK, 0, 0,
585 KMA_NOFAIL | KMA_PERMANENT | KMA_NOPAGEWAIT | KMA_DATA,
586 VM_KERN_MEMORY_DIAG);
587
588 *carveouts[i].pa = kvtophys(*carveouts[i].va);
589 *carveouts[i].allocated_size = temp_carveout_size;
590 }
591 }
592
593 #if __arm64__ && (DEVELOPMENT || DEBUG)
594 /* the panic_trace boot-arg is likely also set, so check and, if necessary, enable tracing into the new carveout */
595 PE_arm_debug_enable_trace(true);
596 #endif /* __arm64__ && (DEVELOPMENT || DEBUG) */
597 }
598
599 boolean_t
600 debug_is_in_phys_carveout(vm_map_offset_t va)
601 {
602 return phys_carveout_size && va >= phys_carveout &&
603 va < (phys_carveout + phys_carveout_size);
604 }
605
606 boolean_t
607 debug_can_coredump_phys_carveout(void)
608 {
609 return phys_carveout_core;
610 }
611
612 static void
613 DebuggerLock(void)
614 {
615 int my_cpu = cpu_number();
616 int debugger_exp_cpu = DEBUGGER_NO_CPU;
617 assert(ml_get_interrupts_enabled() == FALSE);
618
619 if (atomic_load(&debugger_cpu) == my_cpu) {
620 return;
621 }
622
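/*
 * Spin until this CPU's number is installed as the owner of debugger_cpu.
 * The expected value must be reset to DEBUGGER_NO_CPU after every failed
 * compare-exchange, since a failure overwrites it with the observed owner.
 */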
623 while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) {
624 debugger_exp_cpu = DEBUGGER_NO_CPU;
625 }
626
627 return;
628 }
629
630 static void
631 DebuggerUnlock(void)
632 {
633 assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number());
634
635 /*
636 * We don't do an atomic exchange here in case
637 * there's another CPU spinning to acquire the debugger_lock
638 * and we never get a chance to update it. We already have the
639 * lock so we can simply store DEBUGGER_NO_CPU and follow with
640 * a barrier.
641 */
642 atomic_store(&debugger_cpu, DEBUGGER_NO_CPU);
643 OSMemoryBarrier();
644
645 return;
646 }
647
648 static kern_return_t
649 DebuggerHaltOtherCores(boolean_t proceed_on_failure, bool is_stackshot)
650 {
651 #if defined(__arm64__)
652 return DebuggerXCallEnter(proceed_on_failure, is_stackshot);
653 #else /* defined(__arm64__) */
654 #pragma unused(proceed_on_failure)
655 mp_kdp_enter(proceed_on_failure, is_stackshot);
656 return KERN_SUCCESS;
657 #endif
658 }
659
660 static void
661 DebuggerResumeOtherCores(void)
662 {
663 #if defined(__arm64__)
664 DebuggerXCallReturn();
665 #else /* defined(__arm64__) */
666 mp_kdp_exit();
667 #endif
668 }
669
670 __printflike(3, 0)
671 static void
672 DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str,
673 va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
674 boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller, const char *db_panic_initiator)
675 {
676 CPUDEBUGGEROP = db_op;
677
678 /*
679 * Note:
680 * if CPUDEBUGGERCOUNT == 1 then we are in the normal case - record the panic data
681 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR == NULL then we are in a nested panic that happened before DebuggerSaveState was called, so store the nested panic data
682 * if CPUDEBUGGERCOUNT > 1 and CPUPANICSTR != NULL then we are in a nested panic that happened after DebuggerSaveState was called, so leave the original panic data
683 *
684 * TODO: is it safe to flatten this to if (CPUPANICSTR == NULL)?
685 */
686 if (CPUDEBUGGERCOUNT == 1 || CPUPANICSTR == NULL) {
687 CPUDEBUGGERMSG = db_message;
688 CPUPANICSTR = db_panic_str;
689 CPUPANICARGS = db_panic_args;
690 CPUPANICDATAPTR = db_panic_data_ptr;
691 CPUPANICCALLER = db_panic_caller;
692 CPUPANICINITIATOR = db_panic_initiator;
693
694 #if CONFIG_EXCLAVES
695 char *panic_str;
696 if (exclaves_panic_get_string(&panic_str) == KERN_SUCCESS) {
697 CPUPANICSTR = panic_str;
698 }
699 #endif
700 }
701
702 CPUDEBUGGERSYNC = db_proceed_on_sync_failure;
703 CPUDEBUGGERRET = KERN_SUCCESS;
704
705 /* Reset these on any nested panics */
706 // follow up in rdar://88497308 (nested panics should not clobber panic flags)
707 CPUPANICOPTS = db_panic_options;
708
709 return;
710 }
711
712 /*
713 * Save the requested debugger state/action into the current processor's
714 * percpu state and trap to the debugger.
715 */
716 kern_return_t
717 DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str,
718 va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr,
719 boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller, const char* db_panic_initiator)
720 {
721 kern_return_t ret;
722
723 #if defined(__arm64__) && (DEVELOPMENT || DEBUG)
724 if (!PE_arm_debug_and_trace_initialized()) {
725 /*
726 * In practice this can only happen if we panicked very early,
727 * when only the boot CPU is online and before it has finished
728 * initializing the debug and trace infrastructure. We're going
729 * to hang soon, so let's at least make sure the message passed
730 * to panic() is actually logged.
731 */
732 char buf[EARLY_PANIC_BUFLEN];
733 vsnprintf(buf, EARLY_PANIC_BUFLEN, db_panic_str, *db_panic_args);
734 paniclog_append_noflush("%s\n", buf);
735 }
736 #endif
737
738 assert(ml_get_interrupts_enabled() == FALSE);
739 DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args,
740 db_panic_options, db_panic_data_ptr,
741 db_proceed_on_sync_failure, db_panic_caller, db_panic_initiator);
742
743 /*
744 * On ARM this generates an uncategorized exception -> sleh code ->
745 * DebuggerCall -> kdp_trap -> handle_debugger_trap
746 * So that is how XNU ensures that only one core can panic.
747 * The rest of the cores are halted by IPI if possible; if that
748 * fails it will fall back to dbgwrap.
749 */
750 TRAP_DEBUGGER;
751
752 ret = CPUDEBUGGERRET;
753
754 DebuggerSaveState(DBOP_NONE, NULL, NULL, NULL, 0, NULL, FALSE, 0, NULL);
755
756 return ret;
757 }
758
759 void __attribute__((noinline))
760 Assert(
761 const char *file,
762 int line,
763 const char *expression
764 )
765 {
766 #if CONFIG_NONFATAL_ASSERTS
767 static TUNABLE(bool, mach_assert, "assertions", true);
768
769 if (!mach_assert) {
770 kprintf("%s:%d non-fatal Assertion: %s", file, line, expression);
771 return;
772 }
773 #endif
774
775 panic_plain("%s:%d Assertion failed: %s", file, line, expression);
776 }
777
778 boolean_t
779 debug_is_current_cpu_in_panic_state(void)
780 {
781 return current_debugger_state()->db_entry_count > 0;
782 }
783
784 /*
785 * check if we are in a nested panic, report findings, take evasive action where necessary
786 *
787 * see also PE_update_panicheader_nestedpanic
788 */
789 static void
790 check_and_handle_nested_panic(uint64_t panic_options_mask, unsigned long panic_caller, const char *db_panic_str, va_list *db_panic_args)
791 {
792 if ((CPUDEBUGGERCOUNT > 1) && (CPUDEBUGGERCOUNT < max_debugger_entry_count)) {
793 // Note: this is the first indication in the panic log or serial that we are off the rails...
794 //
795 // if we panic *before* the paniclog is finalized then this will end up in the ips report with a panic_caller addr that gives us a clue
796 // if we panic *after* the log is finalized then we will only see it in the serial log
797 //
798 paniclog_append_noflush("Nested panic detected - entry count: %d panic_caller: 0x%016lx\n", CPUDEBUGGERCOUNT, panic_caller);
799 paniclog_flush();
800
801 // print the *new* panic string to the console, we might not get it by other means...
802 // TODO: I tried to write this stuff to the paniclog, but the serial output gets corrupted and the panicstring in the ips file is <mysterious>
803 // rdar://87846117 (NestedPanic: output panic string to paniclog)
804 if (db_panic_str) {
805 printf("Nested panic string:\n");
806 #pragma clang diagnostic push
807 #pragma clang diagnostic ignored "-Wformat-nonliteral"
808 _doprnt(db_panic_str, db_panic_args, PE_kputc, 0);
809 #pragma clang diagnostic pop
810 printf("\n<end nested panic string>\n");
811 }
812 }
813
814 // Stage 1 bailout
815 //
816 // Try to complete the normal panic flow, i.e. try to make sure the callouts happen and we flush the paniclog. If this fails with another nested
817 // panic then we will land in Stage 2 below...
818 //
819 if (CPUDEBUGGERCOUNT == max_debugger_entry_count) {
820 uint32_t panic_details = 0;
821
822 // if this is a force-reset panic then capture a log and reboot immediately.
823 if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
824 panic_details |= kPanicDetailsForcePowerOff;
825 }
826
827 // normally the kPEPanicBegin is sent from debugger_collect_diagnostics(), but we might nested-panic before we get
828 // there. To be safe, send another notification; the function called below will only send kPEPanicBegin if it has not yet been sent.
829 //
830 PEHaltRestartInternal(kPEPanicBegin, panic_details);
831
832 paniclog_append_noflush("Nested panic count exceeds limit %d, machine will reset or spin\n", max_debugger_entry_count);
833 PE_update_panicheader_nestedpanic();
834 paniclog_flush();
835
836 if (!panicDebugging) {
837 // note that this will also send kPEPanicEnd
838 kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask);
839 }
840
841 // prints to console
842 paniclog_append_noflush("\nNested panic stall. Stage 1 bailout. Please go to https://panic.apple.com to report this panic\n");
843 panic_spin_forever();
844 }
845
846 // Stage 2 bailout
847 //
848 // Things are severely hosed, we have nested to the point of bailout and then nested again during the bailout path. Try to issue
849 // a chipreset as quickly as possible, hopefully something in the panic log is salvageable, since we flushed it during Stage 1.
850 //
851 if (CPUDEBUGGERCOUNT == max_debugger_entry_count + 1) {
852 if (!panicDebugging) {
853 // note that:
854 // - this code path should be audited for prints, as that is a common cause of nested panics
855 // - this code path should take the fastest route to the actual reset, and not call any unnecessary code
856 kdp_machine_reboot_type(kPEPanicRestartCPU, panic_options_mask & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS);
857 }
858
859 // prints to console, but another nested panic will land in Stage 3 where we simply spin, so that is sort of ok...
860 paniclog_append_noflush("\nIn Nested panic stall. Stage 2 bailout. Please go to https://panic.apple.com to report this panic\n");
861 panic_spin_forever();
862 }
863
864 // Stage 3 bailout
865 //
866 // We are done here, we were unable to reset the platform without another nested panic. Spin until the watchdog kicks in.
867 //
868 if (CPUDEBUGGERCOUNT > max_debugger_entry_count + 1) {
869 kdp_machine_reboot_type(kPEHangCPU, 0);
870 }
871 }
872
873 void
874 Debugger(const char *message)
875 {
876 DebuggerWithContext(0, NULL, message, DEBUGGER_OPTION_NONE, (unsigned long)(char *)__builtin_return_address(0));
877 }
878
879 /*
880 * Enter the Debugger
881 *
882 * This is similar to, but not the same as a panic
883 *
884 * Key differences:
885 * - we get here from a debugger entry action (e.g. NMI)
886 * - the system is resumable on x86 (in theory, however it is not clear if this is tested)
887 * - rdar://57738811 (xnu: support resume from debugger via KDP on arm devices)
888 *
889 */
890 void
891 DebuggerWithContext(unsigned int reason, void *ctx, const char *message,
892 uint64_t debugger_options_mask, unsigned long debugger_caller)
893 {
894 spl_t previous_interrupts_state;
895 boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
896
897 #if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
898 read_lbr();
899 #endif
900 previous_interrupts_state = ml_set_interrupts_enabled(FALSE);
901 disable_preemption();
902
903 /* track depth of debugger/panic entry */
904 CPUDEBUGGERCOUNT++;
905
906 /* emit a tracepoint as early as possible in case of hang */
907 SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(debugger_options_mask), ADDR(message), ADDR(debugger_caller));
908
909 /* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
910 check_and_handle_nested_panic(debugger_options_mask, debugger_caller, message, NULL);
911
912 /* Handle any necessary platform specific actions before we proceed */
913 PEInitiatePanic();
914
915 #if DEVELOPMENT || DEBUG
916 INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
917 #endif
918
919 PE_panic_hook(message);
920
921 doprnt_hide_pointers = FALSE;
922
923 if (ctx != NULL) {
924 DebuggerSaveState(DBOP_DEBUGGER, message,
925 NULL, NULL, debugger_options_mask, NULL, TRUE, 0, "");
926 handle_debugger_trap(reason, 0, 0, ctx);
927 DebuggerSaveState(DBOP_NONE, NULL, NULL,
928 NULL, 0, NULL, FALSE, 0, "");
929 } else {
930 DebuggerTrapWithState(DBOP_DEBUGGER, message,
931 NULL, NULL, debugger_options_mask, NULL, TRUE, 0, NULL);
932 }
933
934 /* resume from the debugger */
935
936 CPUDEBUGGERCOUNT--;
937 doprnt_hide_pointers = old_doprnt_hide_pointers;
938 enable_preemption();
939 ml_set_interrupts_enabled(previous_interrupts_state);
940 }
941
942 static struct kdp_callout {
943 struct kdp_callout * callout_next;
944 kdp_callout_fn_t callout_fn;
945 boolean_t callout_in_progress;
946 void * callout_arg;
947 } * kdp_callout_list = NULL;
948
949 /*
950 * Called from kernel context to register a kdp event callout.
951 */
952 void
953 kdp_register_callout(kdp_callout_fn_t fn, void * arg)
954 {
955 struct kdp_callout * kcp;
956 struct kdp_callout * list_head;
957
958 kcp = zalloc_permanent_type(struct kdp_callout);
959
960 kcp->callout_fn = fn;
961 kcp->callout_arg = arg;
962 kcp->callout_in_progress = FALSE;
963
964 /* Lock-less list insertion using compare and exchange. */
965 do {
966 list_head = kdp_callout_list;
967 kcp->callout_next = list_head;
968 } while (!OSCompareAndSwapPtr(list_head, kcp, &kdp_callout_list));
969 }
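/*
 * Illustrative use (hypothetical caller): a component that needs to quiesce
 * hardware while the debugger owns the machine could register
 *
 *	static void my_kdp_event(void *arg, kdp_event_t event) { ... }
 *	...
 *	kdp_register_callout(my_kdp_event, my_state);
 *
 * kdp_callouts() below then invokes it with events such as KDP_EVENT_PANICLOG
 * when panic diagnostics are collected.
 */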
970
971 static void
972 kdp_callouts(kdp_event_t event)
973 {
974 struct kdp_callout *kcp = kdp_callout_list;
975
976 while (kcp) {
977 if (!kcp->callout_in_progress) {
978 kcp->callout_in_progress = TRUE;
979 kcp->callout_fn(kcp->callout_arg, event);
980 kcp->callout_in_progress = FALSE;
981 }
982 kcp = kcp->callout_next;
983 }
984 }
985
986 #if defined(__arm64__)
987 /*
988 * Register an additional buffer with data to include in the panic log
989 *
990 * <rdar://problem/50137705> tracks supporting more than one buffer
991 *
992 * Note that producer_name and buf should never be de-allocated as we reference these during panic.
993 */
994 void
995 register_additional_panic_data_buffer(const char *producer_name, void *buf, int len)
996 {
997 if (panic_data_buffers != NULL) {
998 panic("register_additional_panic_data_buffer called with buffer already registered");
999 }
1000
1001 if (producer_name == NULL || (strlen(producer_name) == 0)) {
1002 panic("register_additional_panic_data_buffer called with invalid producer_name");
1003 }
1004
1005 if (buf == NULL) {
1006 panic("register_additional_panic_data_buffer called with invalid buffer pointer");
1007 }
1008
1009 if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) {
1010 panic("register_additional_panic_data_buffer called with invalid length");
1011 }
1012
1013 struct additional_panic_data_buffer *new_panic_data_buffer = zalloc_permanent_type(struct additional_panic_data_buffer);
1014 new_panic_data_buffer->producer_name = producer_name;
1015 new_panic_data_buffer->buf = buf;
1016 new_panic_data_buffer->len = len;
1017
1018 if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) {
1019 panic("register_additional_panic_data_buffer called with buffer already registered");
1020 }
1021
1022 return;
1023 }
1024 #endif /* defined(__arm64__) */
1025
1026 /*
1027 * An overview of the xnu panic path:
1028 *
1029 * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger().
1030 * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior
1031 * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap()
1032 * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu.
1033 * debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and
1034 * continue to debugger_collect_diagnostics() where we write the paniclog, corefile (if appropriate) and proceed
1035 * according to the device's boot-args.
1036 */
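/*
 * Rough call flow (sketch of the description above):
 *
 *	panic() / panic_with_options() / panic_with_data()
 *	  -> panic_trap_to_debugger()           record state in this CPU's debugger_state, then TRAP_DEBUGGER
 *	    -> handle_debugger_trap()           winner of the debugger_cpu swap halts the other cores
 *	      -> debugger_collect_diagnostics() write the paniclog/corefile, then reboot, spin or return per boot-args
 */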
1037 #undef panic
1038 void
1039 panic(const char *str, ...)
1040 {
1041 va_list panic_str_args;
1042
1043 va_start(panic_str_args, str);
1044 panic_trap_to_debugger(str, &panic_str_args, 0, NULL, 0, NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
1045 va_end(panic_str_args);
1046 }
1047
1048 void
1049 panic_with_data(uuid_t uuid, void *addr, uint32_t len, uint64_t debugger_options_mask, const char *str, ...)
1050 {
1051 va_list panic_str_args;
1052
1053 ext_paniclog_panic_with_data(uuid, addr, len);
1054
1055 #if CONFIG_EXCLAVES
1056 /*
1057 * Before trapping, inform the exclaves scheduler that we're going down
1058 * so it can grab an exclaves stackshot.
1059 */
1060 if ((debugger_options_mask & DEBUGGER_OPTION_USER_WATCHDOG) != 0 &&
1061 exclaves_get_boot_stage() != EXCLAVES_BOOT_STAGE_NONE) {
1062 (void) exclaves_scheduler_request_watchdog_panic();
1063 }
1064 #endif /* CONFIG_EXCLAVES */
1065
1066 va_start(panic_str_args, str);
1067 panic_trap_to_debugger(str, &panic_str_args, 0, NULL, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
1068 NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
1069 va_end(panic_str_args);
1070 }
1071
1072 void
1073 panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
1074 {
1075 va_list panic_str_args;
1076
1077 #if CONFIG_EXCLAVES
1078 /*
1079 * Before trapping, inform the exclaves scheduler that we're going down
1080 * so it can grab an exclaves stackshot.
1081 */
1082 if ((debugger_options_mask & DEBUGGER_OPTION_USER_WATCHDOG) != 0 &&
1083 exclaves_get_boot_stage() != EXCLAVES_BOOT_STAGE_NONE) {
1084 (void) exclaves_scheduler_request_watchdog_panic();
1085 }
1086 #endif /* CONFIG_EXCLAVES */
1087
1088 va_start(panic_str_args, str);
1089 panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
1090 NULL, (unsigned long)(char *)__builtin_return_address(0), NULL);
1091 va_end(panic_str_args);
1092 }
1093
1094 void
1095 panic_with_options_and_initiator(const char* initiator, unsigned int reason, void *ctx, uint64_t debugger_options_mask, const char *str, ...)
1096 {
1097 va_list panic_str_args;
1098
1099 va_start(panic_str_args, str);
1100 panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK),
1101 NULL, (unsigned long)(char *)__builtin_return_address(0), initiator);
1102 va_end(panic_str_args);
1103 }
1104
1105 boolean_t
1106 panic_validate_ptr(void *ptr, vm_size_t size, const char *what)
1107 {
1108 if (ptr == NULL) {
1109 paniclog_append_noflush("NULL %s pointer\n", what);
1110 return false;
1111 }
1112
1113 if (!ml_validate_nofault((vm_offset_t)ptr, size)) {
1114 paniclog_append_noflush("Invalid %s pointer: %p (size %d)\n",
1115 what, ptr, (uint32_t)size);
1116 return false;
1117 }
1118
1119 return true;
1120 }
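/*
 * Callers below use the PANIC_VALIDATE_PTR() wrapper (defined in debug.h),
 * which is assumed here to pass sizeof(*ptr) and the stringified pointer name
 * as the 'what' argument.
 */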
1121
1122 boolean_t
1123 panic_get_thread_proc_task(struct thread *thread, struct task **task, struct proc **proc)
1124 {
1125 if (!PANIC_VALIDATE_PTR(thread)) {
1126 return false;
1127 }
1128
1129 if (!PANIC_VALIDATE_PTR(thread->t_tro)) {
1130 return false;
1131 }
1132
1133 if (!PANIC_VALIDATE_PTR(thread->t_tro->tro_task)) {
1134 return false;
1135 }
1136
1137 if (task) {
1138 *task = thread->t_tro->tro_task;
1139 }
1140
1141 if (!panic_validate_ptr(thread->t_tro->tro_proc,
1142 sizeof(struct proc *), "bsd_info")) {
1143 *proc = NULL;
1144 } else {
1145 *proc = thread->t_tro->tro_proc;
1146 }
1147
1148 return true;
1149 }
1150
1151 #if defined (__x86_64__)
1152 /*
1153 * panic_with_thread_context() is used on x86 platforms to specify a different thread that should be backtraced in the paniclog.
1154 * We don't generally need this functionality on embedded platforms because embedded platforms include a panic time stackshot
1155 * from customer devices. We plumb the thread pointer via the debugger trap mechanism and backtrace the kernel stack from the
1156 * thread when writing the panic log.
1157 *
1158 * NOTE: panic_with_thread_context() should be called with an explicit thread reference held on the passed thread.
1159 */
1160 void
1161 panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
1162 {
1163 va_list panic_str_args;
1164 __assert_only os_ref_count_t th_ref_count;
1165
1166 assert_thread_magic(thread);
1167 th_ref_count = os_ref_get_count_raw(&thread->ref_count);
1168 assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);
1169
1170 /* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
1171 thread_reference(thread);
1172
1173 va_start(panic_str_args, str);
1174 panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE),
1175 thread, (unsigned long)(char *)__builtin_return_address(0), "");
1176
1177 va_end(panic_str_args);
1178 }
1179 #endif /* defined (__x86_64__) */
1180
1181 #pragma clang diagnostic push
1182 #pragma clang diagnostic ignored "-Wmissing-noreturn"
1183 void
1184 panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx,
1185 uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller, const char *panic_initiator)
1186 {
1187 #pragma clang diagnostic pop
1188
1189 #if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
1190 read_lbr();
1191 #endif
1192
1193 /* optionally call sync, to reduce lost logs on restart, avoid on recursive panic. Unsafe due to unbounded sync() duration */
1194 if ((panic_options_mask & DEBUGGER_OPTION_SYNC_ON_PANIC_UNSAFE) && (CPUDEBUGGERCOUNT == 0)) {
1195 sync_internal();
1196 }
1197
1198 /* Turn off I/O tracing once we've panicked */
1199 iotrace_disable();
1200
1201 /* call machine-layer panic handler */
1202 ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller, panic_initiator);
1203
1204 /* track depth of debugger/panic entry */
1205 CPUDEBUGGERCOUNT++;
1206
1207 /* emit a tracepoint as early as possible in case of hang */
1208 SOCD_TRACE_XNU(PANIC, PACK_2X32(VALUE(cpu_number()), VALUE(CPUDEBUGGERCOUNT)), VALUE(panic_options_mask), ADDR(panic_format_str), ADDR(panic_caller));
1209
1210 /* do max nested panic/debugger check, this will report nesting to the console and spin forever if we exceed a limit */
1211 check_and_handle_nested_panic(panic_options_mask, panic_caller, panic_format_str, panic_args);
1212
1213 /* If we're in a stackshot, signal that we've started panicking and wait for other CPUs to coalesce and spin before proceeding */
1214 stackshot_cpu_signal_panic();
1215
1216 /* Handle any necessary platform specific actions before we proceed */
1217 PEInitiatePanic();
1218
1219 #if DEVELOPMENT || DEBUG
1220 INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_ENTRY);
1221 #endif
1222
1223 PE_panic_hook(panic_format_str);
1224
1225 #if defined (__x86_64__)
1226 plctrace_disable();
1227 #endif
1228
1229 if (write_trace_on_panic && kdebug_enable) {
1230 if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
1231 ml_set_interrupts_enabled(TRUE);
1232 KDBG_RELEASE(TRACE_PANIC);
1233 kdbg_dump_trace_to_file(KDBG_TRACE_PANIC_FILENAME, false);
1234 }
1235 }
1236
1237 ml_set_interrupts_enabled(FALSE);
1238 disable_preemption();
1239
1240 debug_fatal_panic_begin();
1241
1242 #if defined (__x86_64__)
1243 pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE);
1244 #endif /* defined (__x86_64__) */
1245
1246 /* Never hide pointers from panic logs. */
1247 doprnt_hide_pointers = FALSE;
1248
1249 if (ctx != NULL) {
1250 /*
1251 * We called into panic from a trap, no need to trap again. Set the
1252 * state on the current CPU and then jump to handle_debugger_trap.
1253 */
1254 DebuggerSaveState(DBOP_PANIC, "panic",
1255 panic_format_str, panic_args,
1256 panic_options_mask, panic_data_ptr, TRUE, panic_caller, panic_initiator);
1257 handle_debugger_trap(reason, 0, 0, ctx);
1258 }
1259
1260 #if defined(__arm64__) && !APPLEVIRTUALPLATFORM
1261 /*
1262 * Signal to fastsim that it should open debug ports (nop on hardware)
1263 */
1264 __asm__ volatile ("hint #0x45");
1265 #endif /* defined(__arm64__) && !APPLEVIRTUALPLATFORM */
1266
1267 DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str,
1268 panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller, panic_initiator);
1269
1270 /*
1271 * Not reached.
1272 */
1273 panic_stop();
1274 __builtin_unreachable();
1275 }
1276
1277 void
1278 panic_spin_forever(void)
1279 {
1280 for (;;) {
1281 #if defined(__arm__) || defined(__arm64__)
1282 /* On arm32, which doesn't have a WFE timeout, this may not return. But that should be OK on this path. */
1283 __builtin_arm_wfe();
1284 #else
1285 cpu_pause();
1286 #endif
1287 }
1288 }
1289
1290 void
1291 panic_stackshot_release_lock(void)
1292 {
1293 assert(!not_in_kdp);
1294 DebuggerUnlock();
1295 }
1296
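/*
 * Reboot at the end of the panic flow.  If the caller asked to skip the
 * kPEPanicEnd callouts (DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS, e.g. on the
 * nested-panic bailout paths above), use kPEPanicRestartCPUNoCallouts so no
 * further callouts can run.
 */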
1297 static void
1298 kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags)
1299 {
1300 if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) {
1301 PEHaltRestart(kPEPanicRestartCPUNoCallouts);
1302 } else {
1303 PEHaltRestart(type);
1304 }
1305 halt_all_cpus(TRUE);
1306 }
1307
1308 void
1309 kdp_machine_reboot(void)
1310 {
1311 kdp_machine_reboot_type(kPEPanicRestartCPU, 0);
1312 }
1313
1314 static __attribute__((unused)) void
1315 panic_debugger_log(const char *string, ...)
1316 {
1317 va_list panic_debugger_log_args;
1318
1319 va_start(panic_debugger_log_args, string);
1320 #pragma clang diagnostic push
1321 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1322 _doprnt(string, &panic_debugger_log_args, consdebug_putc, 16);
1323 #pragma clang diagnostic pop
1324 va_end(panic_debugger_log_args);
1325
1326 #if defined(__arm64__)
1327 paniclog_flush();
1328 #endif
1329 }
1330
1331 /*
1332 * Gather and save diagnostic information about a panic (or Debugger call).
1333 *
1334 * On embedded, Debugger and Panic are treated very similarly -- WDT uses Debugger so we can
1335 * theoretically return from it. On desktop, Debugger is treated as a conventional debugger -- i.e. no
1336 * paniclog is written and no core is written unless we request a core on NMI.
1337 *
1338 * This routine handles kicking off local coredumps, paniclogs, calling into the Debugger/KDP (if it's configured),
1339 * and calling out to any other functions we have for collecting diagnostic info.
1340 */
1341 static void
1342 debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
1343 {
1344 #if DEVELOPMENT || DEBUG
1345 INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_PRELOG);
1346 #endif
1347
1348 #if defined(__x86_64__)
1349 kprintf("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
1350 #endif
1351 /*
1352 * DB_HALT (halt_in_debugger) can be requested on startup; we shouldn't generate
1353 * a coredump/paniclog for this type of debugger entry. If KDP isn't configured,
1354 * we'll just spin in kdp_raise_exception.
1355 */
1356 if (debugger_current_op == DBOP_DEBUGGER && halt_in_debugger) {
1357 kdp_raise_exception(exception, code, subcode, state);
1358 if (debugger_safe_to_return && !debugger_is_panic) {
1359 return;
1360 }
1361 }
1362
1363 #ifdef CONFIG_KCOV
1364 /* Try not to break the core dump path with the sanitizer. */
1365 kcov_panic_disable();
1366 #endif
1367
1368 if ((debugger_current_op == DBOP_PANIC) ||
1369 ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
1370 /*
1371 * Attempt to notify listeners once and only once that we've started
1372 * panicking. Only do this for Debugger() calls if we're treating
1373 * Debugger() calls like panic().
1374 */
1375 uint32_t panic_details = 0;
1376 /* if this is a force-reset panic then capture a log and reboot immediately. */
1377 if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
1378 panic_details |= kPanicDetailsForcePowerOff;
1379 }
1380 PEHaltRestartInternal(kPEPanicBegin, panic_details);
1381
1382 /*
1383 * Set the begin pointer in the panic log structure. We key off of this
1384 * static variable rather than contents from the panic header itself in case someone
1385 * has stomped over the panic_info structure. Also initializes the header magic.
1386 */
1387 static boolean_t began_writing_paniclog = FALSE;
1388 if (!began_writing_paniclog) {
1389 PE_init_panicheader();
1390 began_writing_paniclog = TRUE;
1391 }
1392
1393 if (CPUDEBUGGERCOUNT > 1) {
1394 /*
1395 * we are in a nested panic. Record the nested bit in panic flags and do some housekeeping
1396 */
1397 PE_update_panicheader_nestedpanic();
1398 paniclog_flush();
1399 }
1400 }
1401
1402 /*
1403 * Write panic string if this was a panic.
1404 *
1405 * TODO: Consider moving to SavePanicInfo as this is part of the panic log.
1406 */
1407 if (debugger_current_op == DBOP_PANIC) {
1408 paniclog_append_noflush("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
1409 if (debugger_panic_str) {
1410 #pragma clang diagnostic push
1411 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1412 _doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
1413 #pragma clang diagnostic pop
1414 }
1415 paniclog_append_noflush("\n");
1416 }
1417 #if defined(__x86_64__)
1418 else if (((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
1419 paniclog_append_noflush("Debugger called: <%s>\n", debugger_message ? debugger_message : "");
1420 }
1421
1422 /*
1423 * Debugger() is treated like panic() on embedded -- for example we use it for WDT
1424 * panics (so we need to write a paniclog). On desktop Debugger() is used in the
1425 * conventional sense.
1426 */
1427 if (debugger_current_op == DBOP_PANIC || ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic))
1428 #endif /* __x86_64__ */
1429 {
1430 kdp_callouts(KDP_EVENT_PANICLOG);
1431
1432 /*
1433 * Write paniclog and panic stackshot (if supported)
1434 * TODO: Need to clear the panic log when return-from-debugger is
1435 * hooked up for embedded
1436 */
1437 SavePanicInfo(debugger_message, debugger_panic_data, debugger_panic_options, debugger_panic_initiator);
1438
1439 #if DEVELOPMENT || DEBUG
1440 INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTLOG);
1441 #endif
1442
1443 /* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */
1444 if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) {
1445 PEHaltRestart(kPEPanicDiagnosticsDone);
1446 PEHaltRestart(kPEPanicRestartCPUNoCallouts);
1447 }
1448 }
1449
1450 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
1451 /*
1452 * If reboot on panic is enabled and the caller of panic indicated that we should skip
1453 * local coredumps, don't try to write these and instead go straight to reboot. This
1454 * allows us to persist any data that's stored in the panic log.
1455 */
1456 if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) &&
1457 (debug_boot_arg & DB_REBOOT_POST_CORE)) {
1458 PEHaltRestart(kPEPanicDiagnosticsDone);
1459 kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
1460 }
1461
1462 /*
1463 * Consider generating a local corefile if the infrastructure is configured
1464 * and we haven't disabled on-device coredumps.
1465 */
1466 if (on_device_corefile_enabled()) {
1467 #if CONFIG_SPTM
1468 /* We want to skip taking a local core dump if this is a panic from SPTM/TXM/cL4. */
1469 extern uint8_t sptm_supports_local_coredump;
1470 bool sptm_interrupted = false;
1471 pmap_sptm_percpu_data_t *sptm_pcpu = PERCPU_GET(pmap_sptm_percpu);
1472 sptm_get_cpu_state(sptm_pcpu->sptm_cpu_id, CPUSTATE_SPTM_INTERRUPTED, &sptm_interrupted);
1473 #endif
1474 if (!kdp_has_polled_corefile()) {
1475 if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) {
1476 paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (mode : 0x%x, error : 0x%x)\n",
1477 kdp_polled_corefile_mode(), kdp_polled_corefile_error());
1478 #if defined(__arm64__)
1479 if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
1480 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
1481 }
1482 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1483 paniclog_flush();
1484 #else /* defined(__arm64__) */
1485 if (panic_info->mph_panic_log_offset != 0) {
1486 if (kdp_polled_corefile_mode() == kIOPolledCoreFileModeUnlinked) {
1487 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREFILE_UNLINKED;
1488 }
1489 panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1490 paniclog_flush();
1491 }
1492 #endif /* defined(__arm64__) */
1493 }
1494 }
1495 #if XNU_MONITOR
1496 else if (pmap_get_cpu_data()->ppl_state != PPL_STATE_KERNEL) {
1497 paniclog_append_noflush("skipping local kernel core because the PPL is not in KERNEL state\n");
1498 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1499 paniclog_flush();
1500 }
1501 #elif CONFIG_SPTM
1502 else if (!sptm_supports_local_coredump) {
1503 paniclog_append_noflush("skipping local kernel core because the SPTM is in PANIC state and can't support core dump generation\n");
1504 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1505 paniclog_flush();
1506 } else if (sptm_interrupted) {
1507 paniclog_append_noflush("skipping local kernel core because the SPTM is in INTERRUPTED state and can't support core dump generation\n");
1508 panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED;
1509 paniclog_flush();
1510 }
1511 #endif /* XNU_MONITOR */
1512 else {
1513 int ret = -1;
1514
1515 #if defined (__x86_64__)
1516 /* On x86 we don't do a coredump on Debugger unless the DB_KERN_DUMP_ON_NMI boot-arg is specified. */
1517 if (debugger_current_op != DBOP_DEBUGGER || (debug_boot_arg & DB_KERN_DUMP_ON_NMI))
1518 #endif
1519 {
1520 /*
1521 * Doing an on-device coredump leaves the disk driver in a state
1522 * that cannot be resumed.
1523 */
1524 debugger_safe_to_return = FALSE;
1525 begin_panic_transfer();
1526 vm_memtag_disable_checking();
1527 ret = kern_dump(KERN_DUMP_DISK);
1528 vm_memtag_enable_checking();
1529 abort_panic_transfer();
1530
1531 #if DEVELOPMENT || DEBUG
1532 INJECT_NESTED_PANIC_IF_REQUESTED(PANIC_TEST_CASE_RECURPANIC_POSTCORE);
1533 #endif
1534 }
1535
1536 /*
1537 * If DB_REBOOT_POST_CORE is set, then reboot if the coredump was successfully saved
1538 * or if option to ignore failures is set.
1539 */
1540 if ((debug_boot_arg & DB_REBOOT_POST_CORE) &&
1541 ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) {
1542 PEHaltRestart(kPEPanicDiagnosticsDone);
1543 kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
1544 }
1545 }
1546 }
1547
1548 if (debugger_current_op == DBOP_PANIC ||
1549 ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) {
1550 PEHaltRestart(kPEPanicDiagnosticsDone);
1551 }
1552
1553 if (debug_boot_arg & DB_REBOOT_ALWAYS) {
1554 kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
1555 }
1556
1557 /* If KDP is configured, try to trap to the debugger */
1558 #if defined(__arm64__)
1559 if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) {
1560 #else
1561 if (current_debugger != NO_CUR_DB) {
1562 #endif
1563 kdp_raise_exception(exception, code, subcode, state);
1564 /*
1565 * Only return if we entered via Debugger and it's safe to return
1566 * (we halted the other cores successfully, this isn't a nested panic, etc)
1567 */
1568 if (debugger_current_op == DBOP_DEBUGGER &&
1569 debugger_safe_to_return &&
1570 kernel_debugger_entry_count == 1 &&
1571 !debugger_is_panic) {
1572 return;
1573 }
1574 }
1575
1576 #if defined(__arm64__)
1577 if (PE_i_can_has_debugger(NULL) && panicDebugging) {
1578 /*
1579 * Print panic string at the end of serial output
1580 * to make panic more obvious when someone connects a debugger
1581 */
1582 if (debugger_panic_str) {
1583 panic_debugger_log("Original panic string:\n");
1584 panic_debugger_log("panic(cpu %u caller 0x%lx): ", (unsigned) cpu_number(), debugger_panic_caller);
1585 #pragma clang diagnostic push
1586 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1587 _doprnt(debugger_panic_str, debugger_panic_args, consdebug_putc, 0);
1588 #pragma clang diagnostic pop
1589 panic_debugger_log("\n");
1590 }
1591
1592 /* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */
1593 panic_spin_shmcon();
1594 }
1595 #endif /* defined(__arm64__) */
1596
1597 #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1598
1599 PEHaltRestart(kPEPanicDiagnosticsDone);
1600
1601 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1602
1603 if (!panicDebugging) {
1604 kdp_machine_reboot_type(kPEPanicRestartCPU, debugger_panic_options);
1605 }
1606
1607 paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n");
1608 panic_spin_forever();
1609 }
1610
1611 #if SCHED_HYGIENE_DEBUG
1612 uint64_t debugger_trap_timestamps[9];
1613 # define DEBUGGER_TRAP_TIMESTAMP(i) debugger_trap_timestamps[i] = mach_absolute_time();
1614 #else
1615 # define DEBUGGER_TRAP_TIMESTAMP(i)
1616 #endif /* SCHED_HYGIENE_DEBUG */
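/*
 * Illustrative sketch (not built): when SCHED_HYGIENE_DEBUG is enabled, the
 * debugger_trap_timestamps[] recorded above can be reduced to per-phase
 * durations, e.g. while inspecting a core. The helper name is hypothetical.
 */
#if 0 /* example only */
static void
example_print_debugger_trap_phases(void)
{
	for (int i = 1; i < 9; i++) {
		uint64_t delta_abs = debugger_trap_timestamps[i] - debugger_trap_timestamps[i - 1];
		uint64_t delta_ns = 0;

		/* convert mach_absolute_time() units to nanoseconds */
		absolutetime_to_nanoseconds(delta_abs, &delta_ns);
		kprintf("debugger trap phase %d -> %d: %llu ns\n", i - 1, i, delta_ns);
	}
}
#endif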
1617
1618 void
1619 handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state)
1620 {
1621 unsigned int initial_not_in_kdp = not_in_kdp;
1622 kern_return_t ret = KERN_SUCCESS;
1623 debugger_op db_prev_op = debugger_current_op;
1624
1625 DEBUGGER_TRAP_TIMESTAMP(0);
1626
1627 DebuggerLock();
1628 ret = DebuggerHaltOtherCores(CPUDEBUGGERSYNC, (CPUDEBUGGEROP == DBOP_STACKSHOT));
1629
1630 DEBUGGER_TRAP_TIMESTAMP(1);
1631
1632 #if SCHED_HYGIENE_DEBUG
1633 if (serialmode & SERIALMODE_OUTPUT) {
1634 ml_spin_debug_reset(current_thread());
1635 }
1636 #endif /* SCHED_HYGIENE_DEBUG */
1637 if (ret != KERN_SUCCESS) {
1638 CPUDEBUGGERRET = ret;
1639 DebuggerUnlock();
1640 return;
1641 }
1642
1643 /* Update the global panic/debugger nested entry level */
1644 kernel_debugger_entry_count = CPUDEBUGGERCOUNT;
1645 if (kernel_debugger_entry_count > 0) {
1646 console_suspend();
1647 }
1648
1649 /*
1650 * TODO: Should we do anything special for nested panics here? i.e. if we've trapped more than twice,
1651 * should we call into the debugger (if configured) and then reboot once the panic log has been written?
1652 */
1653
1654 if (CPUDEBUGGEROP == DBOP_NONE) {
1655 /* If there was no debugger context set up, we trapped due to a software breakpoint */
1656 debugger_current_op = DBOP_BREAKPOINT;
1657 } else {
1658 /* Not safe to return from a nested panic/debugger call */
1659 if (debugger_current_op == DBOP_PANIC ||
1660 debugger_current_op == DBOP_DEBUGGER) {
1661 debugger_safe_to_return = FALSE;
1662 }
1663
1664 debugger_current_op = CPUDEBUGGEROP;
1665
1666 /* Only overwrite the panic message if there is none already - save the data from the first call */
1667 if (debugger_panic_str == NULL) {
1668 debugger_panic_str = CPUPANICSTR;
1669 debugger_panic_args = CPUPANICARGS;
1670 debugger_panic_data = CPUPANICDATAPTR;
1671 debugger_message = CPUDEBUGGERMSG;
1672 debugger_panic_caller = CPUPANICCALLER;
1673 debugger_panic_initiator = CPUPANICINITIATOR;
1674 }
1675
1676 debugger_panic_options = CPUPANICOPTS;
1677 }
1678
1679 /*
1680 * Clear the op from the processor debugger context so we can handle
1681 * breakpoints in the debugger
1682 */
1683 CPUDEBUGGEROP = DBOP_NONE;
1684
1685 DEBUGGER_TRAP_TIMESTAMP(2);
1686
1687 kdp_callouts(KDP_EVENT_ENTER);
1688 not_in_kdp = 0;
1689
1690 DEBUGGER_TRAP_TIMESTAMP(3);
1691
1692 #if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
1693 shmem_mark_as_busy();
1694 #endif
1695
1696 if (debugger_current_op == DBOP_BREAKPOINT) {
1697 kdp_raise_exception(exception, code, subcode, state);
1698 } else if (debugger_current_op == DBOP_STACKSHOT) {
1699 CPUDEBUGGERRET = do_stackshot(NULL);
1700 #if PGO
1701 } else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) {
1702 CPUDEBUGGERRET = do_pgo_reset_counters();
1703 #endif
1704 } else {
1705 /* note: this is the panic path... */
1706 debug_fatal_panic_begin();
1707 #if defined(__arm64__) && (DEBUG || DEVELOPMENT)
1708 if (!PE_arm_debug_and_trace_initialized()) {
1709 paniclog_append_noflush("kernel panicked before debug and trace infrastructure initialized!\n"
1710 "spinning forever...\n");
1711 panic_spin_forever();
1712 }
1713 #endif
1714 debugger_collect_diagnostics(exception, code, subcode, state);
1715 }
1716
1717 #if defined(__arm64__) && CONFIG_KDP_INTERACTIVE_DEBUGGING
1718 shmem_unmark_as_busy();
1719 #endif
1720
1721 DEBUGGER_TRAP_TIMESTAMP(4);
1722
1723 not_in_kdp = initial_not_in_kdp;
1724 kdp_callouts(KDP_EVENT_EXIT);
1725
1726 DEBUGGER_TRAP_TIMESTAMP(5);
1727
1728 if (debugger_current_op != DBOP_BREAKPOINT) {
1729 debugger_panic_str = NULL;
1730 debugger_panic_args = NULL;
1731 debugger_panic_data = NULL;
1732 debugger_panic_options = 0;
1733 debugger_message = NULL;
1734 }
1735
1736 /* Restore the previous debugger state */
1737 debugger_current_op = db_prev_op;
1738
1739 DEBUGGER_TRAP_TIMESTAMP(6);
1740
1741 DebuggerResumeOtherCores();
1742
1743 DEBUGGER_TRAP_TIMESTAMP(7);
1744
1745 DebuggerUnlock();
1746
1747 DEBUGGER_TRAP_TIMESTAMP(8);
1748
1749 return;
1750 }
1751
1752 __attribute__((noinline, not_tail_called))
1753 void
1754 log(__unused int level, char *fmt, ...)
1755 {
1756 void *caller = __builtin_return_address(0);
1757 va_list listp;
1758 va_list listp2;
1759
1760
1761 #ifdef lint
1762 level++;
1763 #endif /* lint */
1764 #ifdef MACH_BSD
1765 va_start(listp, fmt);
1766 va_copy(listp2, listp);
1767
1768 disable_preemption();
1769 _doprnt(fmt, &listp, cons_putc_locked, 0);
1770 enable_preemption();
1771
1772 va_end(listp);
1773
1774 #pragma clang diagnostic push
1775 #pragma clang diagnostic ignored "-Wformat-nonliteral"
1776 os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller);
1777 #pragma clang diagnostic pop
1778 va_end(listp2);
1779 #endif
1780 }
1781
1782 /*
1783 * Per <rdar://problem/24974766>, skip appending log messages to
1784 * the new logging infrastructure in contexts where safety is
1785 * uncertain. These contexts include:
1786 * - We're in the debugger
1787 * - We're handling a panic
1788 * - Interrupts are disabled
1789 * - Preemption is disabled
1790 * In all of these cases it is potentially unsafe to log messages.
1791 */
1792
1793 boolean_t
1794 oslog_is_safe(void)
1795 {
1796 return kernel_debugger_entry_count == 0 &&
1797 not_in_kdp == 1 &&
1798 get_preemption_level() == 0 &&
1799 ml_get_interrupts_enabled() == TRUE;
1800 }
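/*
 * Illustrative sketch (not built): a caller that may run in panic, debugger,
 * or interrupt context can consult oslog_is_safe() before touching the os_log
 * infrastructure and fall back to the synchronous console path otherwise. The
 * function below is hypothetical and assumes <os/log.h> is available.
 */
#if 0 /* example only */
static void
example_log_from_uncertain_context(void)
{
	if (oslog_is_safe()) {
		os_log(OS_LOG_DEFAULT, "example: context is safe for os_log");
	} else {
		/* os_log may be unsafe here; emit to the console instead */
		kprintf("example: falling back to console output\n");
	}
}
#endif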
1801
1802 boolean_t
1803 debug_mode_active(void)
1804 {
1805 return (0 != kernel_debugger_entry_count) || (0 == not_in_kdp);
1806 }
1807
1808 void
1809 debug_putc(char c)
1810 {
1811 if ((debug_buf_size != 0) &&
1812 ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size) &&
1813 (!is_debug_ptr_in_ext_paniclog())) {
1814 *debug_buf_ptr = c;
1815 debug_buf_ptr++;
1816 }
1817 }
1818
1819 #if defined (__x86_64__)
1820 struct pasc {
1821 unsigned a: 7;
1822 unsigned b: 7;
1823 unsigned c: 7;
1824 unsigned d: 7;
1825 unsigned e: 7;
1826 unsigned f: 7;
1827 unsigned g: 7;
1828 unsigned h: 7;
1829 } __attribute__((packed));
1830
1831 typedef struct pasc pasc_t;
1832
1833 /*
1834 * In-place packing routines -- inefficient, but they're called at most once.
1835 * Assumes "buflen" is a multiple of 8. Used for compressing paniclogs on x86.
1836 */
1837 int
1838 packA(char *inbuf, uint32_t length, uint32_t buflen)
1839 {
1840 unsigned int i, j = 0;
1841 pasc_t pack;
1842
1843 length = MIN(((length + 7) & ~7), buflen);
1844
1845 for (i = 0; i < length; i += 8) {
1846 pack.a = inbuf[i];
1847 pack.b = inbuf[i + 1];
1848 pack.c = inbuf[i + 2];
1849 pack.d = inbuf[i + 3];
1850 pack.e = inbuf[i + 4];
1851 pack.f = inbuf[i + 5];
1852 pack.g = inbuf[i + 6];
1853 pack.h = inbuf[i + 7];
1854 bcopy((char *) &pack, inbuf + j, 7);
1855 j += 7;
1856 }
1857 return j;
1858 }
1859
1860 void
1861 unpackA(char *inbuf, uint32_t length)
1862 {
1863 pasc_t packs;
1864 unsigned i = 0;
1865 length = (length * 8) / 7;
1866
1867 while (i < length) {
1868 packs = *(pasc_t *)&inbuf[i];
1869 bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8)));
1870 inbuf[i++] = packs.a;
1871 inbuf[i++] = packs.b;
1872 inbuf[i++] = packs.c;
1873 inbuf[i++] = packs.d;
1874 inbuf[i++] = packs.e;
1875 inbuf[i++] = packs.f;
1876 inbuf[i++] = packs.g;
1877 inbuf[i++] = packs.h;
1878 }
1879 }
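/*
 * Illustrative sketch (not built): round-tripping a small buffer through
 * packA()/unpackA(). Each group of eight 7-bit ASCII characters packs into
 * seven bytes, so a 16-byte input shrinks to 14 bytes and unpackA() expands
 * it back in place. The buffer contents are hypothetical.
 */
#if 0 /* example only */
static void
example_pack_roundtrip(void)
{
	char buf[16] = "panic log text."; /* 15 characters + NUL; buflen is a multiple of 8 */
	int packed = packA(buf, sizeof(buf), sizeof(buf));

	kprintf("packed %d bytes into %d\n", (int)sizeof(buf), packed);
	unpackA(buf, (uint32_t)packed); /* restores the original 16 bytes in place */
}
#endif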
1880 #endif /* defined (__x86_64__) */
1881
1882 extern char *proc_name_address(void *);
1883 extern char *proc_longname_address(void *);
1884
1885 __private_extern__ void
1886 panic_display_process_name(void)
1887 {
1888 proc_name_t proc_name = {};
1889 struct proc *cbsd_info = NULL;
1890 task_t ctask = NULL;
1891 vm_size_t size;
1892
1893 if (!panic_get_thread_proc_task(current_thread(), &ctask, &cbsd_info)) {
1894 goto out;
1895 }
1896
1897 if (cbsd_info == NULL) {
1898 goto out;
1899 }
1900
1901 size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info),
1902 (vm_offset_t)&proc_name, sizeof(proc_name));
1903
1904 if (size == 0 || proc_name[0] == '\0') {
1905 size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info),
1906 (vm_offset_t)&proc_name,
1907 MIN(sizeof(command_t), sizeof(proc_name)));
1908 if (size > 0) {
1909 proc_name[size - 1] = '\0';
1910 }
1911 }
1912
1913 out:
1914 proc_name[sizeof(proc_name) - 1] = '\0';
1915 paniclog_append_noflush("\nProcess name corresponding to current thread (%p): %s\n",
1916 current_thread(), proc_name[0] != '\0' ? proc_name : "Unknown");
1917 }
1918
1919 unsigned
1920 panic_active(void)
1921 {
1922 return debugger_current_op == DBOP_PANIC ||
1923 (debugger_current_op == DBOP_DEBUGGER && debugger_is_panic);
1924 }
1925
1926 void
1927 populate_model_name(char *model_string)
1928 {
1929 strlcpy(model_name, model_string, sizeof(model_name));
1930 }
1931
1932 void
1933 panic_display_model_name(void)
1934 {
1935 char tmp_model_name[sizeof(model_name)];
1936
1937 if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) {
1938 return;
1939 }
1940
1941 tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';
1942
1943 if (tmp_model_name[0] != 0) {
1944 paniclog_append_noflush("System model name: %s\n", tmp_model_name);
1945 }
1946 }
1947
1948 void
1949 panic_display_kernel_uuid(void)
1950 {
1951 char tmp_kernel_uuid[sizeof(kernel_uuid_string)];
1952
1953 if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) {
1954 return;
1955 }
1956
1957 if (tmp_kernel_uuid[0] != '\0') {
1958 paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid);
1959 }
1960 }
1961
1962 #if CONFIG_SPTM
1963 static void
1964 panic_display_component_uuid(char const *component_name, void *component_address)
1965 {
1966 uuid_t *component_uuid;
1967 unsigned long component_uuid_len = 0;
1968 uuid_string_t component_uuid_string;
1969
1970 component_uuid = getuuidfromheader((kernel_mach_header_t *)component_address, &component_uuid_len);
1971
1972 if (component_uuid != NULL && component_uuid_len == sizeof(uuid_t)) {
1973 uuid_unparse_upper(*component_uuid, component_uuid_string);
1974 paniclog_append_noflush("%s UUID: %s\n", component_name, component_uuid_string);
1975 }
1976 }
1977 #endif /* CONFIG_SPTM */
1978
1979 void
1980 panic_display_kernel_aslr(void)
1981 {
1982 #if CONFIG_SPTM
1983 {
1984 struct debug_header const *dh = SPTMArgs->debug_header;
1985
1986 paniclog_append_noflush("Debug Header address: %p\n", dh);
1987
1988 if (dh != NULL) {
1989 void *component_address;
1990
1991 paniclog_append_noflush("Debug Header entry count: %d\n", dh->count);
1992
1993 switch (dh->count) {
1994 default: // 3 or more
1995 component_address = dh->image[DEBUG_HEADER_ENTRY_TXM];
1996 paniclog_append_noflush("TXM load address: %p\n", component_address);
1997
1998 panic_display_component_uuid("TXM", component_address);
1999 OS_FALLTHROUGH;
2000 case 2:
2001 component_address = dh->image[DEBUG_HEADER_ENTRY_XNU];
2002 paniclog_append_noflush("Debug Header kernelcache load address: %p\n", component_address);
2003
2004 panic_display_component_uuid("Debug Header kernelcache", component_address);
2005 OS_FALLTHROUGH;
2006 case 1:
2007 component_address = dh->image[DEBUG_HEADER_ENTRY_SPTM];
2008 paniclog_append_noflush("SPTM load address: %p\n", component_address);
2009
2010 panic_display_component_uuid("SPTM", component_address);
2011 OS_FALLTHROUGH;
2012 case 0:
2013 ; // nothing to print
2014 }
2015 }
2016 }
2017 #endif /* CONFIG_SPTM */
2018
2019 kc_format_t kc_format;
2020
2021 PE_get_primary_kc_format(&kc_format);
2022
2023 if (kc_format == KCFormatFileset) {
2024 void *kch = PE_get_kc_header(KCKindPrimary);
2025 paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
2026 paniclog_append_noflush("KernelCache base: %p\n", (void*) kch);
2027 paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide);
2028 paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext);
2029 #if defined(__arm64__)
2030 extern vm_offset_t segTEXTEXECB;
2031 paniclog_append_noflush("Kernel text exec slide: 0x%016lx\n", (unsigned long)segTEXTEXECB - (unsigned long)kch + vm_kernel_slide);
2032 paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB);
2033 #endif /* defined(__arm64__) */
2034 } else if (vm_kernel_slide) {
2035 paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide);
2036 paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
2037 } else {
2038 paniclog_append_noflush("Kernel text base: %p\n", (void *)vm_kernel_stext);
2039 }
2040 }
2041
2042 void
2043 panic_display_hibb(void)
2044 {
2045 #if defined(__i386__) || defined (__x86_64__)
2046 paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base);
2047 #endif
2048 }
2049
2050 #if CONFIG_ECC_LOGGING
2051 __private_extern__ void
2052 panic_display_ecc_errors(void)
2053 {
2054 uint32_t count = ecc_log_get_correction_count();
2055
2056 if (count > 0) {
2057 paniclog_append_noflush("ECC Corrections:%u\n", count);
2058 }
2059 }
2060 #endif /* CONFIG_ECC_LOGGING */
2061
2062 #if CONFIG_FREEZE
2063 extern bool freezer_incore_cseg_acct;
2064 extern int32_t c_segment_pages_compressed_incore;
2065 #endif
2066
2067 extern uint32_t c_segment_pages_compressed;
2068 extern uint32_t c_segment_count;
2069 extern uint32_t c_segments_limit;
2070 extern uint32_t c_segment_pages_compressed_limit;
2071 extern uint32_t c_segment_pages_compressed_nearing_limit;
2072 extern uint32_t c_segments_nearing_limit;
2073 extern int vm_num_swap_files;
2074
2075 void
2076 panic_display_compressor_stats(void)
2077 {
2078 int isswaplow = vm_swap_low_on_space();
2079 #if CONFIG_FREEZE
2080 uint32_t incore_seg_count;
2081 uint32_t incore_compressed_pages;
2082 if (freezer_incore_cseg_acct) {
2083 incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
2084 incore_compressed_pages = c_segment_pages_compressed_incore;
2085 } else {
2086 incore_seg_count = c_segment_count;
2087 incore_compressed_pages = c_segment_pages_compressed;
2088 }
2089
2090 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
2091 (incore_compressed_pages * 100) / c_segment_pages_compressed_limit,
2092 (incore_compressed_pages > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
2093 (incore_seg_count * 100) / c_segments_limit,
2094 (incore_seg_count > c_segments_nearing_limit) ? "BAD":"OK",
2095 vm_num_swap_files,
2096 isswaplow ? "LOW":"OK");
2097 #else /* CONFIG_FREEZE */
2098 paniclog_append_noflush("Compressor Info: %u%% of compressed pages limit (%s) and %u%% of segments limit (%s) with %d swapfiles and %s swap space\n",
2099 (c_segment_pages_compressed * 100) / c_segment_pages_compressed_limit,
2100 (c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) ? "BAD":"OK",
2101 (c_segment_count * 100) / c_segments_limit,
2102 (c_segment_count > c_segments_nearing_limit) ? "BAD":"OK",
2103 vm_num_swap_files,
2104 isswaplow ? "LOW":"OK");
2105 #endif /* CONFIG_FREEZE */
2106 }
2107
2108 #if !CONFIG_TELEMETRY
2109 int
2110 telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, bool mark __unused)
2111 {
2112 return KERN_NOT_SUPPORTED;
2113 }
2114 #endif
2115
2116 #include <machine/machine_cpu.h>
2117
2118 TUNABLE(uint32_t, kern_feature_overrides, "validation_disables", 0);
2119
2120 boolean_t
2121 kern_feature_override(uint32_t fmask)
2122 {
2123 return (kern_feature_overrides & fmask) == fmask;
2124 }
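/*
 * Illustrative sketch (not built): a subsystem that wants to honor the
 * "validation_disables" boot-arg defines a bit for its check and gates the
 * expensive work on kern_feature_override(). The mask value and function
 * below are hypothetical (the kernel's real masks are the KF_* values).
 */
#if 0 /* example only */
#define EXAMPLE_KF_SKIP_VALIDATION (1U << 5) /* hypothetical bit */

static void
example_run_optional_validation(void)
{
	if (kern_feature_override(EXAMPLE_KF_SKIP_VALIDATION)) {
		/* validation explicitly disabled via the boot-arg */
		return;
	}
	/* ... perform the (hypothetical) expensive validation here ... */
}
#endif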
2125
2126 #if !XNU_TARGET_OS_OSX && CONFIG_KDP_INTERACTIVE_DEBUGGING
2127 static boolean_t
2128 device_corefile_valid_on_ephemeral(void)
2129 {
2130 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
2131 DTEntry node;
2132 const uint32_t *value = NULL;
2133 unsigned int size = 0;
2134 if (kSuccess != SecureDTLookupEntry(NULL, "/product", &node)) {
2135 return TRUE;
2136 }
2137 if (kSuccess != SecureDTGetProperty(node, "ephemeral-data-mode", (void const **) &value, &size)) {
2138 return TRUE;
2139 }
2140
2141 if (size != sizeof(uint32_t)) {
2142 return TRUE;
2143 }
2144
2145 if ((*value) && (kern_dump_should_enforce_encryption() == true)) {
2146 return FALSE;
2147 }
2148 #endif /* ifdef CONFIG_KDP_COREDUMP_ENCRYPTION */
2149
2150 return TRUE;
2151 }
2152 #endif /* !XNU_TARGET_OS_OSX && CONFIG_KDP_INTERACTIVE_DEBUGGING */
2153
2154 boolean_t
2155 on_device_corefile_enabled(void)
2156 {
2157 assert(startup_phase >= STARTUP_SUB_TUNABLES);
2158 #if CONFIG_KDP_INTERACTIVE_DEBUGGING
2159 if (debug_boot_arg == 0) {
2160 return FALSE;
2161 }
2162 if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) {
2163 return FALSE;
2164 }
2165 #if !XNU_TARGET_OS_OSX
2166 if (device_corefile_valid_on_ephemeral() == FALSE) {
2167 return FALSE;
2168 }
2169 /*
2170 * outside of macOS, if there's a debug boot-arg set and local
2171 * cores aren't explicitly disabled, we always write a corefile.
2172 */
2173 return TRUE;
2174 #else /* !XNU_TARGET_OS_OSX */
2175 /*
2176 * on macOS, if corefiles on panic are requested and local cores
2177 * aren't disabled we write a local core.
2178 */
2179 if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) {
2180 return TRUE;
2181 }
2182 #endif /* !XNU_TARGET_OS_OSX */
2183 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
2184 return FALSE;
2185 }
2186
2187 boolean_t
2188 panic_stackshot_to_disk_enabled(void)
2189 {
2190 assert(startup_phase >= STARTUP_SUB_TUNABLES);
2191 #if defined(__x86_64__)
2192 if (PEGetCoprocessorVersion() < kCoprocessorVersion2) {
2193 /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */
2194 if ((debug_boot_arg != 0) && (debug_boot_arg & DB_DISABLE_STACKSHOT_TO_DISK)) {
2195 return FALSE;
2196 }
2197
2198 return TRUE;
2199 }
2200 #endif
2201 return FALSE;
2202 }
2203
2204 const char *
2205 sysctl_debug_get_preoslog(size_t *size)
2206 {
2207 int result = 0;
2208 void *preoslog_pa = NULL;
2209 int preoslog_size = 0;
2210
2211 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2212 if (result || preoslog_pa == NULL || preoslog_size == 0) {
2213 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2214 *size = 0;
2215 return NULL;
2216 }
2217
2218 /*
2219 * Beware:
2220 * On release builds, the preoslog buffer must eventually be freed by calling IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) (see sysctl_debug_free_preoslog() below).
2221 * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps.
2222 */
2223 *size = preoslog_size;
2224 return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa)));
2225 }
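/*
 * Illustrative sketch (not built): a hypothetical in-kernel consumer of the
 * accessor above; the real consumer is the corresponding sysctl handler in
 * the BSD layer, which copies the buffer out to user space.
 */
#if 0 /* example only */
static void
example_report_preoslog_size(void)
{
	size_t preoslog_len = 0;
	const char *preoslog = sysctl_debug_get_preoslog(&preoslog_len);

	if (preoslog != NULL && preoslog_len != 0) {
		kprintf("preoslog region is %lu bytes\n", (unsigned long)preoslog_len);
	}
}
#endif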
2226
2227 void
2228 sysctl_debug_free_preoslog(void)
2229 {
2230 #if RELEASE
2231 int result = 0;
2232 void *preoslog_pa = NULL;
2233 int preoslog_size = 0;
2234
2235 result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size);
2236 if (result || preoslog_pa == NULL || preoslog_size == 0) {
2237 kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size);
2238 return;
2239 }
2240
2241 IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size);
2242 #else
2243 /* On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. */
2244 #endif // RELEASE
2245 }
2246
2247
2248 #if (DEVELOPMENT || DEBUG)
2249
2250 void
2251 platform_stall_panic_or_spin(uint32_t req)
2252 {
2253 if (xnu_platform_stall_value & req) {
2254 if (xnu_platform_stall_value & PLATFORM_STALL_XNU_ACTION_PANIC) {
2255 panic("Platform stall: User requested panic");
2256 } else {
2257 paniclog_append_noflush("\nUser requested platform stall. Stall Code: 0x%x", req);
2258 panic_spin_forever();
2259 }
2260 }
2261 }
2262 #endif
2263
2264
2265 #define AWL_HV_ENTRY_FLAG (0x1)
2266
2267 static inline void
2268 awl_set_scratch_reg_hv_bit(void)
2269 {
2270 #if defined(__arm64__)
2271 #define WATCHDOG_DIAG0 "S3_5_c15_c2_6"
2272 uint64_t awl_diag0 = __builtin_arm_rsr64(WATCHDOG_DIAG0);
2273 awl_diag0 |= AWL_HV_ENTRY_FLAG;
2274 __builtin_arm_wsr64(WATCHDOG_DIAG0, awl_diag0);
2275 #endif // defined(__arm64__)
2276 }
2277
2278 void
2279 awl_mark_hv_entry(void)
2280 {
2281 if (__probable(*PERCPU_GET(hv_entry_detected) || !awl_scratch_reg_supported)) {
2282 return;
2283 }
2284 *PERCPU_GET(hv_entry_detected) = true;
2285
2286 awl_set_scratch_reg_hv_bit();
2287 }
2288
2289 /*
2290 * AWL WatchdogDiag0 is not restored by hardware when coming out of reset,
2291 * so restore it manually.
2292 */
2293 static bool
2294 awl_pm_state_change_cbk(void *param __unused, enum cpu_event event, unsigned int cpu_or_cluster __unused)
2295 {
2296 if (event == CPU_BOOTED) {
2297 if (*PERCPU_GET(hv_entry_detected)) {
2298 awl_set_scratch_reg_hv_bit();
2299 }
2300 }
2301
2302 return true;
2303 }
2304
2305 /*
2306 * Identifies whether AWL Scratch0/1 exists in the system and, if so, sets a flag and
2307 * subscribes for a callback that restores the register after hibernation.
2308 */
2309 __startup_func
2310 static void
2311 set_awl_scratch_exists_flag_and_subscribe_for_pm(void)
2312 {
2313 DTEntry base = NULL;
2314
2315 if (SecureDTLookupEntry(NULL, "/arm-io/wdt", &base) != kSuccess) {
2316 return;
2317 }
2318 const uint8_t *data = NULL;
2319 unsigned int data_size = sizeof(uint8_t);
2320
2321 if (base != NULL && SecureDTGetProperty(base, "awl-scratch-supported", (const void **)&data, &data_size) == kSuccess) {
2322 for (unsigned int i = 0; i < data_size; i++) {
2323 if (data[i] != 0) {
2324 awl_scratch_reg_supported = true;
2325 cpu_event_register_callback(awl_pm_state_change_cbk, NULL);
2326 break;
2327 }
2328 }
2329 }
2330 }
2331 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, set_awl_scratch_exists_flag_and_subscribe_for_pm);
2332
2333 /**
2334 * Signal that the system is going down for a panic
2335 */
2336 static inline void
2337 debug_fatal_panic_begin(void)
2338 {
2339 #if CONFIG_SPTM
2340 /*
2341 * Since we're going down, initiate panic lockdown.
2342 *
2343 * Whether or not this call to panic lockdown can be subverted is murky.
2344 * This doesn't really matter, however, because any security-critical panic
2345 * events will have already initiated lockdown from the exception vector
2346 * before calling panic. Thus, lockdown from panic itself is fine as merely
2347 * a "best effort".
2348 */
2349 #if DEVELOPMENT || DEBUG
2350 panic_lockdown_record_debug_data();
2351 #endif /* DEVELOPMENT || DEBUG */
2352 sptm_xnu_panic_begin();
2353 #endif /* CONFIG_SPTM */
2354 }
2355