/*
 * Copyright (c) 2005-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

// NOTE: This file is only c++ so I can get static initialisers going
#include <libkern/OSDebug.h>
#include <IOKit/IOLib.h>

#include <sys/cdefs.h>

#include <stdarg.h>
#include <mach/mach_types.h>
#include <mach/kmod.h>
#include <kern/locks.h>

#include <libkern/libkern.h>    // From bsd's libkern directory
#include <mach/vm_param.h>

#include <sys/kdebug.h>
#include <kern/thread.h>

extern int etext;
__BEGIN_DECLS
// From osfmk/kern/thread.h but considered to be private
extern vm_offset_t min_valid_stack_address(void);
extern vm_offset_t max_valid_stack_address(void);

// From osfmk/kern/printf.c
extern boolean_t doprnt_hide_pointers;

// From osfmk/kmod.c
extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt, boolean_t doUnslide);

extern addr64_t kvtophys(vm_offset_t va);
#if __arm__
extern int copyinframe(vm_address_t fp, char *frame);
#elif defined(__arm64__)
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
#endif

__END_DECLS

extern lck_grp_t *IOLockGroup;

static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);

/* Use kernel_debug() to log a backtrace */
void
trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data)
{
    void *bt[16];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    unsigned i;
    int found = 0;

    OSBacktrace(bt, cnt);

    /* Find the first non-kernel frame */
    for (i = 3; i < cnt && bt[i]; i++) {
        if (bt[i] > (void *) &etext) {
            found = 1;
            break;
        }
    }

    /*
     * If there are non-kernel frames, only log those;
     * otherwise, log everything but the first two.
     */
    if (!found) {
        i = 2;
    }

#define safe_bt(a) ((uintptr_t)((a) < cnt ? bt[(a)] : 0))
    kernel_debug(debugid, data, size, safe_bt(i), safe_bt(i + 1), 0);
    kernel_debug(debugid2, safe_bt(i + 2), safe_bt(i + 3), safe_bt(i + 4), safe_bt(i + 5), 0);
}
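
/*
 * Usage sketch (editorial, illustrative only): MY_TRACE_CODE_1 and
 * MY_TRACE_CODE_2 are hypothetical debugids a caller would build with
 * KDBG_CODE(); they are not defined anywhere in this file.
 *
 *   trace_backtrace(MY_TRACE_CODE_1, MY_TRACE_CODE_2,
 *                   allocSize, (uintptr_t) allocPtr);
 *
 * The payload (data, size, and up to six backtrace words) exceeds the
 * four arguments of a single kdebug entry, hence the two debugids: the
 * first entry carries data/size plus two frames, the second carries the
 * remaining four frames.
 */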

/* Report a message with a 7-entry backtrace - very slow */
void
OSReportWithBacktrace(const char *str, ...)
{
    char buf[128];
    void *bt[9] = {};
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    va_list listp;

    // Frames 0 & 1 (this function and its caller) are skipped below
    (void) OSBacktrace(bt, cnt);

    va_start(listp, str);
    vsnprintf(buf, sizeof(buf), str, listp);
    va_end(listp);

    lck_mtx_lock(sOSReportLock);
    {
        boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
        doprnt_hide_pointers = FALSE;
        printf("%s\nBacktrace 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", buf,
            (unsigned long) VM_KERNEL_UNSLIDE(bt[2]), (unsigned long) VM_KERNEL_UNSLIDE(bt[3]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[4]), (unsigned long) VM_KERNEL_UNSLIDE(bt[5]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[6]), (unsigned long) VM_KERNEL_UNSLIDE(bt[7]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[8]));
        kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2, TRUE);
        doprnt_hide_pointers = old_doprnt_hide_pointers;
    }
    lck_mtx_unlock(sOSReportLock);
}

static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();

#if __x86_64__
#define x86_64_RETURN_OFFSET 8
static unsigned int
x86_64_validate_raddr(vm_offset_t raddr)
{
    return ((raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
        (raddr < VM_MAX_KERNEL_ADDRESS));
}

static unsigned int
x86_64_validate_stackptr(vm_offset_t stackptr)
{
    /* Existence and alignment check */
    if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr)) {
        return 0;
    }

    /* Is a virtual->physical translation present? */
    if (!kvtophys(stackptr)) {
        return 0;
    }

    /*
     * Check whether the return address lies on the same page;
     * if not, verify that a translation exists for it too.
     */
    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
        !kvtophys(stackptr + x86_64_RETURN_OFFSET)) {
        return 0;
    }
    return 1;
}
#endif
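
/*
 * Editorial note on the x86-64 walk below: with frame pointers enabled,
 * each frame stores the caller's saved %rbp at 0(%rbp) and the return
 * address at 8(%rbp), which is why x86_64_RETURN_OFFSET is 8:
 *
 *   higher addresses
 *     ...
 *     return address   <- stackptr + x86_64_RETURN_OFFSET
 *     saved %rbp       <- stackptr (the current frame pointer)
 *     ...
 *   lower addresses
 *
 * OSBacktrace() follows this saved-%rbp chain, validating every link
 * with x86_64_validate_stackptr() before dereferencing it.
 */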

void
OSPrintBacktrace(void)
{
    void *btbuf[20];
    int tmp = OSBacktrace(btbuf, 20);
    int i;

    for (i = 0; i < tmp; i++) {
        kprintf("bt[%.2d] = %p\n", i, btbuf[i]);
    }
}

unsigned
OSBacktrace(void **bt, unsigned maxAddrs)
{
    unsigned frame;

    if (!current_thread()) {
        return 0;
    }

#if __x86_64__
#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;

    /* Obtain the current frame pointer */
    __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));

    if (!x86_64_validate_stackptr(stackptr)) {
        goto pad;
    }

    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

    if (!x86_64_validate_raddr(raddr)) {
        goto pad;
    }

    bt[frame_index++] = (void *) raddr;

    for ( ; frame_index < maxAddrs; frame_index++) {
        stackptr_prev = stackptr;
        stackptr = *((vm_offset_t *) stackptr_prev);

        if (!x86_64_validate_stackptr(stackptr)) {
            break;
        }
        /* The stack grows downwards, so each saved frame pointer must be higher */
        if (stackptr < stackptr_prev) {
            break;
        }
        if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE) {
            break;
        }

        raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

        if (!x86_64_validate_raddr(raddr)) {
            break;
        }

        bt[frame_index] = (void *) raddr;
    }
pad:
    frame = frame_index;

    /* Zero-fill the unused tail of the caller's buffer */
    for ( ; frame_index < maxAddrs; frame_index++) {
        bt[frame_index] = (void *) 0;
    }
#elif __arm__ || __arm64__
    uint32_t i = 0;
    uintptr_t frameb[2];
    uintptr_t fp = 0;

    // Get the current frame pointer for this thread
#if defined(__arm__)
#define OSBacktraceFrameAlignOK(x) (((x) & 0x3) == 0)
    __asm__ volatile("mov %0, r7" : "=r" (fp));
#elif defined(__arm64__)
#define OSBacktraceFrameAlignOK(x) (((x) & 0xf) == 0)
    __asm__ volatile("mov %0, fp" : "=r" (fp));
#else
#error Unknown architecture.
#endif

    // Now crawl up the stack, recording the link value of each frame
    do {
        // Check alignment and kernel address bounds
        if ((fp == 0) || (!OSBacktraceFrameAlignOK(fp)) ||
            (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
            break;
        }
        // Safely read the frame
#ifdef __arm64__
        if (copyinframe(fp, (char *) frameb, TRUE) != 0) {
#else
        if (copyinframe(fp, (char *) frameb) != 0) {
#endif
            break;
        }

        // No need to use copyin as this is always a kernel address, see check above
        bt[i] = (void *) frameb[1];     // link register
        fp = frameb[0];                 // saved frame pointer
    } while (++i < maxAddrs);
    frame = i;
#else
#error Unknown architecture.
#endif
    return frame;
}
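
/*
 * Usage sketch (editorial, hypothetical caller; `obj` is illustrative):
 *
 *   OSReportWithBacktrace("object %p freed while in use", obj);
 *
 * The format string is printf-style. OSReportWithBacktrace() serialises
 * on sOSReportLock and prints synchronously, so it is meant for rare
 * diagnostic reports; on hot paths prefer trace_backtrace(), which logs
 * through the much cheaper kernel_debug() path.
 */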