/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

// NOTE: This file is only C++ so I can get static initialisers going
#include <libkern/OSDebug.h>
#include <IOKit/IOLib.h>

#include <sys/cdefs.h>

#include <stdarg.h>
#include <mach/mach_types.h>
#include <mach/kmod.h>
#include <kern/locks.h>

#include <libkern/libkern.h>    // From bsd's libkern directory
#include <mach/vm_param.h>

#include <sys/kdebug.h>
#include <kern/thread.h>

extern int etext;
__BEGIN_DECLS
// From osfmk/kern/thread.h but considered to be private
extern vm_offset_t min_valid_stack_address(void);
extern vm_offset_t max_valid_stack_address(void);

// From osfmk/kmod.c
extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt);

extern addr64_t kvtophys(vm_offset_t va);

__END_DECLS

extern lck_grp_t *IOLockGroup;

static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);

/* Use kernel_debug() to log a backtrace */
void
trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data) {
    void *bt[16];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    unsigned i;
    int found = 0;

    OSBacktrace(bt, cnt);

    /* find first non-kernel frame */
    for (i = 3; i < cnt && bt[i]; i++) {
        if (bt[i] > (void *)&etext) {
            found = 1;
            break;
        }
    }

    /*
     * if there are non-kernel frames, only log those;
     * otherwise, log everything but the first two
     */
    if (!found)
        i = 2;

#define safe_bt(a) (uintptr_t)((a) < cnt ? bt[(a)] : 0)
    kernel_debug(debugid, data, size, safe_bt(i), safe_bt(i + 1), 0);
    kernel_debug(debugid2, safe_bt(i + 2), safe_bt(i + 3), safe_bt(i + 4), safe_bt(i + 5), 0);
}
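
/*
 * Usage sketch for trace_backtrace() (illustrative only: MYDBG_CODE_A and
 * MYDBG_CODE_B stand in for real kdebug codes, which this file does not
 * define):
 *
 *     trace_backtrace(MYDBG_CODE_A, MYDBG_CODE_B,
 *         allocSize, (uintptr_t) allocPtr);
 *
 * This emits two kernel_debug() entries carrying the data/size pair plus
 * up to six return addresses, starting at the first frame found above the
 * kernel's text segment (or at frame 2 if none is found).
 */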

/* Report a message with a 7-entry backtrace - very slow */
void
OSReportWithBacktrace(const char *str, ...)
{
    char buf[128];
    void *bt[9];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    va_list listp;

    // Ignore our own and our caller's stack frames, skipping frames 0 & 1
    (void) OSBacktrace(bt, cnt);

    va_start(listp, str);
    vsnprintf(buf, sizeof(buf), str, listp);
    va_end(listp);

    lck_mtx_lock(sOSReportLock);
    {
        printf("%s\nBacktrace %p %p %p %p %p %p %p\n",
            buf, bt[2], bt[3], bt[4], bt[5], bt[6], bt[7], bt[8]);
        kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2);
    }
    lck_mtx_unlock(sOSReportLock);
}

static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();

#if __i386__
#define i386_RETURN_OFFSET 4

static unsigned int
i386_validate_stackptr(vm_offset_t stackptr)
{
    /* Existence and alignment check
     */
    if (!stackptr || (stackptr & 0x3))
        return 0;

    /* Is a virtual->physical translation present?
     */
    if (!kvtophys(stackptr))
        return 0;

    /* Check if the return address lies on the same page;
     * If not, verify that a translation exists.
     */
    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < i386_RETURN_OFFSET) &&
        !kvtophys(stackptr + i386_RETURN_OFFSET))
        return 0;
    return 1;
}

static unsigned int
i386_validate_raddr(vm_offset_t raddr)
{
    return ((raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
        (raddr < VM_MAX_KERNEL_ADDRESS));
}
#endif
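
/*
 * Note on the layout the stack validators rely on: with frame pointers in
 * use, each x86 frame stores the caller's frame pointer at offset 0 from
 * %ebp/%rbp and the return address one word above it, which is where the
 * 4- and 8-byte RETURN_OFFSET values come from.
 */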

#if __x86_64__
#define x86_64_RETURN_OFFSET 8
static unsigned int
x86_64_validate_raddr(vm_offset_t raddr)
{
    return ((raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
        (raddr < VM_MAX_KERNEL_ADDRESS));
}
static unsigned int
x86_64_validate_stackptr(vm_offset_t stackptr)
{
    /* Existence and alignment check
     */
    if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr))
        return 0;

    /* Is a virtual->physical translation present?
     */
    if (!kvtophys(stackptr))
        return 0;

    /* Check if the return address lies on the same page;
     * If not, verify that a translation exists.
     */
    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
        !kvtophys(stackptr + x86_64_RETURN_OFFSET))
        return 0;
    return 1;
}
#endif

void
OSPrintBacktrace(void)
{
    void *btbuf[20];
    int tmp = OSBacktrace(btbuf, 20);
    int i;

    for (i = 0; i < tmp; i++) {
        kprintf("bt[%.2d] = %p\n", i, btbuf[i]);
    }
}

unsigned OSBacktrace(void **bt, unsigned maxAddrs)
{
    unsigned frame;

#if __i386__
#define SANE_i386_FRAME_SIZE (kernel_stack_size >> 1)
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;

    /* Obtain current frame pointer */
    __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));

    if (!i386_validate_stackptr(stackptr))
        goto pad;

    raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));

    if (!i386_validate_raddr(raddr))
        goto pad;

    bt[frame_index++] = (void *) raddr;

    for ( ; frame_index < maxAddrs; frame_index++) {
        stackptr_prev = stackptr;
        stackptr = *((vm_offset_t *) stackptr_prev);

        if (!i386_validate_stackptr(stackptr))
            break;
        /* Stack grows downwards */
        if (stackptr < stackptr_prev)
            break;

        if ((stackptr - stackptr_prev) > SANE_i386_FRAME_SIZE)
            break;

        raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));

        if (!i386_validate_raddr(raddr))
            break;

        bt[frame_index] = (void *) raddr;
    }
pad:
    frame = frame_index;

    for ( ; frame_index < maxAddrs; frame_index++)
        bt[frame_index] = (void *) 0;
#elif __x86_64__
#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;

    /* Obtain current frame pointer */
    __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));

    if (!x86_64_validate_stackptr(stackptr))
        goto pad;

    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

    if (!x86_64_validate_raddr(raddr))
        goto pad;

    bt[frame_index++] = (void *) raddr;

    for ( ; frame_index < maxAddrs; frame_index++) {
        stackptr_prev = stackptr;
        stackptr = *((vm_offset_t *) stackptr_prev);

        if (!x86_64_validate_stackptr(stackptr))
            break;
        /* Stack grows downwards */
        if (stackptr < stackptr_prev)
            break;

        if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE)
            break;

        raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

        if (!x86_64_validate_raddr(raddr))
            break;

        bt[frame_index] = (void *) raddr;
    }
pad:
    frame = frame_index;

    for ( ; frame_index < maxAddrs; frame_index++)
        bt[frame_index] = (void *) 0;
#else
#error arch
#endif
    return frame;
}
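
/*
 * Usage sketch for the public entry points (illustrative only; `obj` is a
 * hypothetical pointer being diagnosed):
 *
 *     void *frames[9];
 *     unsigned depth = OSBacktrace(frames, 9);
 *
 *     OSReportWithBacktrace("object %p over-released", obj);
 *
 * OSBacktrace() returns the number of valid return addresses captured and
 * zero-fills the rest of the buffer; OSReportWithBacktrace() formats the
 * message and prints frames 2-8 while holding sOSReportLock.
 */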