/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

// NOTE: This file is only C++ so I can get static initialisers going
#include <libkern/OSDebug.h>

#include <sys/cdefs.h>

#include <stdarg.h>
#include <mach/mach_types.h>
#include <mach/kmod.h>
#include <kern/lock.h>

#include <libkern/libkern.h>    // From bsd's libkern directory
#include <mach/vm_param.h>

#include <sys/kdebug.h>

extern int etext;

__BEGIN_DECLS
// From osfmk/kern/thread.h but considered to be private
extern vm_offset_t min_valid_stack_address(void);
extern vm_offset_t max_valid_stack_address(void);

// From osfmk/kmod.c
extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt);

extern addr64_t kvtophys(vm_offset_t va);
#if __arm__
extern int copyinframe(vm_address_t fp, uint32_t *frame);
#endif
__END_DECLS

static mutex_t *sOSReportLock = mutex_alloc(0);

/* Use kernel_debug() to log a backtrace */
void
trace_backtrace(unsigned int debugid, unsigned int debugid2, int size, int data)
{
    void *bt[16];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    unsigned i;
    int found = 0;

    OSBacktrace(bt, cnt);

    /* find the first non-kernel frame */
    for (i = 3; i < cnt && bt[i]; i++) {
        if (bt[i] > (void *) &etext) {
            found = 1;
            break;
        }
    }
    /*
     * if there are non-kernel frames, only log these;
     * otherwise, log everything but the first two
     */
    if (!found)
        i = 2;

#define safe_bt(a) (int)((a) < cnt ? bt[(a)] : 0)
    kernel_debug(debugid, data, size, safe_bt(i), safe_bt(i+1), 0);
    kernel_debug(debugid2, safe_bt(i+2), safe_bt(i+3), safe_bt(i+4), safe_bt(i+5), 0);
}

/* Report a message with a 7 entry backtrace - very slow */
void
OSReportWithBacktrace(const char *str, ...)
{
    char buf[128];
    void *bt[9];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    va_list listp;

    // Ignore our own and our caller's stack frames, skipping frames 0 & 1
    (void) OSBacktrace(bt, cnt);

    va_start(listp, str);
    vsnprintf(buf, sizeof(buf), str, listp);
    va_end(listp);

    mutex_lock(sOSReportLock);
    {
        printf("%s\nBacktrace %p %p %p %p %p %p %p\n",
            buf, bt[2], bt[3], bt[4], bt[5], bt[6], bt[7], bt[8]);
        kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2);
    }
    mutex_unlock(sOSReportLock);
}

static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();

#if __i386__
#define i386_RETURN_OFFSET 4

static unsigned int
i386_validate_stackptr(vm_offset_t stackptr)
{
    /* Existence and alignment check
     */
    if (!stackptr || (stackptr & 0x3))
        return 0;

    /* Is a virtual->physical translation present?
     */
    if (!kvtophys(stackptr))
        return 0;

    /* Check if the return address lies on the same page;
     * if not, verify that a translation exists.
     */
    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < i386_RETURN_OFFSET) &&
        !kvtophys(stackptr + i386_RETURN_OFFSET))
        return 0;
    return 1;
}

static unsigned int
i386_validate_raddr(vm_offset_t raddr)
{
    return ((raddr > VM_MIN_KERNEL_ADDRESS) &&
        (raddr < VM_MAX_KERNEL_ADDRESS));
}
#endif

unsigned OSBacktrace(void **bt, unsigned maxAddrs)
{
    unsigned frame;

#if __ppc__
    vm_offset_t stackptr, stackptr_prev;
    // A zero base pointer lets us index raw kernel memory as an array of words
    const vm_offset_t * const mem = (vm_offset_t *) 0;
    unsigned i = 0;

    // The link register holds our own return address; record it first
    __asm__ volatile("mflr %0" : "=r" (stackptr));
    bt[i++] = (void *) stackptr;

    // r1 is the PowerPC stack pointer; walk the chain of saved frames
    __asm__ volatile("mr %0,r1" : "=r" (stackptr));
    for ( ; i < maxAddrs; i++) {
        // Validate we have a reasonable stackptr
        if ( !(minstackaddr <= stackptr && stackptr < maxstackaddr)
            || (stackptr & 3))
            break;

        stackptr_prev = stackptr;
        stackptr = mem[stackptr_prev >> 2];
        if ((stackptr_prev ^ stackptr) > 8 * 1024)      // Sanity check
            break;

        vm_offset_t addr = mem[(stackptr >> 2) + 2];    // saved LR slot, FP + 8
        if ((addr & 3) || (addr < 0x8000))              // More sanity checks
            break;
        bt[i] = (void *) addr;
    }
    frame = i;

    for ( ; i < maxAddrs; i++)
        bt[i] = (void *) 0;
#elif __i386__
#define SANE_i386_FRAME_SIZE (8*1024)
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;

    /* Obtain current frame pointer */
    __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));

    if (!i386_validate_stackptr(stackptr))
        goto pad;

    raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));

    if (!i386_validate_raddr(raddr))
        goto pad;

    bt[frame_index++] = (void *) raddr;

    for ( ; frame_index < maxAddrs; frame_index++) {
        stackptr_prev = stackptr;
        stackptr = *((vm_offset_t *) stackptr_prev);

        if (!i386_validate_stackptr(stackptr))
            break;
        /* Stack grows downwards */
        if (stackptr < stackptr_prev)
            break;

        if ((stackptr_prev ^ stackptr) > SANE_i386_FRAME_SIZE)
            break;

        raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));

        if (!i386_validate_raddr(raddr))
            break;

        bt[frame_index] = (void *) raddr;
    }
pad:
    frame = frame_index;

    for ( ; frame_index < maxAddrs; frame_index++)
        bt[frame_index] = (void *) 0;
#elif __arm__
    uint32_t i = 0;
    uint32_t frameb[2];
    uint32_t fp = 0;

    // get the current frame pointer for this thread (r7 on ARM)
    __asm__ volatile("mov %0,r7" : "=r" (fp));

    // now crawl up the stack recording the link value of each frame
    do {
        // check bounds
        if ((fp == 0) || ((fp & 3) != 0) || (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_ADDRESS)) {
            break;
        }
        // safely read the frame: frameb[0] is the saved frame pointer,
        // frameb[1] the saved link register
        if (copyinframe(fp, frameb) != 0) {
            break;
        }

        // No need to use copyin as this is always a kernel address, see check above
        bt[i] = (void *) frameb[1];     // link register
        fp = frameb[0];
    } while (++i < maxAddrs);
    frame = i;

    // Zero the unused slots, as the other architectures do, so callers that
    // read a fixed number of entries never see uninitialised values
    for ( ; i < maxAddrs; i++)
        bt[i] = (void *) 0;
#else
#error arch
#endif
    return frame;
}
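
/*
 * Illustrative usage sketch (not part of the original file; compiled out
 * with #if 0). It shows how the two public routines above fit together
 * from a hypothetical kernel-side caller; the function name sample_report()
 * is invented for illustration only. OSBacktrace() fills the caller's
 * array with up to maxAddrs return addresses and returns the number of
 * frames it actually recovered, zero-filling the rest; then
 * OSReportWithBacktrace() formats a printf-style message and logs it,
 * together with its own backtrace, under sOSReportLock.
 */
#if 0
static void
sample_report(void)
{
    void *bt[9];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    unsigned valid;

    // Capture up to cnt return addresses from the current call chain;
    // unused trailing slots come back as NULL.
    valid = OSBacktrace(bt, cnt);

    // Log a formatted message followed by a backtrace of this call site.
    OSReportWithBacktrace("sample_report: %u frames captured", valid);
}
#endif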