1 //===-- DNBArchImpl.cpp -----------------------------------------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Created by Greg Clayton on 6/25/07. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__) 15 16 #include "MacOSX/arm/DNBArchImpl.h" 17 #include "MacOSX/MachProcess.h" 18 #include "MacOSX/MachThread.h" 19 #include "DNBBreakpoint.h" 20 #include "DNBLog.h" 21 #include "DNBRegisterInfo.h" 22 #include "DNB.h" 23 #include "ARM_GCC_Registers.h" 24 #include "ARM_DWARF_Registers.h" 25 26 #include <inttypes.h> 27 #include <sys/sysctl.h> 28 29 // BCR address match type 30 #define BCR_M_IMVA_MATCH ((uint32_t)(0u << 21)) 31 #define BCR_M_CONTEXT_ID_MATCH ((uint32_t)(1u << 21)) 32 #define BCR_M_IMVA_MISMATCH ((uint32_t)(2u << 21)) 33 #define BCR_M_RESERVED ((uint32_t)(3u << 21)) 34 35 // Link a BVR/BCR or WVR/WCR pair to another 36 #define E_ENABLE_LINKING ((uint32_t)(1u << 20)) 37 38 // Byte Address Select 39 #define BAS_IMVA_PLUS_0 ((uint32_t)(1u << 5)) 40 #define BAS_IMVA_PLUS_1 ((uint32_t)(1u << 6)) 41 #define BAS_IMVA_PLUS_2 ((uint32_t)(1u << 7)) 42 #define BAS_IMVA_PLUS_3 ((uint32_t)(1u << 8)) 43 #define BAS_IMVA_0_1 ((uint32_t)(3u << 5)) 44 #define BAS_IMVA_2_3 ((uint32_t)(3u << 7)) 45 #define BAS_IMVA_ALL ((uint32_t)(0xfu << 5)) 46 47 // Break only in privileged or user mode 48 #define S_RSVD ((uint32_t)(0u << 1)) 49 #define S_PRIV ((uint32_t)(1u << 1)) 50 #define S_USER ((uint32_t)(2u << 1)) 51 #define S_PRIV_USER ((S_PRIV) | (S_USER)) 52 53 #define BCR_ENABLE ((uint32_t)(1u)) 54 #define WCR_ENABLE ((uint32_t)(1u)) 55 56 // Watchpoint load/store 57 #define WCR_LOAD ((uint32_t)(1u << 3)) 58 #define WCR_STORE ((uint32_t)(1u << 4)) 59 60 // Definitions for the Debug Status and Control Register fields: 61 // [5:2] => Method of debug entry 62 //#define WATCHPOINT_OCCURRED ((uint32_t)(2u)) 63 // I'm seeing this, instead. 64 #define WATCHPOINT_OCCURRED ((uint32_t)(10u)) 65 66 // 0xE120BE70 67 static const uint8_t g_arm_breakpoint_opcode[] = { 0x70, 0xBE, 0x20, 0xE1 }; 68 static const uint8_t g_thumb_breakpoint_opcode[] = { 0x70, 0xBE }; 69 70 // A watchpoint may need to be implemented using two watchpoint registers. 71 // e.g. watching an 8-byte region when the device can only watch 4-bytes. 72 // 73 // This stores the lo->hi mappings. It's safe to initialize to all 0's 74 // since hi > lo and therefore LoHi[i] cannot be 0. 75 static uint32_t LoHi[16] = { 0 }; 76 77 // ARM constants used during decoding 78 #define REG_RD 0 79 #define LDM_REGLIST 1 80 #define PC_REG 15 81 #define PC_REGLIST_BIT 0x8000 82 83 // ARM conditions 84 #define COND_EQ 0x0 85 #define COND_NE 0x1 86 #define COND_CS 0x2 87 #define COND_HS 0x2 88 #define COND_CC 0x3 89 #define COND_LO 0x3 90 #define COND_MI 0x4 91 #define COND_PL 0x5 92 #define COND_VS 0x6 93 #define COND_VC 0x7 94 #define COND_HI 0x8 95 #define COND_LS 0x9 96 #define COND_GE 0xA 97 #define COND_LT 0xB 98 #define COND_GT 0xC 99 #define COND_LE 0xD 100 #define COND_AL 0xE 101 #define COND_UNCOND 0xF 102 103 #define MASK_CPSR_T (1u << 5) 104 #define MASK_CPSR_J (1u << 24) 105 106 #define MNEMONIC_STRING_SIZE 32 107 #define OPERAND_STRING_SIZE 128 108 109 // Returns true if the first 16 bit opcode of a thumb instruction indicates 110 // the instruction will be a 32 bit thumb opcode 111 static bool 112 IsThumb32Opcode (uint16_t opcode) 113 { 114 if (((opcode & 0xE000) == 0xE000) && (opcode & 0x1800)) 115 return true; 116 return false; 117 } 118 119 void 120 DNBArchMachARM::Initialize() 121 { 122 DNBArchPluginInfo arch_plugin_info = 123 { 124 CPU_TYPE_ARM, 125 DNBArchMachARM::Create, 126 DNBArchMachARM::GetRegisterSetInfo, 127 DNBArchMachARM::SoftwareBreakpointOpcode 128 }; 129 130 // Register this arch plug-in with the main protocol class 131 DNBArchProtocol::RegisterArchPlugin (arch_plugin_info); 132 } 133 134 135 DNBArchProtocol * 136 DNBArchMachARM::Create (MachThread *thread) 137 { 138 DNBArchMachARM *obj = new DNBArchMachARM (thread); 139 return obj; 140 } 141 142 const uint8_t * 143 DNBArchMachARM::SoftwareBreakpointOpcode (nub_size_t byte_size) 144 { 145 switch (byte_size) 146 { 147 case 2: return g_thumb_breakpoint_opcode; 148 case 4: return g_arm_breakpoint_opcode; 149 } 150 return NULL; 151 } 152 153 uint32_t 154 DNBArchMachARM::GetCPUType() 155 { 156 return CPU_TYPE_ARM; 157 } 158 159 uint64_t 160 DNBArchMachARM::GetPC(uint64_t failValue) 161 { 162 // Get program counter 163 if (GetGPRState(false) == KERN_SUCCESS) 164 return m_state.context.gpr.__pc; 165 return failValue; 166 } 167 168 kern_return_t 169 DNBArchMachARM::SetPC(uint64_t value) 170 { 171 // Get program counter 172 kern_return_t err = GetGPRState(false); 173 if (err == KERN_SUCCESS) 174 { 175 m_state.context.gpr.__pc = (uint32_t) value; 176 err = SetGPRState(); 177 } 178 return err == KERN_SUCCESS; 179 } 180 181 uint64_t 182 DNBArchMachARM::GetSP(uint64_t failValue) 183 { 184 // Get stack pointer 185 if (GetGPRState(false) == KERN_SUCCESS) 186 return m_state.context.gpr.__sp; 187 return failValue; 188 } 189 190 kern_return_t 191 DNBArchMachARM::GetGPRState(bool force) 192 { 193 int set = e_regSetGPR; 194 // Check if we have valid cached registers 195 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 196 return KERN_SUCCESS; 197 198 // Read the registers from our thread 199 mach_msg_type_number_t count = ARM_THREAD_STATE_COUNT; 200 kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE, (thread_state_t)&m_state.context.gpr, &count); 201 uint32_t *r = &m_state.context.gpr.__r[0]; 202 DNBLogThreadedIf(LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs r0=%8.8x r1=%8.8x r2=%8.8x r3=%8.8x r4=%8.8x r5=%8.8x r6=%8.8x r7=%8.8x r8=%8.8x r9=%8.8x r10=%8.8x r11=%8.8x s12=%8.8x sp=%8.8x lr=%8.8x pc=%8.8x cpsr=%8.8x", 203 m_thread->MachPortNumber(), 204 ARM_THREAD_STATE, 205 ARM_THREAD_STATE_COUNT, 206 kret, 207 count, 208 r[0], 209 r[1], 210 r[2], 211 r[3], 212 r[4], 213 r[5], 214 r[6], 215 r[7], 216 r[8], 217 r[9], 218 r[10], 219 r[11], 220 r[12], 221 r[13], 222 r[14], 223 r[15], 224 r[16]); 225 m_state.SetError(set, Read, kret); 226 return kret; 227 } 228 229 kern_return_t 230 DNBArchMachARM::GetVFPState(bool force) 231 { 232 int set = e_regSetVFP; 233 // Check if we have valid cached registers 234 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 235 return KERN_SUCCESS; 236 237 kern_return_t kret; 238 239 #if defined (__arm64__) || defined (__aarch64__) 240 // Read the registers from our thread 241 mach_msg_type_number_t count = ARM_NEON_STATE_COUNT; 242 kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE, (thread_state_t)&m_state.context.vfp, &count); 243 if (DNBLogEnabledForAny (LOG_THREAD)) 244 { 245 DNBLogThreaded("thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs" 246 "\n q0 = 0x%16.16llx%16.16llx" 247 "\n q1 = 0x%16.16llx%16.16llx" 248 "\n q2 = 0x%16.16llx%16.16llx" 249 "\n q3 = 0x%16.16llx%16.16llx" 250 "\n q4 = 0x%16.16llx%16.16llx" 251 "\n q5 = 0x%16.16llx%16.16llx" 252 "\n q6 = 0x%16.16llx%16.16llx" 253 "\n q7 = 0x%16.16llx%16.16llx" 254 "\n q8 = 0x%16.16llx%16.16llx" 255 "\n q9 = 0x%16.16llx%16.16llx" 256 "\n q10 = 0x%16.16llx%16.16llx" 257 "\n q11 = 0x%16.16llx%16.16llx" 258 "\n q12 = 0x%16.16llx%16.16llx" 259 "\n q13 = 0x%16.16llx%16.16llx" 260 "\n q14 = 0x%16.16llx%16.16llx" 261 "\n q15 = 0x%16.16llx%16.16llx" 262 "\n fpsr = 0x%8.8x" 263 "\n fpcr = 0x%8.8x\n\n", 264 m_thread->MachPortNumber(), 265 ARM_NEON_STATE, 266 ARM_NEON_STATE_COUNT, 267 kret, 268 count, 269 ((uint64_t *)&m_state.context.vfp.__v[0])[0] , ((uint64_t *)&m_state.context.vfp.__v[0])[1], 270 ((uint64_t *)&m_state.context.vfp.__v[1])[0] , ((uint64_t *)&m_state.context.vfp.__v[1])[1], 271 ((uint64_t *)&m_state.context.vfp.__v[2])[0] , ((uint64_t *)&m_state.context.vfp.__v[2])[1], 272 ((uint64_t *)&m_state.context.vfp.__v[3])[0] , ((uint64_t *)&m_state.context.vfp.__v[3])[1], 273 ((uint64_t *)&m_state.context.vfp.__v[4])[0] , ((uint64_t *)&m_state.context.vfp.__v[4])[1], 274 ((uint64_t *)&m_state.context.vfp.__v[5])[0] , ((uint64_t *)&m_state.context.vfp.__v[5])[1], 275 ((uint64_t *)&m_state.context.vfp.__v[6])[0] , ((uint64_t *)&m_state.context.vfp.__v[6])[1], 276 ((uint64_t *)&m_state.context.vfp.__v[7])[0] , ((uint64_t *)&m_state.context.vfp.__v[7])[1], 277 ((uint64_t *)&m_state.context.vfp.__v[8])[0] , ((uint64_t *)&m_state.context.vfp.__v[8])[1], 278 ((uint64_t *)&m_state.context.vfp.__v[9])[0] , ((uint64_t *)&m_state.context.vfp.__v[9])[1], 279 ((uint64_t *)&m_state.context.vfp.__v[10])[0], ((uint64_t *)&m_state.context.vfp.__v[10])[1], 280 ((uint64_t *)&m_state.context.vfp.__v[11])[0], ((uint64_t *)&m_state.context.vfp.__v[11])[1], 281 ((uint64_t *)&m_state.context.vfp.__v[12])[0], ((uint64_t *)&m_state.context.vfp.__v[12])[1], 282 ((uint64_t *)&m_state.context.vfp.__v[13])[0], ((uint64_t *)&m_state.context.vfp.__v[13])[1], 283 ((uint64_t *)&m_state.context.vfp.__v[14])[0], ((uint64_t *)&m_state.context.vfp.__v[14])[1], 284 ((uint64_t *)&m_state.context.vfp.__v[15])[0], ((uint64_t *)&m_state.context.vfp.__v[15])[1], 285 m_state.context.vfp.__fpsr, 286 m_state.context.vfp.__fpcr); 287 288 } 289 #else 290 // Read the registers from our thread 291 mach_msg_type_number_t count = ARM_VFP_STATE_COUNT; 292 kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_VFP_STATE, (thread_state_t)&m_state.context.vfp, &count); 293 294 if (DNBLogEnabledForAny (LOG_THREAD)) 295 { 296 uint32_t *r = &m_state.context.vfp.__r[0]; 297 DNBLogThreaded ("thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count => %u)", 298 m_thread->MachPortNumber(), 299 ARM_THREAD_STATE, 300 ARM_THREAD_STATE_COUNT, 301 kret, 302 count); 303 DNBLogThreaded(" s0=%8.8x s1=%8.8x s2=%8.8x s3=%8.8x s4=%8.8x s5=%8.8x s6=%8.8x s7=%8.8x",r[ 0],r[ 1],r[ 2],r[ 3],r[ 4],r[ 5],r[ 6],r[ 7]); 304 DNBLogThreaded(" s8=%8.8x s9=%8.8x s10=%8.8x s11=%8.8x s12=%8.8x s13=%8.8x s14=%8.8x s15=%8.8x",r[ 8],r[ 9],r[10],r[11],r[12],r[13],r[14],r[15]); 305 DNBLogThreaded(" s16=%8.8x s17=%8.8x s18=%8.8x s19=%8.8x s20=%8.8x s21=%8.8x s22=%8.8x s23=%8.8x",r[16],r[17],r[18],r[19],r[20],r[21],r[22],r[23]); 306 DNBLogThreaded(" s24=%8.8x s25=%8.8x s26=%8.8x s27=%8.8x s28=%8.8x s29=%8.8x s30=%8.8x s31=%8.8x",r[24],r[25],r[26],r[27],r[28],r[29],r[30],r[31]); 307 DNBLogThreaded(" s32=%8.8x s33=%8.8x s34=%8.8x s35=%8.8x s36=%8.8x s37=%8.8x s38=%8.8x s39=%8.8x",r[32],r[33],r[34],r[35],r[36],r[37],r[38],r[39]); 308 DNBLogThreaded(" s40=%8.8x s41=%8.8x s42=%8.8x s43=%8.8x s44=%8.8x s45=%8.8x s46=%8.8x s47=%8.8x",r[40],r[41],r[42],r[43],r[44],r[45],r[46],r[47]); 309 DNBLogThreaded(" s48=%8.8x s49=%8.8x s50=%8.8x s51=%8.8x s52=%8.8x s53=%8.8x s54=%8.8x s55=%8.8x",r[48],r[49],r[50],r[51],r[52],r[53],r[54],r[55]); 310 DNBLogThreaded(" s56=%8.8x s57=%8.8x s58=%8.8x s59=%8.8x s60=%8.8x s61=%8.8x s62=%8.8x s63=%8.8x fpscr=%8.8x",r[56],r[57],r[58],r[59],r[60],r[61],r[62],r[63],r[64]); 311 } 312 313 #endif 314 m_state.SetError(set, Read, kret); 315 return kret; 316 } 317 318 kern_return_t 319 DNBArchMachARM::GetEXCState(bool force) 320 { 321 int set = e_regSetEXC; 322 // Check if we have valid cached registers 323 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 324 return KERN_SUCCESS; 325 326 // Read the registers from our thread 327 mach_msg_type_number_t count = ARM_EXCEPTION_STATE_COUNT; 328 kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, &count); 329 m_state.SetError(set, Read, kret); 330 return kret; 331 } 332 333 static void 334 DumpDBGState(const DNBArchMachARM::DBG& dbg) 335 { 336 uint32_t i = 0; 337 for (i=0; i<16; i++) 338 { 339 DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }", 340 i, i, dbg.__bvr[i], dbg.__bcr[i], 341 i, i, dbg.__wvr[i], dbg.__wcr[i]); 342 } 343 } 344 345 kern_return_t 346 DNBArchMachARM::GetDBGState(bool force) 347 { 348 int set = e_regSetDBG; 349 350 // Check if we have valid cached registers 351 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 352 return KERN_SUCCESS; 353 354 // Read the registers from our thread 355 #if defined (ARM_DEBUG_STATE32) && (defined (__arm64__) || defined (__aarch64__)) 356 mach_msg_type_number_t count = ARM_DEBUG_STATE32_COUNT; 357 kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE32, (thread_state_t)&m_state.dbg, &count); 358 #else 359 mach_msg_type_number_t count = ARM_DEBUG_STATE_COUNT; 360 kern_return_t kret = ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, &count); 361 #endif 362 m_state.SetError(set, Read, kret); 363 364 return kret; 365 } 366 367 kern_return_t 368 DNBArchMachARM::SetGPRState() 369 { 370 int set = e_regSetGPR; 371 kern_return_t kret = ::thread_set_state(m_thread->MachPortNumber(), ARM_THREAD_STATE, (thread_state_t)&m_state.context.gpr, ARM_THREAD_STATE_COUNT); 372 m_state.SetError(set, Write, kret); // Set the current write error for this register set 373 m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently 374 return kret; // Return the error code 375 } 376 377 kern_return_t 378 DNBArchMachARM::SetVFPState() 379 { 380 int set = e_regSetVFP; 381 kern_return_t kret; 382 mach_msg_type_number_t count; 383 384 #if defined (__arm64__) || defined (__aarch64__) 385 count = ARM_NEON_STATE_COUNT; 386 kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_NEON_STATE, (thread_state_t)&m_state.context.vfp, count); 387 #else 388 count = ARM_VFP_STATE_COUNT; 389 kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_VFP_STATE, (thread_state_t)&m_state.context.vfp, count); 390 #endif 391 392 #if defined (__arm64__) || defined (__aarch64__) 393 if (DNBLogEnabledForAny (LOG_THREAD)) 394 { 395 DNBLogThreaded("thread_set_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs" 396 "\n q0 = 0x%16.16llx%16.16llx" 397 "\n q1 = 0x%16.16llx%16.16llx" 398 "\n q2 = 0x%16.16llx%16.16llx" 399 "\n q3 = 0x%16.16llx%16.16llx" 400 "\n q4 = 0x%16.16llx%16.16llx" 401 "\n q5 = 0x%16.16llx%16.16llx" 402 "\n q6 = 0x%16.16llx%16.16llx" 403 "\n q7 = 0x%16.16llx%16.16llx" 404 "\n q8 = 0x%16.16llx%16.16llx" 405 "\n q9 = 0x%16.16llx%16.16llx" 406 "\n q10 = 0x%16.16llx%16.16llx" 407 "\n q11 = 0x%16.16llx%16.16llx" 408 "\n q12 = 0x%16.16llx%16.16llx" 409 "\n q13 = 0x%16.16llx%16.16llx" 410 "\n q14 = 0x%16.16llx%16.16llx" 411 "\n q15 = 0x%16.16llx%16.16llx" 412 "\n fpsr = 0x%8.8x" 413 "\n fpcr = 0x%8.8x\n\n", 414 m_thread->MachPortNumber(), 415 ARM_NEON_STATE, 416 ARM_NEON_STATE_COUNT, 417 kret, 418 count, 419 ((uint64_t *)&m_state.context.vfp.__v[0])[0] , ((uint64_t *)&m_state.context.vfp.__v[0])[1], 420 ((uint64_t *)&m_state.context.vfp.__v[1])[0] , ((uint64_t *)&m_state.context.vfp.__v[1])[1], 421 ((uint64_t *)&m_state.context.vfp.__v[2])[0] , ((uint64_t *)&m_state.context.vfp.__v[2])[1], 422 ((uint64_t *)&m_state.context.vfp.__v[3])[0] , ((uint64_t *)&m_state.context.vfp.__v[3])[1], 423 ((uint64_t *)&m_state.context.vfp.__v[4])[0] , ((uint64_t *)&m_state.context.vfp.__v[4])[1], 424 ((uint64_t *)&m_state.context.vfp.__v[5])[0] , ((uint64_t *)&m_state.context.vfp.__v[5])[1], 425 ((uint64_t *)&m_state.context.vfp.__v[6])[0] , ((uint64_t *)&m_state.context.vfp.__v[6])[1], 426 ((uint64_t *)&m_state.context.vfp.__v[7])[0] , ((uint64_t *)&m_state.context.vfp.__v[7])[1], 427 ((uint64_t *)&m_state.context.vfp.__v[8])[0] , ((uint64_t *)&m_state.context.vfp.__v[8])[1], 428 ((uint64_t *)&m_state.context.vfp.__v[9])[0] , ((uint64_t *)&m_state.context.vfp.__v[9])[1], 429 ((uint64_t *)&m_state.context.vfp.__v[10])[0], ((uint64_t *)&m_state.context.vfp.__v[10])[1], 430 ((uint64_t *)&m_state.context.vfp.__v[11])[0], ((uint64_t *)&m_state.context.vfp.__v[11])[1], 431 ((uint64_t *)&m_state.context.vfp.__v[12])[0], ((uint64_t *)&m_state.context.vfp.__v[12])[1], 432 ((uint64_t *)&m_state.context.vfp.__v[13])[0], ((uint64_t *)&m_state.context.vfp.__v[13])[1], 433 ((uint64_t *)&m_state.context.vfp.__v[14])[0], ((uint64_t *)&m_state.context.vfp.__v[14])[1], 434 ((uint64_t *)&m_state.context.vfp.__v[15])[0], ((uint64_t *)&m_state.context.vfp.__v[15])[1], 435 m_state.context.vfp.__fpsr, 436 m_state.context.vfp.__fpcr); 437 } 438 #else 439 if (DNBLogEnabledForAny (LOG_THREAD)) 440 { 441 uint32_t *r = &m_state.context.vfp.__r[0]; 442 DNBLogThreaded ("thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count => %u)", 443 m_thread->MachPortNumber(), 444 ARM_THREAD_STATE, 445 ARM_THREAD_STATE_COUNT, 446 kret, 447 count); 448 DNBLogThreaded(" s0=%8.8x s1=%8.8x s2=%8.8x s3=%8.8x s4=%8.8x s5=%8.8x s6=%8.8x s7=%8.8x",r[ 0],r[ 1],r[ 2],r[ 3],r[ 4],r[ 5],r[ 6],r[ 7]); 449 DNBLogThreaded(" s8=%8.8x s9=%8.8x s10=%8.8x s11=%8.8x s12=%8.8x s13=%8.8x s14=%8.8x s15=%8.8x",r[ 8],r[ 9],r[10],r[11],r[12],r[13],r[14],r[15]); 450 DNBLogThreaded(" s16=%8.8x s17=%8.8x s18=%8.8x s19=%8.8x s20=%8.8x s21=%8.8x s22=%8.8x s23=%8.8x",r[16],r[17],r[18],r[19],r[20],r[21],r[22],r[23]); 451 DNBLogThreaded(" s24=%8.8x s25=%8.8x s26=%8.8x s27=%8.8x s28=%8.8x s29=%8.8x s30=%8.8x s31=%8.8x",r[24],r[25],r[26],r[27],r[28],r[29],r[30],r[31]); 452 DNBLogThreaded(" s32=%8.8x s33=%8.8x s34=%8.8x s35=%8.8x s36=%8.8x s37=%8.8x s38=%8.8x s39=%8.8x",r[32],r[33],r[34],r[35],r[36],r[37],r[38],r[39]); 453 DNBLogThreaded(" s40=%8.8x s41=%8.8x s42=%8.8x s43=%8.8x s44=%8.8x s45=%8.8x s46=%8.8x s47=%8.8x",r[40],r[41],r[42],r[43],r[44],r[45],r[46],r[47]); 454 DNBLogThreaded(" s48=%8.8x s49=%8.8x s50=%8.8x s51=%8.8x s52=%8.8x s53=%8.8x s54=%8.8x s55=%8.8x",r[48],r[49],r[50],r[51],r[52],r[53],r[54],r[55]); 455 DNBLogThreaded(" s56=%8.8x s57=%8.8x s58=%8.8x s59=%8.8x s60=%8.8x s61=%8.8x s62=%8.8x s63=%8.8x fpscr=%8.8x",r[56],r[57],r[58],r[59],r[60],r[61],r[62],r[63],r[64]); 456 } 457 #endif 458 459 m_state.SetError(set, Write, kret); // Set the current write error for this register set 460 m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently 461 return kret; // Return the error code 462 } 463 464 kern_return_t 465 DNBArchMachARM::SetEXCState() 466 { 467 int set = e_regSetEXC; 468 kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_EXCEPTION_STATE, (thread_state_t)&m_state.context.exc, ARM_EXCEPTION_STATE_COUNT); 469 m_state.SetError(set, Write, kret); // Set the current write error for this register set 470 m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently 471 return kret; // Return the error code 472 } 473 474 kern_return_t 475 DNBArchMachARM::SetDBGState(bool also_set_on_task) 476 { 477 int set = e_regSetDBG; 478 #if defined (ARM_DEBUG_STATE32) && (defined (__arm64__) || defined (__aarch64__)) 479 kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_DEBUG_STATE32, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE32_COUNT); 480 if (also_set_on_task) 481 { 482 kern_return_t task_kret = ::task_set_state (m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE32, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE32_COUNT); 483 if (task_kret != KERN_SUCCESS) 484 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::SetDBGState failed to set debug control register state: 0x%8.8x.", kret); 485 } 486 #else 487 kern_return_t kret = ::thread_set_state (m_thread->MachPortNumber(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE_COUNT); 488 if (also_set_on_task) 489 { 490 kern_return_t task_kret = ::task_set_state (m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE, (thread_state_t)&m_state.dbg, ARM_DEBUG_STATE_COUNT); 491 if (task_kret != KERN_SUCCESS) 492 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::SetDBGState failed to set debug control register state: 0x%8.8x.", kret); 493 } 494 #endif 495 496 m_state.SetError(set, Write, kret); // Set the current write error for this register set 497 m_state.InvalidateRegisterSetState(set); // Invalidate the current register state in case registers are read back differently 498 return kret; // Return the error code 499 } 500 501 void 502 DNBArchMachARM::ThreadWillResume() 503 { 504 // Do we need to step this thread? If so, let the mach thread tell us so. 505 if (m_thread->IsStepping()) 506 { 507 // This is the primary thread, let the arch do anything it needs 508 if (NumSupportedHardwareBreakpoints() > 0) 509 { 510 if (EnableHardwareSingleStep(true) != KERN_SUCCESS) 511 { 512 DNBLogThreaded("DNBArchMachARM::ThreadWillResume() failed to enable hardware single step"); 513 } 514 } 515 } 516 517 // Disable the triggered watchpoint temporarily before we resume. 518 // Plus, we try to enable hardware single step to execute past the instruction which triggered our watchpoint. 519 if (m_watchpoint_did_occur) 520 { 521 if (m_watchpoint_hw_index >= 0) 522 { 523 kern_return_t kret = GetDBGState(false); 524 if (kret == KERN_SUCCESS && !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) { 525 // The watchpoint might have been disabled by the user. We don't need to do anything at all 526 // to enable hardware single stepping. 527 m_watchpoint_did_occur = false; 528 m_watchpoint_hw_index = -1; 529 return; 530 } 531 532 DisableHardwareWatchpoint(m_watchpoint_hw_index, false); 533 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() DisableHardwareWatchpoint(%d) called", 534 m_watchpoint_hw_index); 535 536 // Enable hardware single step to move past the watchpoint-triggering instruction. 537 m_watchpoint_resume_single_step_enabled = (EnableHardwareSingleStep(true) == KERN_SUCCESS); 538 539 // If we are not able to enable single step to move past the watchpoint-triggering instruction, 540 // at least we should reset the two watchpoint member variables so that the next time around 541 // this callback function is invoked, the enclosing logical branch is skipped. 542 if (!m_watchpoint_resume_single_step_enabled) { 543 // Reset the two watchpoint member variables. 544 m_watchpoint_did_occur = false; 545 m_watchpoint_hw_index = -1; 546 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() failed to enable single step"); 547 } 548 else 549 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() succeeded to enable single step"); 550 } 551 } 552 } 553 554 bool 555 DNBArchMachARM::ThreadDidStop() 556 { 557 bool success = true; 558 559 m_state.InvalidateRegisterSetState (e_regSetALL); 560 561 if (m_watchpoint_resume_single_step_enabled) 562 { 563 // Great! We now disable the hardware single step as well as re-enable the hardware watchpoint. 564 // See also ThreadWillResume(). 565 if (EnableHardwareSingleStep(false) == KERN_SUCCESS) 566 { 567 if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) 568 { 569 ReenableHardwareWatchpoint(m_watchpoint_hw_index); 570 m_watchpoint_resume_single_step_enabled = false; 571 m_watchpoint_did_occur = false; 572 m_watchpoint_hw_index = -1; 573 } 574 else 575 { 576 DNBLogError("internal error detected: m_watchpoint_resume_step_enabled is true but (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) does not hold!"); 577 } 578 } 579 else 580 { 581 DNBLogError("internal error detected: m_watchpoint_resume_step_enabled is true but unable to disable single step!"); 582 } 583 } 584 585 // Are we stepping a single instruction? 586 if (GetGPRState(true) == KERN_SUCCESS) 587 { 588 // We are single stepping, was this the primary thread? 589 if (m_thread->IsStepping()) 590 { 591 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 592 } 593 else 594 { 595 // The MachThread will automatically restore the suspend count 596 // in ThreadDidStop(), so we don't need to do anything here if 597 // we weren't the primary thread the last time 598 } 599 } 600 return success; 601 } 602 603 bool 604 DNBArchMachARM::NotifyException(MachException::Data& exc) 605 { 606 switch (exc.exc_type) 607 { 608 default: 609 break; 610 case EXC_BREAKPOINT: 611 if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) 612 { 613 // The data break address is passed as exc_data[1]. 614 nub_addr_t addr = exc.exc_data[1]; 615 // Find the hardware index with the side effect of possibly massaging the 616 // addr to return the starting address as seen from the debugger side. 617 uint32_t hw_index = GetHardwareWatchpointHit(addr); 618 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException watchpoint %d was hit on address 0x%llx", hw_index, (uint64_t) addr); 619 const int num_watchpoints = NumSupportedHardwareWatchpoints (); 620 for (int i = 0; i < num_watchpoints; i++) 621 { 622 if (LoHi[i] != 0 623 && LoHi[i] == hw_index 624 && LoHi[i] != i 625 && GetWatchpointAddressByIndex (i) != INVALID_NUB_ADDRESS) 626 { 627 addr = GetWatchpointAddressByIndex (i); 628 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException It is a linked watchpoint; rewritten to index %d addr 0x%llx", LoHi[i], (uint64_t) addr); 629 } 630 } 631 if (hw_index != INVALID_NUB_HW_INDEX) 632 { 633 m_watchpoint_did_occur = true; 634 m_watchpoint_hw_index = hw_index; 635 exc.exc_data[1] = addr; 636 // Piggyback the hw_index in the exc.data. 637 exc.exc_data.push_back(hw_index); 638 } 639 640 return true; 641 } 642 break; 643 } 644 return false; 645 } 646 647 bool 648 DNBArchMachARM::StepNotComplete () 649 { 650 if (m_hw_single_chained_step_addr != INVALID_NUB_ADDRESS) 651 { 652 kern_return_t kret = KERN_INVALID_ARGUMENT; 653 kret = GetGPRState(false); 654 if (kret == KERN_SUCCESS) 655 { 656 if (m_state.context.gpr.__pc == m_hw_single_chained_step_addr) 657 { 658 DNBLogThreadedIf(LOG_STEP, "Need to step some more at 0x%8.8llx", (uint64_t) m_hw_single_chained_step_addr); 659 return true; 660 } 661 } 662 } 663 664 m_hw_single_chained_step_addr = INVALID_NUB_ADDRESS; 665 return false; 666 } 667 668 // Set the single step bit in the processor status register. 669 kern_return_t 670 DNBArchMachARM::EnableHardwareSingleStep (bool enable) 671 { 672 DNBError err; 673 DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable); 674 675 err = GetGPRState(false); 676 677 if (err.Fail()) 678 { 679 err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__); 680 return err.Error(); 681 } 682 683 err = GetDBGState(false); 684 685 if (err.Fail()) 686 { 687 err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__); 688 return err.Error(); 689 } 690 691 // The use of __arm64__ here is not ideal. If debugserver is running on 692 // an armv8 device, regardless of whether it was built for arch arm or arch arm64, 693 // it needs to use the MDSCR_EL1 SS bit to single instruction step. 694 695 #if defined (__arm64__) || defined (__aarch64__) 696 if (enable) 697 { 698 DNBLogThreadedIf(LOG_STEP, "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx", __FUNCTION__, (uint64_t) m_state.context.gpr.__pc); 699 m_state.dbg.__mdscr_el1 |= 1; // Set bit 0 (single step, SS) in the MDSCR_EL1. 700 } 701 else 702 { 703 DNBLogThreadedIf(LOG_STEP, "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx", __FUNCTION__, (uint64_t) m_state.context.gpr.__pc); 704 m_state.dbg.__mdscr_el1 &= ~(1ULL); // Clear bit 0 (single step, SS) in the MDSCR_EL1. 705 } 706 #else 707 const uint32_t i = 0; 708 if (enable) 709 { 710 m_hw_single_chained_step_addr = INVALID_NUB_ADDRESS; 711 712 // Save our previous state 713 m_dbg_save = m_state.dbg; 714 // Set a breakpoint that will stop when the PC doesn't match the current one! 715 m_state.dbg.__bvr[i] = m_state.context.gpr.__pc & 0xFFFFFFFCu; // Set the current PC as the breakpoint address 716 m_state.dbg.__bcr[i] = BCR_M_IMVA_MISMATCH | // Stop on address mismatch 717 S_USER | // Stop only in user mode 718 BCR_ENABLE; // Enable this breakpoint 719 if (m_state.context.gpr.__cpsr & 0x20) 720 { 721 // Thumb breakpoint 722 if (m_state.context.gpr.__pc & 2) 723 m_state.dbg.__bcr[i] |= BAS_IMVA_2_3; 724 else 725 m_state.dbg.__bcr[i] |= BAS_IMVA_0_1; 726 727 uint16_t opcode; 728 if (sizeof(opcode) == m_thread->Process()->Task().ReadMemory(m_state.context.gpr.__pc, sizeof(opcode), &opcode)) 729 { 730 if (IsThumb32Opcode(opcode)) 731 { 732 // 32 bit thumb opcode... 733 if (m_state.context.gpr.__pc & 2) 734 { 735 // We can't take care of a 32 bit thumb instruction single step 736 // with just IVA mismatching. We will need to chain an extra 737 // hardware single step in order to complete this single step... 738 m_hw_single_chained_step_addr = m_state.context.gpr.__pc + 2; 739 } 740 else 741 { 742 // Extend the number of bits to ignore for the mismatch 743 m_state.dbg.__bcr[i] |= BAS_IMVA_ALL; 744 } 745 } 746 } 747 } 748 else 749 { 750 // ARM breakpoint 751 m_state.dbg.__bcr[i] |= BAS_IMVA_ALL; // Stop when any address bits change 752 } 753 754 DNBLogThreadedIf(LOG_STEP, "%s: BVR%u=0x%8.8x BCR%u=0x%8.8x", __FUNCTION__, i, m_state.dbg.__bvr[i], i, m_state.dbg.__bcr[i]); 755 756 for (uint32_t j=i+1; j<16; ++j) 757 { 758 // Disable all others 759 m_state.dbg.__bvr[j] = 0; 760 m_state.dbg.__bcr[j] = 0; 761 } 762 } 763 else 764 { 765 // Just restore the state we had before we did single stepping 766 m_state.dbg = m_dbg_save; 767 } 768 #endif 769 770 return SetDBGState(false); 771 } 772 773 // return 1 if bit "BIT" is set in "value" 774 static inline uint32_t bit(uint32_t value, uint32_t bit) 775 { 776 return (value >> bit) & 1u; 777 } 778 779 // return the bitfield "value[msbit:lsbit]". 780 static inline uint32_t bits(uint32_t value, uint32_t msbit, uint32_t lsbit) 781 { 782 assert(msbit >= lsbit); 783 uint32_t shift_left = sizeof(value) * 8 - 1 - msbit; 784 value <<= shift_left; // shift anything above the msbit off of the unsigned edge 785 value >>= (shift_left + lsbit); // shift it back again down to the lsbit (including undoing any shift from above) 786 return value; // return our result 787 } 788 789 bool 790 DNBArchMachARM::ConditionPassed(uint8_t condition, uint32_t cpsr) 791 { 792 uint32_t cpsr_n = bit(cpsr, 31); // Negative condition code flag 793 uint32_t cpsr_z = bit(cpsr, 30); // Zero condition code flag 794 uint32_t cpsr_c = bit(cpsr, 29); // Carry condition code flag 795 uint32_t cpsr_v = bit(cpsr, 28); // Overflow condition code flag 796 797 switch (condition) { 798 case COND_EQ: // (0x0) 799 if (cpsr_z == 1) return true; 800 break; 801 case COND_NE: // (0x1) 802 if (cpsr_z == 0) return true; 803 break; 804 case COND_CS: // (0x2) 805 if (cpsr_c == 1) return true; 806 break; 807 case COND_CC: // (0x3) 808 if (cpsr_c == 0) return true; 809 break; 810 case COND_MI: // (0x4) 811 if (cpsr_n == 1) return true; 812 break; 813 case COND_PL: // (0x5) 814 if (cpsr_n == 0) return true; 815 break; 816 case COND_VS: // (0x6) 817 if (cpsr_v == 1) return true; 818 break; 819 case COND_VC: // (0x7) 820 if (cpsr_v == 0) return true; 821 break; 822 case COND_HI: // (0x8) 823 if ((cpsr_c == 1) && (cpsr_z == 0)) return true; 824 break; 825 case COND_LS: // (0x9) 826 if ((cpsr_c == 0) || (cpsr_z == 1)) return true; 827 break; 828 case COND_GE: // (0xA) 829 if (cpsr_n == cpsr_v) return true; 830 break; 831 case COND_LT: // (0xB) 832 if (cpsr_n != cpsr_v) return true; 833 break; 834 case COND_GT: // (0xC) 835 if ((cpsr_z == 0) && (cpsr_n == cpsr_v)) return true; 836 break; 837 case COND_LE: // (0xD) 838 if ((cpsr_z == 1) || (cpsr_n != cpsr_v)) return true; 839 break; 840 default: 841 return true; 842 break; 843 } 844 845 return false; 846 } 847 848 uint32_t 849 DNBArchMachARM::NumSupportedHardwareBreakpoints() 850 { 851 // Set the init value to something that will let us know that we need to 852 // autodetect how many breakpoints are supported dynamically... 853 static uint32_t g_num_supported_hw_breakpoints = UINT_MAX; 854 if (g_num_supported_hw_breakpoints == UINT_MAX) 855 { 856 // Set this to zero in case we can't tell if there are any HW breakpoints 857 g_num_supported_hw_breakpoints = 0; 858 859 size_t len; 860 uint32_t n = 0; 861 len = sizeof (n); 862 if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0) 863 { 864 g_num_supported_hw_breakpoints = n; 865 DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n); 866 } 867 else 868 { 869 #if !defined (__arm64__) && !defined (__aarch64__) 870 // Read the DBGDIDR to get the number of available hardware breakpoints 871 // However, in some of our current armv7 processors, hardware 872 // breakpoints/watchpoints were not properly connected. So detect those 873 // cases using a field in a sysctl. For now we are using "hw.cpusubtype" 874 // field to distinguish CPU architectures. This is a hack until we can 875 // get <rdar://problem/6372672> fixed, at which point we will switch to 876 // using a different sysctl string that will tell us how many BRPs 877 // are available to us directly without having to read DBGDIDR. 878 uint32_t register_DBGDIDR; 879 880 asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (register_DBGDIDR)); 881 uint32_t numBRPs = bits(register_DBGDIDR, 27, 24); 882 // Zero is reserved for the BRP count, so don't increment it if it is zero 883 if (numBRPs > 0) 884 numBRPs++; 885 DNBLogThreadedIf(LOG_THREAD, "DBGDIDR=0x%8.8x (number BRP pairs = %u)", register_DBGDIDR, numBRPs); 886 887 if (numBRPs > 0) 888 { 889 uint32_t cpusubtype; 890 len = sizeof(cpusubtype); 891 // TODO: remove this hack and change to using hw.optional.xx when implmented 892 if (::sysctlbyname("hw.cpusubtype", &cpusubtype, &len, NULL, 0) == 0) 893 { 894 DNBLogThreadedIf(LOG_THREAD, "hw.cpusubtype=%d", cpusubtype); 895 if (cpusubtype == CPU_SUBTYPE_ARM_V7) 896 DNBLogThreadedIf(LOG_THREAD, "Hardware breakpoints disabled for armv7 (rdar://problem/6372672)"); 897 else 898 g_num_supported_hw_breakpoints = numBRPs; 899 } 900 } 901 #endif 902 } 903 } 904 return g_num_supported_hw_breakpoints; 905 } 906 907 908 uint32_t 909 DNBArchMachARM::NumSupportedHardwareWatchpoints() 910 { 911 // Set the init value to something that will let us know that we need to 912 // autodetect how many watchpoints are supported dynamically... 913 static uint32_t g_num_supported_hw_watchpoints = UINT_MAX; 914 if (g_num_supported_hw_watchpoints == UINT_MAX) 915 { 916 // Set this to zero in case we can't tell if there are any HW breakpoints 917 g_num_supported_hw_watchpoints = 0; 918 919 920 size_t len; 921 uint32_t n = 0; 922 len = sizeof (n); 923 if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) 924 { 925 g_num_supported_hw_watchpoints = n; 926 DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n); 927 } 928 else 929 { 930 #if !defined (__arm64__) && !defined (__aarch64__) 931 // Read the DBGDIDR to get the number of available hardware breakpoints 932 // However, in some of our current armv7 processors, hardware 933 // breakpoints/watchpoints were not properly connected. So detect those 934 // cases using a field in a sysctl. For now we are using "hw.cpusubtype" 935 // field to distinguish CPU architectures. This is a hack until we can 936 // get <rdar://problem/6372672> fixed, at which point we will switch to 937 // using a different sysctl string that will tell us how many WRPs 938 // are available to us directly without having to read DBGDIDR. 939 940 uint32_t register_DBGDIDR; 941 asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (register_DBGDIDR)); 942 uint32_t numWRPs = bits(register_DBGDIDR, 31, 28) + 1; 943 DNBLogThreadedIf(LOG_THREAD, "DBGDIDR=0x%8.8x (number WRP pairs = %u)", register_DBGDIDR, numWRPs); 944 945 if (numWRPs > 0) 946 { 947 uint32_t cpusubtype; 948 size_t len; 949 len = sizeof(cpusubtype); 950 // TODO: remove this hack and change to using hw.optional.xx when implmented 951 if (::sysctlbyname("hw.cpusubtype", &cpusubtype, &len, NULL, 0) == 0) 952 { 953 DNBLogThreadedIf(LOG_THREAD, "hw.cpusubtype=0x%d", cpusubtype); 954 955 if (cpusubtype == CPU_SUBTYPE_ARM_V7) 956 DNBLogThreadedIf(LOG_THREAD, "Hardware watchpoints disabled for armv7 (rdar://problem/6372672)"); 957 else 958 g_num_supported_hw_watchpoints = numWRPs; 959 } 960 } 961 #endif 962 } 963 } 964 return g_num_supported_hw_watchpoints; 965 } 966 967 968 uint32_t 969 DNBArchMachARM::EnableHardwareBreakpoint (nub_addr_t addr, nub_size_t size) 970 { 971 // Make sure our address isn't bogus 972 if (addr & 1) 973 return INVALID_NUB_HW_INDEX; 974 975 kern_return_t kret = GetDBGState(false); 976 977 if (kret == KERN_SUCCESS) 978 { 979 const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints(); 980 uint32_t i; 981 for (i=0; i<num_hw_breakpoints; ++i) 982 { 983 if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0) 984 break; // We found an available hw breakpoint slot (in i) 985 } 986 987 // See if we found an available hw breakpoint slot above 988 if (i < num_hw_breakpoints) 989 { 990 // Make sure bits 1:0 are clear in our address 991 m_state.dbg.__bvr[i] = addr & ~((nub_addr_t)3); 992 993 if (size == 2 || addr & 2) 994 { 995 uint32_t byte_addr_select = (addr & 2) ? BAS_IMVA_2_3 : BAS_IMVA_0_1; 996 997 // We have a thumb breakpoint 998 // We have an ARM breakpoint 999 m_state.dbg.__bcr[i] = BCR_M_IMVA_MATCH | // Stop on address mismatch 1000 byte_addr_select | // Set the correct byte address select so we only trigger on the correct opcode 1001 S_USER | // Which modes should this breakpoint stop in? 1002 BCR_ENABLE; // Enable this hardware breakpoint 1003 DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint( addr = 0x%8.8llx, size = %llu ) - BVR%u/BCR%u = 0x%8.8x / 0x%8.8x (Thumb)", 1004 (uint64_t)addr, 1005 (uint64_t)size, 1006 i, 1007 i, 1008 m_state.dbg.__bvr[i], 1009 m_state.dbg.__bcr[i]); 1010 } 1011 else if (size == 4) 1012 { 1013 // We have an ARM breakpoint 1014 m_state.dbg.__bcr[i] = BCR_M_IMVA_MATCH | // Stop on address mismatch 1015 BAS_IMVA_ALL | // Stop on any of the four bytes following the IMVA 1016 S_USER | // Which modes should this breakpoint stop in? 1017 BCR_ENABLE; // Enable this hardware breakpoint 1018 DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint( addr = 0x%8.8llx, size = %llu ) - BVR%u/BCR%u = 0x%8.8x / 0x%8.8x (ARM)", 1019 (uint64_t)addr, 1020 (uint64_t)size, 1021 i, 1022 i, 1023 m_state.dbg.__bvr[i], 1024 m_state.dbg.__bcr[i]); 1025 } 1026 1027 kret = SetDBGState(false); 1028 DNBLogThreadedIf(LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint() SetDBGState() => 0x%8.8x.", kret); 1029 1030 if (kret == KERN_SUCCESS) 1031 return i; 1032 } 1033 else 1034 { 1035 DNBLogThreadedIf (LOG_BREAKPOINTS, "DNBArchMachARM::EnableHardwareBreakpoint(addr = 0x%8.8llx, size = %llu) => all hardware breakpoint resources are being used.", (uint64_t)addr, (uint64_t)size); 1036 } 1037 } 1038 1039 return INVALID_NUB_HW_INDEX; 1040 } 1041 1042 bool 1043 DNBArchMachARM::DisableHardwareBreakpoint (uint32_t hw_index) 1044 { 1045 kern_return_t kret = GetDBGState(false); 1046 1047 const uint32_t num_hw_points = NumSupportedHardwareBreakpoints(); 1048 if (kret == KERN_SUCCESS) 1049 { 1050 if (hw_index < num_hw_points) 1051 { 1052 m_state.dbg.__bcr[hw_index] = 0; 1053 DNBLogThreadedIf(LOG_BREAKPOINTS, "DNBArchMachARM::SetHardwareBreakpoint( %u ) - BVR%u = 0x%8.8x BCR%u = 0x%8.8x", 1054 hw_index, 1055 hw_index, 1056 m_state.dbg.__bvr[hw_index], 1057 hw_index, 1058 m_state.dbg.__bcr[hw_index]); 1059 1060 kret = SetDBGState(false); 1061 1062 if (kret == KERN_SUCCESS) 1063 return true; 1064 } 1065 } 1066 return false; 1067 } 1068 1069 // ARM v7 watchpoints may be either word-size or double-word-size. 1070 // It's implementation defined which they can handle. It looks like on an 1071 // armv8 device, armv7 processes can watch dwords. But on a genuine armv7 1072 // device I tried, only word watchpoints are supported. 1073 1074 #if defined (__arm64__) || defined (__aarch64__) 1075 #define WATCHPOINTS_ARE_DWORD 1 1076 #else 1077 #undef WATCHPOINTS_ARE_DWORD 1078 #endif 1079 1080 uint32_t 1081 DNBArchMachARM::EnableHardwareWatchpoint (nub_addr_t addr, nub_size_t size, bool read, bool write, bool also_set_on_task) 1082 { 1083 1084 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint(addr = 0x%8.8llx, size = %zu, read = %u, write = %u)", (uint64_t)addr, size, read, write); 1085 1086 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints(); 1087 1088 // Can't watch zero bytes 1089 if (size == 0) 1090 return INVALID_NUB_HW_INDEX; 1091 1092 // We must watch for either read or write 1093 if (read == false && write == false) 1094 return INVALID_NUB_HW_INDEX; 1095 1096 // Otherwise, can't watch more than 8 bytes per WVR/WCR pair 1097 if (size > 8) 1098 return INVALID_NUB_HW_INDEX; 1099 1100 // Treat arm watchpoints as having an 8-byte alignment requirement. You can put a watchpoint on a 4-byte 1101 // offset address but you can only watch 4 bytes with that watchpoint. 1102 1103 // arm watchpoints on an 8-byte (double word) aligned addr can watch any bytes in that 1104 // 8-byte long region of memory. They can watch the 1st byte, the 2nd byte, 3rd byte, etc, or any 1105 // combination therein by setting the bits in the BAS [12:5] (Byte Address Select) field of 1106 // the DBGWCRn_EL1 reg for the watchpoint. 1107 1108 // If the MASK [28:24] bits in the DBGWCRn_EL1 allow a single watchpoint to monitor a larger region 1109 // of memory (16 bytes, 32 bytes, or 2GB) but the Byte Address Select bitfield then selects a larger 1110 // range of bytes, instead of individual bytes. See the ARMv8 Debug Architecture manual for details. 1111 // This implementation does not currently use the MASK bits; the largest single region watched by a single 1112 // watchpoint right now is 8-bytes. 1113 1114 #if defined (WATCHPOINTS_ARE_DWORD) 1115 nub_addr_t aligned_wp_address = addr & ~0x7; 1116 uint32_t addr_dword_offset = addr & 0x7; 1117 const int max_watchpoint_size = 8; 1118 #else 1119 nub_addr_t aligned_wp_address = addr & ~0x3; 1120 uint32_t addr_dword_offset = addr & 0x3; 1121 const int max_watchpoint_size = 4; 1122 #endif 1123 1124 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint aligned_wp_address is 0x%llx and addr_dword_offset is 0x%x", (uint64_t)aligned_wp_address, addr_dword_offset); 1125 1126 // Do we need to split up this logical watchpoint into two hardware watchpoint 1127 // registers? 1128 // e.g. a watchpoint of length 4 on address 6. We need do this with 1129 // one watchpoint on address 0 with bytes 6 & 7 being monitored 1130 // one watchpoint on address 8 with bytes 0, 1, 2, 3 being monitored 1131 1132 if (addr_dword_offset + size > max_watchpoint_size) 1133 { 1134 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint(addr = 0x%8.8llx, size = %zu) needs two hardware watchpoints slots to monitor", (uint64_t)addr, size); 1135 int low_watchpoint_size = max_watchpoint_size - addr_dword_offset; 1136 int high_watchpoint_size = addr_dword_offset + size - max_watchpoint_size; 1137 1138 uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read, write, also_set_on_task); 1139 if (lo == INVALID_NUB_HW_INDEX) 1140 return INVALID_NUB_HW_INDEX; 1141 uint32_t hi = EnableHardwareWatchpoint (aligned_wp_address + max_watchpoint_size, high_watchpoint_size, read, write, also_set_on_task); 1142 if (hi == INVALID_NUB_HW_INDEX) 1143 { 1144 DisableHardwareWatchpoint (lo, also_set_on_task); 1145 return INVALID_NUB_HW_INDEX; 1146 } 1147 // Tag this lo->hi mapping in our database. 1148 LoHi[lo] = hi; 1149 return lo; 1150 } 1151 1152 // At this point 1153 // 1 aligned_wp_address is the requested address rounded down to 8-byte alignment 1154 // 2 addr_dword_offset is the offset into that double word (8-byte) region that we are watching 1155 // 3 size is the number of bytes within that 8-byte region that we are watching 1156 1157 // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the above. 1158 // The bit shift and negation operation will give us 0b11 for 2, 0b1111 for 4, etc, up to 0b11111111 for 8. 1159 // then we shift those bits left by the offset into this dword that we are interested in. 1160 // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of 0b11110000. 1161 uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset; 1162 1163 // Read the debug state 1164 kern_return_t kret = GetDBGState(true); 1165 1166 if (kret == KERN_SUCCESS) 1167 { 1168 // Check to make sure we have the needed hardware support 1169 uint32_t i = 0; 1170 1171 for (i=0; i<num_hw_watchpoints; ++i) 1172 { 1173 if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0) 1174 break; // We found an available hw watchpoint slot (in i) 1175 } 1176 1177 // See if we found an available hw watchpoint slot above 1178 if (i < num_hw_watchpoints) 1179 { 1180 //DumpDBGState(m_state.dbg); 1181 1182 // Clear any previous LoHi joined-watchpoint that may have been in use 1183 LoHi[i] = 0; 1184 1185 // shift our Byte Address Select bits up to the correct bit range for the DBGWCRn_EL1 1186 byte_address_select = byte_address_select << 5; 1187 1188 // Make sure bits 1:0 are clear in our address 1189 m_state.dbg.__wvr[i] = aligned_wp_address; // DVA (Data Virtual Address) 1190 m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow the DVA that we will watch 1191 S_USER | // Stop only in user mode 1192 (read ? WCR_LOAD : 0) | // Stop on read access? 1193 (write ? WCR_STORE : 0) | // Stop on write access? 1194 WCR_ENABLE; // Enable this watchpoint; 1195 1196 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint() adding watchpoint on address 0x%llx with control register value 0x%x", (uint64_t) m_state.dbg.__wvr[i], (uint32_t) m_state.dbg.__wcr[i]); 1197 1198 // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us automatically, don't need to do it here. 1199 1200 kret = SetDBGState(also_set_on_task); 1201 //DumpDBGState(m_state.dbg); 1202 1203 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint() SetDBGState() => 0x%8.8x.", kret); 1204 1205 if (kret == KERN_SUCCESS) 1206 return i; 1207 } 1208 else 1209 { 1210 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint(): All hardware resources (%u) are in use.", num_hw_watchpoints); 1211 } 1212 } 1213 return INVALID_NUB_HW_INDEX; 1214 } 1215 1216 bool 1217 DNBArchMachARM::ReenableHardwareWatchpoint (uint32_t hw_index) 1218 { 1219 // If this logical watchpoint # is actually implemented using 1220 // two hardware watchpoint registers, re-enable both of them. 1221 1222 if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) 1223 { 1224 return ReenableHardwareWatchpoint_helper (hw_index) && ReenableHardwareWatchpoint_helper (LoHi[hw_index]); 1225 } 1226 else 1227 { 1228 return ReenableHardwareWatchpoint_helper (hw_index); 1229 } 1230 } 1231 1232 bool 1233 DNBArchMachARM::ReenableHardwareWatchpoint_helper (uint32_t hw_index) 1234 { 1235 kern_return_t kret = GetDBGState(false); 1236 if (kret != KERN_SUCCESS) 1237 return false; 1238 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 1239 if (hw_index >= num_hw_points) 1240 return false; 1241 1242 m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr; 1243 m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control; 1244 1245 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::EnableHardwareWatchpoint( %u ) - WVR%u = 0x%8.8llx WCR%u = 0x%8.8llx", 1246 hw_index, 1247 hw_index, 1248 (uint64_t) m_state.dbg.__wvr[hw_index], 1249 hw_index, 1250 (uint64_t) m_state.dbg.__wcr[hw_index]); 1251 1252 // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us automatically, don't need to do it here. 1253 1254 kret = SetDBGState(false); 1255 1256 return (kret == KERN_SUCCESS); 1257 } 1258 1259 bool 1260 DNBArchMachARM::DisableHardwareWatchpoint (uint32_t hw_index, bool also_set_on_task) 1261 { 1262 if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) 1263 { 1264 return DisableHardwareWatchpoint_helper (hw_index, also_set_on_task) && DisableHardwareWatchpoint_helper (LoHi[hw_index], also_set_on_task); 1265 } 1266 else 1267 { 1268 return DisableHardwareWatchpoint_helper (hw_index, also_set_on_task); 1269 } 1270 } 1271 1272 bool 1273 DNBArchMachARM::DisableHardwareWatchpoint_helper (uint32_t hw_index, bool also_set_on_task) 1274 { 1275 kern_return_t kret = GetDBGState(false); 1276 if (kret != KERN_SUCCESS) 1277 return false; 1278 1279 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 1280 if (hw_index >= num_hw_points) 1281 return false; 1282 1283 m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index]; 1284 m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index]; 1285 1286 m_state.dbg.__wvr[hw_index] = 0; 1287 m_state.dbg.__wcr[hw_index] = 0; 1288 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::DisableHardwareWatchpoint( %u ) - WVR%u = 0x%8.8llx WCR%u = 0x%8.8llx", 1289 hw_index, 1290 hw_index, 1291 (uint64_t) m_state.dbg.__wvr[hw_index], 1292 hw_index, 1293 (uint64_t) m_state.dbg.__wcr[hw_index]); 1294 1295 kret = SetDBGState(also_set_on_task); 1296 1297 return (kret == KERN_SUCCESS); 1298 } 1299 1300 // Returns -1 if the trailing bit patterns are not one of: 1301 // { 0b???1, 0b??10, 0b?100, 0b1000 }. 1302 static inline 1303 int32_t 1304 LowestBitSet(uint32_t val) 1305 { 1306 for (unsigned i = 0; i < 4; ++i) { 1307 if (bit(val, i)) 1308 return i; 1309 } 1310 return -1; 1311 } 1312 1313 // Iterate through the debug registers; return the index of the first watchpoint whose address matches. 1314 // As a side effect, the starting address as understood by the debugger is returned which could be 1315 // different from 'addr' passed as an in/out argument. 1316 uint32_t 1317 DNBArchMachARM::GetHardwareWatchpointHit(nub_addr_t &addr) 1318 { 1319 // Read the debug state 1320 kern_return_t kret = GetDBGState(true); 1321 //DumpDBGState(m_state.dbg); 1322 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", kret); 1323 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::GetHardwareWatchpointHit() addr = 0x%llx", (uint64_t)addr); 1324 1325 // This is the watchpoint value to match against, i.e., word address. 1326 #if defined (WATCHPOINTS_ARE_DWORD) 1327 nub_addr_t wp_val = addr & ~((nub_addr_t)7); 1328 #else 1329 nub_addr_t wp_val = addr & ~((nub_addr_t)3); 1330 #endif 1331 if (kret == KERN_SUCCESS) 1332 { 1333 DBG &debug_state = m_state.dbg; 1334 uint32_t i, num = NumSupportedHardwareWatchpoints(); 1335 for (i = 0; i < num; ++i) 1336 { 1337 nub_addr_t wp_addr = GetWatchAddress(debug_state, i); 1338 DNBLogThreadedIf(LOG_WATCHPOINTS, 1339 "DNBArchMachARM::GetHardwareWatchpointHit() slot: %u (addr = 0x%llx).", 1340 i, (uint64_t)wp_addr); 1341 if (wp_val == wp_addr) { 1342 #if defined (WATCHPOINTS_ARE_DWORD) 1343 uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5); 1344 #else 1345 uint32_t byte_mask = bits(debug_state.__wcr[i], 8, 5); 1346 #endif 1347 1348 // Sanity check the byte_mask, first. 1349 if (LowestBitSet(byte_mask) < 0) 1350 continue; 1351 1352 // Compute the starting address (from the point of view of the debugger). 1353 addr = wp_addr + LowestBitSet(byte_mask); 1354 return i; 1355 } 1356 } 1357 } 1358 return INVALID_NUB_HW_INDEX; 1359 } 1360 1361 nub_addr_t 1362 DNBArchMachARM::GetWatchpointAddressByIndex (uint32_t hw_index) 1363 { 1364 kern_return_t kret = GetDBGState(true); 1365 if (kret != KERN_SUCCESS) 1366 return INVALID_NUB_ADDRESS; 1367 const uint32_t num = NumSupportedHardwareWatchpoints(); 1368 if (hw_index >= num) 1369 return INVALID_NUB_ADDRESS; 1370 if (IsWatchpointEnabled (m_state.dbg, hw_index)) 1371 return GetWatchAddress (m_state.dbg, hw_index); 1372 return INVALID_NUB_ADDRESS; 1373 } 1374 1375 bool 1376 DNBArchMachARM::IsWatchpointEnabled(const DBG &debug_state, uint32_t hw_index) 1377 { 1378 // Watchpoint Control Registers, bitfield definitions 1379 // ... 1380 // Bits Value Description 1381 // [0] 0 Watchpoint disabled 1382 // 1 Watchpoint enabled. 1383 return (debug_state.__wcr[hw_index] & 1u); 1384 } 1385 1386 nub_addr_t 1387 DNBArchMachARM::GetWatchAddress(const DBG &debug_state, uint32_t hw_index) 1388 { 1389 // Watchpoint Value Registers, bitfield definitions 1390 // Bits Description 1391 // [31:2] Watchpoint value (word address, i.e., 4-byte aligned) 1392 // [1:0] RAZ/SBZP 1393 return bits(debug_state.__wvr[hw_index], 31, 0); 1394 } 1395 1396 //---------------------------------------------------------------------- 1397 // Register information definitions for 32 bit ARMV7. 1398 //---------------------------------------------------------------------- 1399 enum gpr_regnums 1400 { 1401 gpr_r0 = 0, 1402 gpr_r1, 1403 gpr_r2, 1404 gpr_r3, 1405 gpr_r4, 1406 gpr_r5, 1407 gpr_r6, 1408 gpr_r7, 1409 gpr_r8, 1410 gpr_r9, 1411 gpr_r10, 1412 gpr_r11, 1413 gpr_r12, 1414 gpr_sp, 1415 gpr_lr, 1416 gpr_pc, 1417 gpr_cpsr 1418 }; 1419 1420 enum 1421 { 1422 vfp_s0 = 0, 1423 vfp_s1, 1424 vfp_s2, 1425 vfp_s3, 1426 vfp_s4, 1427 vfp_s5, 1428 vfp_s6, 1429 vfp_s7, 1430 vfp_s8, 1431 vfp_s9, 1432 vfp_s10, 1433 vfp_s11, 1434 vfp_s12, 1435 vfp_s13, 1436 vfp_s14, 1437 vfp_s15, 1438 vfp_s16, 1439 vfp_s17, 1440 vfp_s18, 1441 vfp_s19, 1442 vfp_s20, 1443 vfp_s21, 1444 vfp_s22, 1445 vfp_s23, 1446 vfp_s24, 1447 vfp_s25, 1448 vfp_s26, 1449 vfp_s27, 1450 vfp_s28, 1451 vfp_s29, 1452 vfp_s30, 1453 vfp_s31, 1454 vfp_d0, 1455 vfp_d1, 1456 vfp_d2, 1457 vfp_d3, 1458 vfp_d4, 1459 vfp_d5, 1460 vfp_d6, 1461 vfp_d7, 1462 vfp_d8, 1463 vfp_d9, 1464 vfp_d10, 1465 vfp_d11, 1466 vfp_d12, 1467 vfp_d13, 1468 vfp_d14, 1469 vfp_d15, 1470 vfp_d16, 1471 vfp_d17, 1472 vfp_d18, 1473 vfp_d19, 1474 vfp_d20, 1475 vfp_d21, 1476 vfp_d22, 1477 vfp_d23, 1478 vfp_d24, 1479 vfp_d25, 1480 vfp_d26, 1481 vfp_d27, 1482 vfp_d28, 1483 vfp_d29, 1484 vfp_d30, 1485 vfp_d31, 1486 vfp_q0, 1487 vfp_q1, 1488 vfp_q2, 1489 vfp_q3, 1490 vfp_q4, 1491 vfp_q5, 1492 vfp_q6, 1493 vfp_q7, 1494 vfp_q8, 1495 vfp_q9, 1496 vfp_q10, 1497 vfp_q11, 1498 vfp_q12, 1499 vfp_q13, 1500 vfp_q14, 1501 vfp_q15, 1502 #if defined (__arm64__) || defined (__aarch64__) 1503 vfp_fpsr, 1504 vfp_fpcr, 1505 #else 1506 vfp_fpscr 1507 #endif 1508 }; 1509 1510 enum 1511 { 1512 exc_exception, 1513 exc_fsr, 1514 exc_far, 1515 }; 1516 1517 #define GPR_OFFSET_IDX(idx) (offsetof (DNBArchMachARM::GPR, __r[idx])) 1518 #define GPR_OFFSET_NAME(reg) (offsetof (DNBArchMachARM::GPR, __##reg)) 1519 1520 #define EXC_OFFSET(reg) (offsetof (DNBArchMachARM::EXC, __##reg) + offsetof (DNBArchMachARM::Context, exc)) 1521 1522 // These macros will auto define the register name, alt name, register size, 1523 // register offset, encoding, format and native register. This ensures that 1524 // the register state structures are defined correctly and have the correct 1525 // sizes and offsets. 1526 #define DEFINE_GPR_IDX(idx, reg, alt, gen) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 4, GPR_OFFSET_IDX(idx), gcc_##reg, dwarf_##reg, gen, INVALID_NUB_REGNUM, NULL, NULL} 1527 #define DEFINE_GPR_NAME(reg, alt, gen, inval) { e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 4, GPR_OFFSET_NAME(reg), gcc_##reg, dwarf_##reg, gen, INVALID_NUB_REGNUM, NULL, inval} 1528 1529 // In case we are debugging to a debug target that the ability to 1530 // change into the protected modes with folded registers (ABT, IRQ, 1531 // FIQ, SYS, USR, etc..), we should invalidate r8-r14 if the CPSR 1532 // gets modified. 1533 1534 const char * g_invalidate_cpsr[] = { "r8", "r9", "r10", "r11", "r12", "sp", "lr", NULL }; 1535 1536 // General purpose registers 1537 const DNBRegisterInfo 1538 DNBArchMachARM::g_gpr_registers[] = 1539 { 1540 DEFINE_GPR_IDX ( 0, r0,"arg1", GENERIC_REGNUM_ARG1 ), 1541 DEFINE_GPR_IDX ( 1, r1,"arg2", GENERIC_REGNUM_ARG2 ), 1542 DEFINE_GPR_IDX ( 2, r2,"arg3", GENERIC_REGNUM_ARG3 ), 1543 DEFINE_GPR_IDX ( 3, r3,"arg4", GENERIC_REGNUM_ARG4 ), 1544 DEFINE_GPR_IDX ( 4, r4, NULL, INVALID_NUB_REGNUM ), 1545 DEFINE_GPR_IDX ( 5, r5, NULL, INVALID_NUB_REGNUM ), 1546 DEFINE_GPR_IDX ( 6, r6, NULL, INVALID_NUB_REGNUM ), 1547 DEFINE_GPR_IDX ( 7, r7, "fp", GENERIC_REGNUM_FP ), 1548 DEFINE_GPR_IDX ( 8, r8, NULL, INVALID_NUB_REGNUM ), 1549 DEFINE_GPR_IDX ( 9, r9, NULL, INVALID_NUB_REGNUM ), 1550 DEFINE_GPR_IDX (10, r10, NULL, INVALID_NUB_REGNUM ), 1551 DEFINE_GPR_IDX (11, r11, NULL, INVALID_NUB_REGNUM ), 1552 DEFINE_GPR_IDX (12, r12, NULL, INVALID_NUB_REGNUM ), 1553 DEFINE_GPR_NAME (sp, "r13", GENERIC_REGNUM_SP, NULL), 1554 DEFINE_GPR_NAME (lr, "r14", GENERIC_REGNUM_RA, NULL), 1555 DEFINE_GPR_NAME (pc, "r15", GENERIC_REGNUM_PC, NULL), 1556 DEFINE_GPR_NAME (cpsr, "flags", GENERIC_REGNUM_FLAGS, g_invalidate_cpsr) 1557 }; 1558 1559 const char *g_contained_q0 [] { "q0", NULL }; 1560 const char *g_contained_q1 [] { "q1", NULL }; 1561 const char *g_contained_q2 [] { "q2", NULL }; 1562 const char *g_contained_q3 [] { "q3", NULL }; 1563 const char *g_contained_q4 [] { "q4", NULL }; 1564 const char *g_contained_q5 [] { "q5", NULL }; 1565 const char *g_contained_q6 [] { "q6", NULL }; 1566 const char *g_contained_q7 [] { "q7", NULL }; 1567 const char *g_contained_q8 [] { "q8", NULL }; 1568 const char *g_contained_q9 [] { "q9", NULL }; 1569 const char *g_contained_q10[] { "q10", NULL }; 1570 const char *g_contained_q11[] { "q11", NULL }; 1571 const char *g_contained_q12[] { "q12", NULL }; 1572 const char *g_contained_q13[] { "q13", NULL }; 1573 const char *g_contained_q14[] { "q14", NULL }; 1574 const char *g_contained_q15[] { "q15", NULL }; 1575 1576 const char *g_invalidate_q0[] { "q0", "d0" , "d1" , "s0" , "s1" , "s2" , "s3" , NULL }; 1577 const char *g_invalidate_q1[] { "q1", "d2" , "d3" , "s4" , "s5" , "s6" , "s7" , NULL }; 1578 const char *g_invalidate_q2[] { "q2", "d4" , "d5" , "s8" , "s9" , "s10", "s11", NULL }; 1579 const char *g_invalidate_q3[] { "q3", "d6" , "d7" , "s12", "s13", "s14", "s15", NULL }; 1580 const char *g_invalidate_q4[] { "q4", "d8" , "d9" , "s16", "s17", "s18", "s19", NULL }; 1581 const char *g_invalidate_q5[] { "q5", "d10", "d11", "s20", "s21", "s22", "s23", NULL }; 1582 const char *g_invalidate_q6[] { "q6", "d12", "d13", "s24", "s25", "s26", "s27", NULL }; 1583 const char *g_invalidate_q7[] { "q7", "d14", "d15", "s28", "s29", "s30", "s31", NULL }; 1584 const char *g_invalidate_q8[] { "q8", "d16", "d17", NULL }; 1585 const char *g_invalidate_q9[] { "q9", "d18", "d19", NULL }; 1586 const char *g_invalidate_q10[] { "q10", "d20", "d21", NULL }; 1587 const char *g_invalidate_q11[] { "q11", "d22", "d23", NULL }; 1588 const char *g_invalidate_q12[] { "q12", "d24", "d25", NULL }; 1589 const char *g_invalidate_q13[] { "q13", "d26", "d27", NULL }; 1590 const char *g_invalidate_q14[] { "q14", "d28", "d29", NULL }; 1591 const char *g_invalidate_q15[] { "q15", "d30", "d31", NULL }; 1592 1593 #define VFP_S_OFFSET_IDX(idx) (((idx) % 4) * 4) // offset into q reg: 0, 4, 8, 12 1594 #define VFP_D_OFFSET_IDX(idx) (((idx) % 2) * 8) // offset into q reg: 0, 8 1595 #define VFP_Q_OFFSET_IDX(idx) (VFP_S_OFFSET_IDX ((idx) * 4)) 1596 1597 #define VFP_OFFSET_NAME(reg) (offsetof (DNBArchMachARM::FPU, __##reg) + offsetof (DNBArchMachARM::Context, vfp)) 1598 1599 #define FLOAT_FORMAT Float 1600 1601 #define DEFINE_VFP_S_IDX(idx) e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, FLOAT_FORMAT, 4, VFP_S_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_s##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM 1602 #define DEFINE_VFP_D_IDX(idx) e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, FLOAT_FORMAT, 8, VFP_D_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_d##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM 1603 #define DEFINE_VFP_Q_IDX(idx) e_regSetVFP, vfp_q##idx, "q" #idx, NULL, Vector, VectorOfUInt8, 16, VFP_Q_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_q##idx, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM 1604 1605 // Floating point registers 1606 const DNBRegisterInfo 1607 DNBArchMachARM::g_vfp_registers[] = 1608 { 1609 { DEFINE_VFP_S_IDX ( 0), g_contained_q0, g_invalidate_q0 }, 1610 { DEFINE_VFP_S_IDX ( 1), g_contained_q0, g_invalidate_q0 }, 1611 { DEFINE_VFP_S_IDX ( 2), g_contained_q0, g_invalidate_q0 }, 1612 { DEFINE_VFP_S_IDX ( 3), g_contained_q0, g_invalidate_q0 }, 1613 { DEFINE_VFP_S_IDX ( 4), g_contained_q1, g_invalidate_q1 }, 1614 { DEFINE_VFP_S_IDX ( 5), g_contained_q1, g_invalidate_q1 }, 1615 { DEFINE_VFP_S_IDX ( 6), g_contained_q1, g_invalidate_q1 }, 1616 { DEFINE_VFP_S_IDX ( 7), g_contained_q1, g_invalidate_q1 }, 1617 { DEFINE_VFP_S_IDX ( 8), g_contained_q2, g_invalidate_q2 }, 1618 { DEFINE_VFP_S_IDX ( 9), g_contained_q2, g_invalidate_q2 }, 1619 { DEFINE_VFP_S_IDX (10), g_contained_q2, g_invalidate_q2 }, 1620 { DEFINE_VFP_S_IDX (11), g_contained_q2, g_invalidate_q2 }, 1621 { DEFINE_VFP_S_IDX (12), g_contained_q3, g_invalidate_q3 }, 1622 { DEFINE_VFP_S_IDX (13), g_contained_q3, g_invalidate_q3 }, 1623 { DEFINE_VFP_S_IDX (14), g_contained_q3, g_invalidate_q3 }, 1624 { DEFINE_VFP_S_IDX (15), g_contained_q3, g_invalidate_q3 }, 1625 { DEFINE_VFP_S_IDX (16), g_contained_q4, g_invalidate_q4 }, 1626 { DEFINE_VFP_S_IDX (17), g_contained_q4, g_invalidate_q4 }, 1627 { DEFINE_VFP_S_IDX (18), g_contained_q4, g_invalidate_q4 }, 1628 { DEFINE_VFP_S_IDX (19), g_contained_q4, g_invalidate_q4 }, 1629 { DEFINE_VFP_S_IDX (20), g_contained_q5, g_invalidate_q5 }, 1630 { DEFINE_VFP_S_IDX (21), g_contained_q5, g_invalidate_q5 }, 1631 { DEFINE_VFP_S_IDX (22), g_contained_q5, g_invalidate_q5 }, 1632 { DEFINE_VFP_S_IDX (23), g_contained_q5, g_invalidate_q5 }, 1633 { DEFINE_VFP_S_IDX (24), g_contained_q6, g_invalidate_q6 }, 1634 { DEFINE_VFP_S_IDX (25), g_contained_q6, g_invalidate_q6 }, 1635 { DEFINE_VFP_S_IDX (26), g_contained_q6, g_invalidate_q6 }, 1636 { DEFINE_VFP_S_IDX (27), g_contained_q6, g_invalidate_q6 }, 1637 { DEFINE_VFP_S_IDX (28), g_contained_q7, g_invalidate_q7 }, 1638 { DEFINE_VFP_S_IDX (29), g_contained_q7, g_invalidate_q7 }, 1639 { DEFINE_VFP_S_IDX (30), g_contained_q7, g_invalidate_q7 }, 1640 { DEFINE_VFP_S_IDX (31), g_contained_q7, g_invalidate_q7 }, 1641 1642 { DEFINE_VFP_D_IDX (0), g_contained_q0, g_invalidate_q0 }, 1643 { DEFINE_VFP_D_IDX (1), g_contained_q0, g_invalidate_q0 }, 1644 { DEFINE_VFP_D_IDX (2), g_contained_q1, g_invalidate_q1 }, 1645 { DEFINE_VFP_D_IDX (3), g_contained_q1, g_invalidate_q1 }, 1646 { DEFINE_VFP_D_IDX (4), g_contained_q2, g_invalidate_q2 }, 1647 { DEFINE_VFP_D_IDX (5), g_contained_q2, g_invalidate_q2 }, 1648 { DEFINE_VFP_D_IDX (6), g_contained_q3, g_invalidate_q3 }, 1649 { DEFINE_VFP_D_IDX (7), g_contained_q3, g_invalidate_q3 }, 1650 { DEFINE_VFP_D_IDX (8), g_contained_q4, g_invalidate_q4 }, 1651 { DEFINE_VFP_D_IDX (9), g_contained_q4, g_invalidate_q4 }, 1652 { DEFINE_VFP_D_IDX (10), g_contained_q5, g_invalidate_q5 }, 1653 { DEFINE_VFP_D_IDX (11), g_contained_q5, g_invalidate_q5 }, 1654 { DEFINE_VFP_D_IDX (12), g_contained_q6, g_invalidate_q6 }, 1655 { DEFINE_VFP_D_IDX (13), g_contained_q6, g_invalidate_q6 }, 1656 { DEFINE_VFP_D_IDX (14), g_contained_q7, g_invalidate_q7 }, 1657 { DEFINE_VFP_D_IDX (15), g_contained_q7, g_invalidate_q7 }, 1658 { DEFINE_VFP_D_IDX (16), g_contained_q8, g_invalidate_q8 }, 1659 { DEFINE_VFP_D_IDX (17), g_contained_q8, g_invalidate_q8 }, 1660 { DEFINE_VFP_D_IDX (18), g_contained_q9, g_invalidate_q9 }, 1661 { DEFINE_VFP_D_IDX (19), g_contained_q9, g_invalidate_q9 }, 1662 { DEFINE_VFP_D_IDX (20), g_contained_q10, g_invalidate_q10 }, 1663 { DEFINE_VFP_D_IDX (21), g_contained_q10, g_invalidate_q10 }, 1664 { DEFINE_VFP_D_IDX (22), g_contained_q11, g_invalidate_q11 }, 1665 { DEFINE_VFP_D_IDX (23), g_contained_q11, g_invalidate_q11 }, 1666 { DEFINE_VFP_D_IDX (24), g_contained_q12, g_invalidate_q12 }, 1667 { DEFINE_VFP_D_IDX (25), g_contained_q12, g_invalidate_q12 }, 1668 { DEFINE_VFP_D_IDX (26), g_contained_q13, g_invalidate_q13 }, 1669 { DEFINE_VFP_D_IDX (27), g_contained_q13, g_invalidate_q13 }, 1670 { DEFINE_VFP_D_IDX (28), g_contained_q14, g_invalidate_q14 }, 1671 { DEFINE_VFP_D_IDX (29), g_contained_q14, g_invalidate_q14 }, 1672 { DEFINE_VFP_D_IDX (30), g_contained_q15, g_invalidate_q15 }, 1673 { DEFINE_VFP_D_IDX (31), g_contained_q15, g_invalidate_q15 }, 1674 1675 { DEFINE_VFP_Q_IDX (0), NULL, g_invalidate_q0 }, 1676 { DEFINE_VFP_Q_IDX (1), NULL, g_invalidate_q1 }, 1677 { DEFINE_VFP_Q_IDX (2), NULL, g_invalidate_q2 }, 1678 { DEFINE_VFP_Q_IDX (3), NULL, g_invalidate_q3 }, 1679 { DEFINE_VFP_Q_IDX (4), NULL, g_invalidate_q4 }, 1680 { DEFINE_VFP_Q_IDX (5), NULL, g_invalidate_q5 }, 1681 { DEFINE_VFP_Q_IDX (6), NULL, g_invalidate_q6 }, 1682 { DEFINE_VFP_Q_IDX (7), NULL, g_invalidate_q7 }, 1683 { DEFINE_VFP_Q_IDX (8), NULL, g_invalidate_q8 }, 1684 { DEFINE_VFP_Q_IDX (9), NULL, g_invalidate_q9 }, 1685 { DEFINE_VFP_Q_IDX (10), NULL, g_invalidate_q10 }, 1686 { DEFINE_VFP_Q_IDX (11), NULL, g_invalidate_q11 }, 1687 { DEFINE_VFP_Q_IDX (12), NULL, g_invalidate_q12 }, 1688 { DEFINE_VFP_Q_IDX (13), NULL, g_invalidate_q13 }, 1689 { DEFINE_VFP_Q_IDX (14), NULL, g_invalidate_q14 }, 1690 { DEFINE_VFP_Q_IDX (15), NULL, g_invalidate_q15 }, 1691 1692 #if defined (__arm64__) || defined (__aarch64__) 1693 { e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4, VFP_OFFSET_NAME(fpsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL }, 1694 { e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4, VFP_OFFSET_NAME(fpcr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL } 1695 #else 1696 { e_regSetVFP, vfp_fpscr, "fpscr", NULL, Uint, Hex, 4, VFP_OFFSET_NAME(fpscr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL } 1697 #endif 1698 }; 1699 1700 // Exception registers 1701 1702 const DNBRegisterInfo 1703 DNBArchMachARM::g_exc_registers[] = 1704 { 1705 { e_regSetVFP, exc_exception , "exception" , NULL, Uint, Hex, 4, EXC_OFFSET(exception) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM }, 1706 { e_regSetVFP, exc_fsr , "fsr" , NULL, Uint, Hex, 4, EXC_OFFSET(fsr) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM }, 1707 { e_regSetVFP, exc_far , "far" , NULL, Uint, Hex, 4, EXC_OFFSET(far) , INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM } 1708 }; 1709 1710 // Number of registers in each register set 1711 const size_t DNBArchMachARM::k_num_gpr_registers = sizeof(g_gpr_registers)/sizeof(DNBRegisterInfo); 1712 const size_t DNBArchMachARM::k_num_vfp_registers = sizeof(g_vfp_registers)/sizeof(DNBRegisterInfo); 1713 const size_t DNBArchMachARM::k_num_exc_registers = sizeof(g_exc_registers)/sizeof(DNBRegisterInfo); 1714 const size_t DNBArchMachARM::k_num_all_registers = k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers; 1715 1716 //---------------------------------------------------------------------- 1717 // Register set definitions. The first definitions at register set index 1718 // of zero is for all registers, followed by other registers sets. The 1719 // register information for the all register set need not be filled in. 1720 //---------------------------------------------------------------------- 1721 const DNBRegisterSetInfo 1722 DNBArchMachARM::g_reg_sets[] = 1723 { 1724 { "ARM Registers", NULL, k_num_all_registers }, 1725 { "General Purpose Registers", g_gpr_registers, k_num_gpr_registers }, 1726 { "Floating Point Registers", g_vfp_registers, k_num_vfp_registers }, 1727 { "Exception State Registers", g_exc_registers, k_num_exc_registers } 1728 }; 1729 // Total number of register sets for this architecture 1730 const size_t DNBArchMachARM::k_num_register_sets = sizeof(g_reg_sets)/sizeof(DNBRegisterSetInfo); 1731 1732 1733 const DNBRegisterSetInfo * 1734 DNBArchMachARM::GetRegisterSetInfo(nub_size_t *num_reg_sets) 1735 { 1736 *num_reg_sets = k_num_register_sets; 1737 return g_reg_sets; 1738 } 1739 1740 bool 1741 DNBArchMachARM::GetRegisterValue(uint32_t set, uint32_t reg, DNBRegisterValue *value) 1742 { 1743 if (set == REGISTER_SET_GENERIC) 1744 { 1745 switch (reg) 1746 { 1747 case GENERIC_REGNUM_PC: // Program Counter 1748 set = e_regSetGPR; 1749 reg = gpr_pc; 1750 break; 1751 1752 case GENERIC_REGNUM_SP: // Stack Pointer 1753 set = e_regSetGPR; 1754 reg = gpr_sp; 1755 break; 1756 1757 case GENERIC_REGNUM_FP: // Frame Pointer 1758 set = e_regSetGPR; 1759 reg = gpr_r7; // is this the right reg? 1760 break; 1761 1762 case GENERIC_REGNUM_RA: // Return Address 1763 set = e_regSetGPR; 1764 reg = gpr_lr; 1765 break; 1766 1767 case GENERIC_REGNUM_FLAGS: // Processor flags register 1768 set = e_regSetGPR; 1769 reg = gpr_cpsr; 1770 break; 1771 1772 default: 1773 return false; 1774 } 1775 } 1776 1777 if (GetRegisterState(set, false) != KERN_SUCCESS) 1778 return false; 1779 1780 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1781 if (regInfo) 1782 { 1783 value->info = *regInfo; 1784 switch (set) 1785 { 1786 case e_regSetGPR: 1787 if (reg < k_num_gpr_registers) 1788 { 1789 value->value.uint32 = m_state.context.gpr.__r[reg]; 1790 return true; 1791 } 1792 break; 1793 1794 case e_regSetVFP: 1795 // "reg" is an index into the floating point register set at this point. 1796 // We need to translate it up so entry 0 in the fp reg set is the same as vfp_s0 1797 // in the enumerated values for case statement below. 1798 if (reg >= vfp_s0 && reg <= vfp_s31) 1799 { 1800 #if defined (__arm64__) || defined (__aarch64__) 1801 uint32_t *s_reg = ((uint32_t *) &m_state.context.vfp.__v[0]) + (reg - vfp_s0); 1802 memcpy (&value->value.v_uint8, s_reg, 4); 1803 #else 1804 value->value.uint32 = m_state.context.vfp.__r[reg]; 1805 #endif 1806 return true; 1807 } 1808 else if (reg >= vfp_d0 && reg <= vfp_d31) 1809 { 1810 #if defined (__arm64__) || defined (__aarch64__) 1811 uint64_t *d_reg = ((uint64_t *) &m_state.context.vfp.__v[0]) + (reg - vfp_d0); 1812 memcpy (&value->value.v_uint8, d_reg, 8); 1813 #else 1814 uint32_t d_reg_idx = reg - vfp_d0; 1815 uint32_t s_reg_idx = d_reg_idx * 2; 1816 value->value.v_sint32[0] = m_state.context.vfp.__r[s_reg_idx + 0]; 1817 value->value.v_sint32[1] = m_state.context.vfp.__r[s_reg_idx + 1]; 1818 #endif 1819 return true; 1820 } 1821 else if (reg >= vfp_q0 && reg <= vfp_q15) 1822 { 1823 #if defined (__arm64__) || defined (__aarch64__) 1824 memcpy (&value->value.v_uint8, (uint8_t *) &m_state.context.vfp.__v[reg - vfp_q0], 16); 1825 #else 1826 uint32_t s_reg_idx = (reg - vfp_q0) * 4; 1827 memcpy (&value->value.v_uint8, (uint8_t *) &m_state.context.vfp.__r[s_reg_idx], 16); 1828 #endif 1829 return true; 1830 } 1831 #if defined (__arm64__) || defined (__aarch64__) 1832 else if (reg == vfp_fpsr) 1833 { 1834 value->value.uint32 = m_state.context.vfp.__fpsr; 1835 return true; 1836 } 1837 else if (reg == vfp_fpcr) 1838 { 1839 value->value.uint32 = m_state.context.vfp.__fpcr; 1840 return true; 1841 } 1842 #else 1843 else if (reg == vfp_fpscr) 1844 { 1845 value->value.uint32 = m_state.context.vfp.__fpscr; 1846 return true; 1847 } 1848 #endif 1849 break; 1850 1851 case e_regSetEXC: 1852 if (reg < k_num_exc_registers) 1853 { 1854 value->value.uint32 = (&m_state.context.exc.__exception)[reg]; 1855 return true; 1856 } 1857 break; 1858 } 1859 } 1860 return false; 1861 } 1862 1863 bool 1864 DNBArchMachARM::SetRegisterValue(uint32_t set, uint32_t reg, const DNBRegisterValue *value) 1865 { 1866 if (set == REGISTER_SET_GENERIC) 1867 { 1868 switch (reg) 1869 { 1870 case GENERIC_REGNUM_PC: // Program Counter 1871 set = e_regSetGPR; 1872 reg = gpr_pc; 1873 break; 1874 1875 case GENERIC_REGNUM_SP: // Stack Pointer 1876 set = e_regSetGPR; 1877 reg = gpr_sp; 1878 break; 1879 1880 case GENERIC_REGNUM_FP: // Frame Pointer 1881 set = e_regSetGPR; 1882 reg = gpr_r7; 1883 break; 1884 1885 case GENERIC_REGNUM_RA: // Return Address 1886 set = e_regSetGPR; 1887 reg = gpr_lr; 1888 break; 1889 1890 case GENERIC_REGNUM_FLAGS: // Processor flags register 1891 set = e_regSetGPR; 1892 reg = gpr_cpsr; 1893 break; 1894 1895 default: 1896 return false; 1897 } 1898 } 1899 1900 if (GetRegisterState(set, false) != KERN_SUCCESS) 1901 return false; 1902 1903 bool success = false; 1904 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1905 if (regInfo) 1906 { 1907 switch (set) 1908 { 1909 case e_regSetGPR: 1910 if (reg < k_num_gpr_registers) 1911 { 1912 m_state.context.gpr.__r[reg] = value->value.uint32; 1913 success = true; 1914 } 1915 break; 1916 1917 case e_regSetVFP: 1918 // "reg" is an index into the floating point register set at this point. 1919 // We need to translate it up so entry 0 in the fp reg set is the same as vfp_s0 1920 // in the enumerated values for case statement below. 1921 if (reg >= vfp_s0 && reg <= vfp_s31) 1922 { 1923 #if defined (__arm64__) || defined (__aarch64__) 1924 uint32_t *s_reg = ((uint32_t *) &m_state.context.vfp.__v[0]) + (reg - vfp_s0); 1925 memcpy (s_reg, &value->value.v_uint8, 4); 1926 #else 1927 m_state.context.vfp.__r[reg] = value->value.uint32; 1928 #endif 1929 success = true; 1930 } 1931 else if (reg >= vfp_d0 && reg <= vfp_d31) 1932 { 1933 #if defined (__arm64__) || defined (__aarch64__) 1934 uint64_t *d_reg = ((uint64_t *) &m_state.context.vfp.__v[0]) + (reg - vfp_d0); 1935 memcpy (d_reg, &value->value.v_uint8, 8); 1936 #else 1937 uint32_t d_reg_idx = reg - vfp_d0; 1938 uint32_t s_reg_idx = d_reg_idx * 2; 1939 m_state.context.vfp.__r[s_reg_idx + 0] = value->value.v_sint32[0]; 1940 m_state.context.vfp.__r[s_reg_idx + 1] = value->value.v_sint32[1]; 1941 #endif 1942 success = true; 1943 } 1944 else if (reg >= vfp_q0 && reg <= vfp_q15) 1945 { 1946 #if defined (__arm64__) || defined (__aarch64__) 1947 memcpy ((uint8_t *) &m_state.context.vfp.__v[reg - vfp_q0], &value->value.v_uint8, 16); 1948 #else 1949 uint32_t s_reg_idx = (reg - vfp_q0) * 4; 1950 memcpy ((uint8_t *) &m_state.context.vfp.__r[s_reg_idx], &value->value.v_uint8, 16); 1951 #endif 1952 success = true; 1953 } 1954 #if defined (__arm64__) || defined (__aarch64__) 1955 else if (reg == vfp_fpsr) 1956 { 1957 m_state.context.vfp.__fpsr = value->value.uint32; 1958 success = true; 1959 } 1960 else if (reg == vfp_fpcr) 1961 { 1962 m_state.context.vfp.__fpcr = value->value.uint32; 1963 success = true; 1964 } 1965 #else 1966 else if (reg == vfp_fpscr) 1967 { 1968 m_state.context.vfp.__fpscr = value->value.uint32; 1969 success = true; 1970 } 1971 #endif 1972 break; 1973 1974 case e_regSetEXC: 1975 if (reg < k_num_exc_registers) 1976 { 1977 (&m_state.context.exc.__exception)[reg] = value->value.uint32; 1978 success = true; 1979 } 1980 break; 1981 } 1982 1983 } 1984 if (success) 1985 return SetRegisterState(set) == KERN_SUCCESS; 1986 return false; 1987 } 1988 1989 kern_return_t 1990 DNBArchMachARM::GetRegisterState(int set, bool force) 1991 { 1992 switch (set) 1993 { 1994 case e_regSetALL: return GetGPRState(force) | 1995 GetVFPState(force) | 1996 GetEXCState(force) | 1997 GetDBGState(force); 1998 case e_regSetGPR: return GetGPRState(force); 1999 case e_regSetVFP: return GetVFPState(force); 2000 case e_regSetEXC: return GetEXCState(force); 2001 case e_regSetDBG: return GetDBGState(force); 2002 default: break; 2003 } 2004 return KERN_INVALID_ARGUMENT; 2005 } 2006 2007 kern_return_t 2008 DNBArchMachARM::SetRegisterState(int set) 2009 { 2010 // Make sure we have a valid context to set. 2011 kern_return_t err = GetRegisterState(set, false); 2012 if (err != KERN_SUCCESS) 2013 return err; 2014 2015 switch (set) 2016 { 2017 case e_regSetALL: return SetGPRState() | 2018 SetVFPState() | 2019 SetEXCState() | 2020 SetDBGState(false); 2021 case e_regSetGPR: return SetGPRState(); 2022 case e_regSetVFP: return SetVFPState(); 2023 case e_regSetEXC: return SetEXCState(); 2024 case e_regSetDBG: return SetDBGState(false); 2025 default: break; 2026 } 2027 return KERN_INVALID_ARGUMENT; 2028 } 2029 2030 bool 2031 DNBArchMachARM::RegisterSetStateIsValid (int set) const 2032 { 2033 return m_state.RegsAreValid(set); 2034 } 2035 2036 2037 nub_size_t 2038 DNBArchMachARM::GetRegisterContext (void *buf, nub_size_t buf_len) 2039 { 2040 nub_size_t size = sizeof (m_state.context.gpr) + 2041 sizeof (m_state.context.vfp) + 2042 sizeof (m_state.context.exc); 2043 2044 if (buf && buf_len) 2045 { 2046 if (size > buf_len) 2047 size = buf_len; 2048 2049 bool force = false; 2050 if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force)) 2051 return 0; 2052 2053 // Copy each struct individually to avoid any padding that might be between the structs in m_state.context 2054 uint8_t *p = (uint8_t *)buf; 2055 ::memcpy (p, &m_state.context.gpr, sizeof(m_state.context.gpr)); 2056 p += sizeof(m_state.context.gpr); 2057 ::memcpy (p, &m_state.context.vfp, sizeof(m_state.context.vfp)); 2058 p += sizeof(m_state.context.vfp); 2059 ::memcpy (p, &m_state.context.exc, sizeof(m_state.context.exc)); 2060 p += sizeof(m_state.context.exc); 2061 2062 size_t bytes_written = p - (uint8_t *)buf; 2063 UNUSED_IF_ASSERT_DISABLED(bytes_written); 2064 assert (bytes_written == size); 2065 2066 } 2067 DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::GetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size); 2068 // Return the size of the register context even if NULL was passed in 2069 return size; 2070 } 2071 2072 nub_size_t 2073 DNBArchMachARM::SetRegisterContext (const void *buf, nub_size_t buf_len) 2074 { 2075 nub_size_t size = sizeof (m_state.context.gpr) + 2076 sizeof (m_state.context.vfp) + 2077 sizeof (m_state.context.exc); 2078 2079 if (buf == NULL || buf_len == 0) 2080 size = 0; 2081 2082 if (size) 2083 { 2084 if (size > buf_len) 2085 size = buf_len; 2086 2087 // Copy each struct individually to avoid any padding that might be between the structs in m_state.context 2088 uint8_t *p = (uint8_t *)buf; 2089 ::memcpy (&m_state.context.gpr, p, sizeof(m_state.context.gpr)); 2090 p += sizeof(m_state.context.gpr); 2091 ::memcpy (&m_state.context.vfp, p, sizeof(m_state.context.vfp)); 2092 p += sizeof(m_state.context.vfp); 2093 ::memcpy (&m_state.context.exc, p, sizeof(m_state.context.exc)); 2094 p += sizeof(m_state.context.exc); 2095 2096 size_t bytes_written = p - (uint8_t *)buf; 2097 UNUSED_IF_ASSERT_DISABLED(bytes_written); 2098 assert (bytes_written == size); 2099 2100 if (SetGPRState() | SetVFPState() | SetEXCState()) 2101 return 0; 2102 } 2103 DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::SetRegisterContext (buf = %p, len = %llu) => %llu", buf, (uint64_t)buf_len, (uint64_t)size); 2104 return size; 2105 } 2106 2107 2108 uint32_t 2109 DNBArchMachARM::SaveRegisterState () 2110 { 2111 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber()); 2112 DNBLogThreadedIf (LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u (SetGPRState() for stop_count = %u)", m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount()); 2113 2114 // Always re-read the registers because above we call thread_abort_safely(); 2115 bool force = true; 2116 2117 if ((kret = GetGPRState(force)) != KERN_SUCCESS) 2118 { 2119 DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::SaveRegisterState () error: GPR regs failed to read: %u ", kret); 2120 } 2121 else if ((kret = GetVFPState(force)) != KERN_SUCCESS) 2122 { 2123 DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::SaveRegisterState () error: %s regs failed to read: %u", "VFP", kret); 2124 } 2125 else 2126 { 2127 const uint32_t save_id = GetNextRegisterStateSaveID (); 2128 m_saved_register_states[save_id] = m_state.context; 2129 return save_id; 2130 } 2131 return UINT32_MAX; 2132 } 2133 2134 bool 2135 DNBArchMachARM::RestoreRegisterState (uint32_t save_id) 2136 { 2137 SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id); 2138 if (pos != m_saved_register_states.end()) 2139 { 2140 m_state.context.gpr = pos->second.gpr; 2141 m_state.context.vfp = pos->second.vfp; 2142 kern_return_t kret; 2143 bool success = true; 2144 if ((kret = SetGPRState()) != KERN_SUCCESS) 2145 { 2146 DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::RestoreRegisterState (save_id = %u) error: GPR regs failed to write: %u", save_id, kret); 2147 success = false; 2148 } 2149 else if ((kret = SetVFPState()) != KERN_SUCCESS) 2150 { 2151 DNBLogThreadedIf (LOG_THREAD, "DNBArchMachARM::RestoreRegisterState (save_id = %u) error: %s regs failed to write: %u", save_id, "VFP", kret); 2152 success = false; 2153 } 2154 m_saved_register_states.erase(pos); 2155 return success; 2156 } 2157 return false; 2158 } 2159 2160 2161 #endif // #if defined (__arm__) 2162 2163