1 //===-- DNBArchImplARM64.cpp ------------------------------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Created by Greg Clayton on 6/25/07. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #if defined(__arm__) || defined(__arm64__) || defined(__aarch64__) 14 15 #include "MacOSX/arm64/DNBArchImplARM64.h" 16 17 #if defined(ARM_THREAD_STATE64_COUNT) 18 19 #include "DNB.h" 20 #include "DNBBreakpoint.h" 21 #include "DNBLog.h" 22 #include "DNBRegisterInfo.h" 23 #include "MacOSX/MachProcess.h" 24 #include "MacOSX/MachThread.h" 25 26 #include <inttypes.h> 27 #include <sys/sysctl.h> 28 29 // Break only in privileged or user mode 30 // (PAC bits in the DBGWVRn_EL1 watchpoint control register) 31 #define S_USER ((uint32_t)(2u << 1)) 32 33 #define BCR_ENABLE ((uint32_t)(1u)) 34 #define WCR_ENABLE ((uint32_t)(1u)) 35 36 // Watchpoint load/store 37 // (LSC bits in the DBGWVRn_EL1 watchpoint control register) 38 #define WCR_LOAD ((uint32_t)(1u << 3)) 39 #define WCR_STORE ((uint32_t)(1u << 4)) 40 41 // Enable breakpoint, watchpoint, and vector catch debug exceptions. 42 // (MDE bit in the MDSCR_EL1 register. 
Equivalent to the MDBGen bit in 43 // DBGDSCRext in Aarch32) 44 #define MDE_ENABLE ((uint32_t)(1u << 15)) 45 46 // Single instruction step 47 // (SS bit in the MDSCR_EL1 register) 48 #define SS_ENABLE ((uint32_t)(1u)) 49 50 static const uint8_t g_arm64_breakpoint_opcode[] = { 51 0x00, 0x00, 0x20, 0xD4}; // "brk #0", 0xd4200000 in BE byte order 52 static const uint8_t g_arm_breakpoint_opcode[] = { 53 0xFE, 0xDE, 0xFF, 0xE7}; // this armv7 insn also works in arm64 54 55 // If we need to set one logical watchpoint by using 56 // two hardware watchpoint registers, the watchpoint 57 // will be split into a "high" and "low" watchpoint. 58 // Record both of them in the LoHi array. 59 60 // It's safe to initialize to all 0's since 61 // hi > lo and therefore LoHi[i] cannot be 0. 62 static uint32_t LoHi[16] = {0}; 63 64 void DNBArchMachARM64::Initialize() { 65 DNBArchPluginInfo arch_plugin_info = { 66 CPU_TYPE_ARM64, DNBArchMachARM64::Create, 67 DNBArchMachARM64::GetRegisterSetInfo, 68 DNBArchMachARM64::SoftwareBreakpointOpcode}; 69 70 // Register this arch plug-in with the main protocol class 71 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info); 72 73 DNBArchPluginInfo arch_plugin_info_32 = { 74 CPU_TYPE_ARM64_32, DNBArchMachARM64::Create, 75 DNBArchMachARM64::GetRegisterSetInfo, 76 DNBArchMachARM64::SoftwareBreakpointOpcode}; 77 78 // Register this arch plug-in with the main protocol class 79 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info_32); 80 } 81 82 DNBArchProtocol *DNBArchMachARM64::Create(MachThread *thread) { 83 DNBArchMachARM64 *obj = new DNBArchMachARM64(thread); 84 85 return obj; 86 } 87 88 const uint8_t * 89 DNBArchMachARM64::SoftwareBreakpointOpcode(nub_size_t byte_size) { 90 return g_arm_breakpoint_opcode; 91 } 92 93 uint32_t DNBArchMachARM64::GetCPUType() { return CPU_TYPE_ARM64; } 94 95 uint64_t DNBArchMachARM64::GetPC(uint64_t failValue) { 96 // Get program counter 97 if (GetGPRState(false) == KERN_SUCCESS) 98 return m_state.context.gpr.__pc; 99 
return failValue; 100 } 101 102 kern_return_t DNBArchMachARM64::SetPC(uint64_t value) { 103 // Get program counter 104 kern_return_t err = GetGPRState(false); 105 if (err == KERN_SUCCESS) { 106 m_state.context.gpr.__pc = value; 107 err = SetGPRState(); 108 } 109 return err == KERN_SUCCESS; 110 } 111 112 uint64_t DNBArchMachARM64::GetSP(uint64_t failValue) { 113 // Get stack pointer 114 if (GetGPRState(false) == KERN_SUCCESS) 115 return m_state.context.gpr.__sp; 116 return failValue; 117 } 118 119 kern_return_t DNBArchMachARM64::GetGPRState(bool force) { 120 int set = e_regSetGPR; 121 // Check if we have valid cached registers 122 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 123 return KERN_SUCCESS; 124 125 // Read the registers from our thread 126 mach_msg_type_number_t count = e_regSetGPRCount; 127 kern_return_t kret = 128 ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64, 129 (thread_state_t)&m_state.context.gpr, &count); 130 if (DNBLogEnabledForAny(LOG_THREAD)) { 131 uint64_t *x = &m_state.context.gpr.__x[0]; 132 DNBLogThreaded( 133 "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs" 134 "\n x0=%16.16llx" 135 "\n x1=%16.16llx" 136 "\n x2=%16.16llx" 137 "\n x3=%16.16llx" 138 "\n x4=%16.16llx" 139 "\n x5=%16.16llx" 140 "\n x6=%16.16llx" 141 "\n x7=%16.16llx" 142 "\n x8=%16.16llx" 143 "\n x9=%16.16llx" 144 "\n x10=%16.16llx" 145 "\n x11=%16.16llx" 146 "\n x12=%16.16llx" 147 "\n x13=%16.16llx" 148 "\n x14=%16.16llx" 149 "\n x15=%16.16llx" 150 "\n x16=%16.16llx" 151 "\n x17=%16.16llx" 152 "\n x18=%16.16llx" 153 "\n x19=%16.16llx" 154 "\n x20=%16.16llx" 155 "\n x21=%16.16llx" 156 "\n x22=%16.16llx" 157 "\n x23=%16.16llx" 158 "\n x24=%16.16llx" 159 "\n x25=%16.16llx" 160 "\n x26=%16.16llx" 161 "\n x27=%16.16llx" 162 "\n x28=%16.16llx" 163 "\n fp=%16.16llx" 164 "\n lr=%16.16llx" 165 "\n sp=%16.16llx" 166 "\n pc=%16.16llx" 167 "\n cpsr=%8.8x", 168 m_thread->MachPortNumber(), e_regSetGPR, e_regSetGPRCount, kret, count, 169 
x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[0], x[11], 170 x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20], x[21], 171 x[22], x[23], x[24], x[25], x[26], x[27], x[28], 172 m_state.context.gpr.__fp, m_state.context.gpr.__lr, 173 m_state.context.gpr.__sp, m_state.context.gpr.__pc, 174 m_state.context.gpr.__cpsr); 175 } 176 m_state.SetError(set, Read, kret); 177 return kret; 178 } 179 180 kern_return_t DNBArchMachARM64::GetVFPState(bool force) { 181 int set = e_regSetVFP; 182 // Check if we have valid cached registers 183 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 184 return KERN_SUCCESS; 185 186 // Read the registers from our thread 187 mach_msg_type_number_t count = e_regSetVFPCount; 188 kern_return_t kret = 189 ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64, 190 (thread_state_t)&m_state.context.vfp, &count); 191 if (DNBLogEnabledForAny(LOG_THREAD)) { 192 #if defined(__arm64__) || defined(__aarch64__) 193 DNBLogThreaded( 194 "thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs" 195 "\n q0 = 0x%16.16llx%16.16llx" 196 "\n q1 = 0x%16.16llx%16.16llx" 197 "\n q2 = 0x%16.16llx%16.16llx" 198 "\n q3 = 0x%16.16llx%16.16llx" 199 "\n q4 = 0x%16.16llx%16.16llx" 200 "\n q5 = 0x%16.16llx%16.16llx" 201 "\n q6 = 0x%16.16llx%16.16llx" 202 "\n q7 = 0x%16.16llx%16.16llx" 203 "\n q8 = 0x%16.16llx%16.16llx" 204 "\n q9 = 0x%16.16llx%16.16llx" 205 "\n q10 = 0x%16.16llx%16.16llx" 206 "\n q11 = 0x%16.16llx%16.16llx" 207 "\n q12 = 0x%16.16llx%16.16llx" 208 "\n q13 = 0x%16.16llx%16.16llx" 209 "\n q14 = 0x%16.16llx%16.16llx" 210 "\n q15 = 0x%16.16llx%16.16llx" 211 "\n q16 = 0x%16.16llx%16.16llx" 212 "\n q17 = 0x%16.16llx%16.16llx" 213 "\n q18 = 0x%16.16llx%16.16llx" 214 "\n q19 = 0x%16.16llx%16.16llx" 215 "\n q20 = 0x%16.16llx%16.16llx" 216 "\n q21 = 0x%16.16llx%16.16llx" 217 "\n q22 = 0x%16.16llx%16.16llx" 218 "\n q23 = 0x%16.16llx%16.16llx" 219 "\n q24 = 0x%16.16llx%16.16llx" 220 "\n q25 = 0x%16.16llx%16.16llx" 
221 "\n q26 = 0x%16.16llx%16.16llx" 222 "\n q27 = 0x%16.16llx%16.16llx" 223 "\n q28 = 0x%16.16llx%16.16llx" 224 "\n q29 = 0x%16.16llx%16.16llx" 225 "\n q30 = 0x%16.16llx%16.16llx" 226 "\n q31 = 0x%16.16llx%16.16llx" 227 "\n fpsr = 0x%8.8x" 228 "\n fpcr = 0x%8.8x\n\n", 229 m_thread->MachPortNumber(), e_regSetVFP, e_regSetVFPCount, kret, count, 230 ((uint64_t *)&m_state.context.vfp.__v[0])[0], 231 ((uint64_t *)&m_state.context.vfp.__v[0])[1], 232 ((uint64_t *)&m_state.context.vfp.__v[1])[0], 233 ((uint64_t *)&m_state.context.vfp.__v[1])[1], 234 ((uint64_t *)&m_state.context.vfp.__v[2])[0], 235 ((uint64_t *)&m_state.context.vfp.__v[2])[1], 236 ((uint64_t *)&m_state.context.vfp.__v[3])[0], 237 ((uint64_t *)&m_state.context.vfp.__v[3])[1], 238 ((uint64_t *)&m_state.context.vfp.__v[4])[0], 239 ((uint64_t *)&m_state.context.vfp.__v[4])[1], 240 ((uint64_t *)&m_state.context.vfp.__v[5])[0], 241 ((uint64_t *)&m_state.context.vfp.__v[5])[1], 242 ((uint64_t *)&m_state.context.vfp.__v[6])[0], 243 ((uint64_t *)&m_state.context.vfp.__v[6])[1], 244 ((uint64_t *)&m_state.context.vfp.__v[7])[0], 245 ((uint64_t *)&m_state.context.vfp.__v[7])[1], 246 ((uint64_t *)&m_state.context.vfp.__v[8])[0], 247 ((uint64_t *)&m_state.context.vfp.__v[8])[1], 248 ((uint64_t *)&m_state.context.vfp.__v[9])[0], 249 ((uint64_t *)&m_state.context.vfp.__v[9])[1], 250 ((uint64_t *)&m_state.context.vfp.__v[10])[0], 251 ((uint64_t *)&m_state.context.vfp.__v[10])[1], 252 ((uint64_t *)&m_state.context.vfp.__v[11])[0], 253 ((uint64_t *)&m_state.context.vfp.__v[11])[1], 254 ((uint64_t *)&m_state.context.vfp.__v[12])[0], 255 ((uint64_t *)&m_state.context.vfp.__v[12])[1], 256 ((uint64_t *)&m_state.context.vfp.__v[13])[0], 257 ((uint64_t *)&m_state.context.vfp.__v[13])[1], 258 ((uint64_t *)&m_state.context.vfp.__v[14])[0], 259 ((uint64_t *)&m_state.context.vfp.__v[14])[1], 260 ((uint64_t *)&m_state.context.vfp.__v[15])[0], 261 ((uint64_t *)&m_state.context.vfp.__v[15])[1], 262 ((uint64_t 
*)&m_state.context.vfp.__v[16])[0], 263 ((uint64_t *)&m_state.context.vfp.__v[16])[1], 264 ((uint64_t *)&m_state.context.vfp.__v[17])[0], 265 ((uint64_t *)&m_state.context.vfp.__v[17])[1], 266 ((uint64_t *)&m_state.context.vfp.__v[18])[0], 267 ((uint64_t *)&m_state.context.vfp.__v[18])[1], 268 ((uint64_t *)&m_state.context.vfp.__v[19])[0], 269 ((uint64_t *)&m_state.context.vfp.__v[19])[1], 270 ((uint64_t *)&m_state.context.vfp.__v[20])[0], 271 ((uint64_t *)&m_state.context.vfp.__v[20])[1], 272 ((uint64_t *)&m_state.context.vfp.__v[21])[0], 273 ((uint64_t *)&m_state.context.vfp.__v[21])[1], 274 ((uint64_t *)&m_state.context.vfp.__v[22])[0], 275 ((uint64_t *)&m_state.context.vfp.__v[22])[1], 276 ((uint64_t *)&m_state.context.vfp.__v[23])[0], 277 ((uint64_t *)&m_state.context.vfp.__v[23])[1], 278 ((uint64_t *)&m_state.context.vfp.__v[24])[0], 279 ((uint64_t *)&m_state.context.vfp.__v[24])[1], 280 ((uint64_t *)&m_state.context.vfp.__v[25])[0], 281 ((uint64_t *)&m_state.context.vfp.__v[25])[1], 282 ((uint64_t *)&m_state.context.vfp.__v[26])[0], 283 ((uint64_t *)&m_state.context.vfp.__v[26])[1], 284 ((uint64_t *)&m_state.context.vfp.__v[27])[0], 285 ((uint64_t *)&m_state.context.vfp.__v[27])[1], 286 ((uint64_t *)&m_state.context.vfp.__v[28])[0], 287 ((uint64_t *)&m_state.context.vfp.__v[28])[1], 288 ((uint64_t *)&m_state.context.vfp.__v[29])[0], 289 ((uint64_t *)&m_state.context.vfp.__v[29])[1], 290 ((uint64_t *)&m_state.context.vfp.__v[30])[0], 291 ((uint64_t *)&m_state.context.vfp.__v[30])[1], 292 ((uint64_t *)&m_state.context.vfp.__v[31])[0], 293 ((uint64_t *)&m_state.context.vfp.__v[31])[1], 294 m_state.context.vfp.__fpsr, m_state.context.vfp.__fpcr); 295 #endif 296 } 297 m_state.SetError(set, Read, kret); 298 return kret; 299 } 300 301 kern_return_t DNBArchMachARM64::GetEXCState(bool force) { 302 int set = e_regSetEXC; 303 // Check if we have valid cached registers 304 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 305 return KERN_SUCCESS; 306 307 // 
Read the registers from our thread 308 mach_msg_type_number_t count = e_regSetEXCCount; 309 kern_return_t kret = 310 ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64, 311 (thread_state_t)&m_state.context.exc, &count); 312 m_state.SetError(set, Read, kret); 313 return kret; 314 } 315 316 static void DumpDBGState(const arm_debug_state_t &dbg) { 317 uint32_t i = 0; 318 for (i = 0; i < 16; i++) 319 DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } " 320 "WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }", 321 i, i, dbg.__bvr[i], dbg.__bcr[i], i, i, dbg.__wvr[i], 322 dbg.__wcr[i]); 323 } 324 325 kern_return_t DNBArchMachARM64::GetDBGState(bool force) { 326 int set = e_regSetDBG; 327 328 // Check if we have valid cached registers 329 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS) 330 return KERN_SUCCESS; 331 332 // Read the registers from our thread 333 mach_msg_type_number_t count = e_regSetDBGCount; 334 kern_return_t kret = 335 ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64, 336 (thread_state_t)&m_state.dbg, &count); 337 m_state.SetError(set, Read, kret); 338 339 return kret; 340 } 341 342 kern_return_t DNBArchMachARM64::SetGPRState() { 343 int set = e_regSetGPR; 344 kern_return_t kret = ::thread_set_state( 345 m_thread->MachPortNumber(), ARM_THREAD_STATE64, 346 (thread_state_t)&m_state.context.gpr, e_regSetGPRCount); 347 m_state.SetError(set, Write, 348 kret); // Set the current write error for this register set 349 m_state.InvalidateRegisterSetState(set); // Invalidate the current register 350 // state in case registers are read 351 // back differently 352 return kret; // Return the error code 353 } 354 355 kern_return_t DNBArchMachARM64::SetVFPState() { 356 int set = e_regSetVFP; 357 kern_return_t kret = ::thread_set_state( 358 m_thread->MachPortNumber(), ARM_NEON_STATE64, 359 (thread_state_t)&m_state.context.vfp, e_regSetVFPCount); 360 m_state.SetError(set, Write, 361 kret); // Set the current write error for 
this register set 362 m_state.InvalidateRegisterSetState(set); // Invalidate the current register 363 // state in case registers are read 364 // back differently 365 return kret; // Return the error code 366 } 367 368 kern_return_t DNBArchMachARM64::SetEXCState() { 369 int set = e_regSetEXC; 370 kern_return_t kret = ::thread_set_state( 371 m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64, 372 (thread_state_t)&m_state.context.exc, e_regSetEXCCount); 373 m_state.SetError(set, Write, 374 kret); // Set the current write error for this register set 375 m_state.InvalidateRegisterSetState(set); // Invalidate the current register 376 // state in case registers are read 377 // back differently 378 return kret; // Return the error code 379 } 380 381 kern_return_t DNBArchMachARM64::SetDBGState(bool also_set_on_task) { 382 int set = e_regSetDBG; 383 kern_return_t kret = 384 ::thread_set_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64, 385 (thread_state_t)&m_state.dbg, e_regSetDBGCount); 386 if (also_set_on_task) { 387 kern_return_t task_kret = task_set_state( 388 m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64, 389 (thread_state_t)&m_state.dbg, e_regSetDBGCount); 390 if (task_kret != KERN_SUCCESS) 391 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed " 392 "to set debug control register state: " 393 "0x%8.8x.", 394 task_kret); 395 } 396 m_state.SetError(set, Write, 397 kret); // Set the current write error for this register set 398 m_state.InvalidateRegisterSetState(set); // Invalidate the current register 399 // state in case registers are read 400 // back differently 401 402 return kret; // Return the error code 403 } 404 405 void DNBArchMachARM64::ThreadWillResume() { 406 // Do we need to step this thread? If so, let the mach thread tell us so. 407 if (m_thread->IsStepping()) { 408 EnableHardwareSingleStep(true); 409 } 410 411 // Disable the triggered watchpoint temporarily before we resume. 
412 // Plus, we try to enable hardware single step to execute past the instruction 413 // which triggered our watchpoint. 414 if (m_watchpoint_did_occur) { 415 if (m_watchpoint_hw_index >= 0) { 416 kern_return_t kret = GetDBGState(false); 417 if (kret == KERN_SUCCESS && 418 !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) { 419 // The watchpoint might have been disabled by the user. We don't need 420 // to do anything at all 421 // to enable hardware single stepping. 422 m_watchpoint_did_occur = false; 423 m_watchpoint_hw_index = -1; 424 return; 425 } 426 427 DisableHardwareWatchpoint(m_watchpoint_hw_index, false); 428 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() " 429 "DisableHardwareWatchpoint(%d) called", 430 m_watchpoint_hw_index); 431 432 // Enable hardware single step to move past the watchpoint-triggering 433 // instruction. 434 m_watchpoint_resume_single_step_enabled = 435 (EnableHardwareSingleStep(true) == KERN_SUCCESS); 436 437 // If we are not able to enable single step to move past the 438 // watchpoint-triggering instruction, 439 // at least we should reset the two watchpoint member variables so that 440 // the next time around 441 // this callback function is invoked, the enclosing logical branch is 442 // skipped. 443 if (!m_watchpoint_resume_single_step_enabled) { 444 // Reset the two watchpoint member variables. 445 m_watchpoint_did_occur = false; 446 m_watchpoint_hw_index = -1; 447 DNBLogThreadedIf( 448 LOG_WATCHPOINTS, 449 "DNBArchMachARM::ThreadWillResume() failed to enable single step"); 450 } else 451 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() " 452 "succeeded to enable single step"); 453 } 454 } 455 } 456 457 bool DNBArchMachARM64::NotifyException(MachException::Data &exc) { 458 459 switch (exc.exc_type) { 460 default: 461 break; 462 case EXC_BREAKPOINT: 463 if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) { 464 // The data break address is passed as exc_data[1]. 
465 nub_addr_t addr = exc.exc_data[1]; 466 // Find the hardware index with the side effect of possibly massaging the 467 // addr to return the starting address as seen from the debugger side. 468 uint32_t hw_index = GetHardwareWatchpointHit(addr); 469 470 // One logical watchpoint was split into two watchpoint locations because 471 // it was too big. If the watchpoint exception is indicating the 2nd half 472 // of the two-parter, find the address of the 1st half and report that -- 473 // that's what lldb is going to expect to see. 474 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException " 475 "watchpoint %d was hit on address " 476 "0x%llx", 477 hw_index, (uint64_t)addr); 478 const int num_watchpoints = NumSupportedHardwareWatchpoints(); 479 for (int i = 0; i < num_watchpoints; i++) { 480 if (LoHi[i] != 0 && LoHi[i] == hw_index && LoHi[i] != i && 481 GetWatchpointAddressByIndex(i) != INVALID_NUB_ADDRESS) { 482 addr = GetWatchpointAddressByIndex(i); 483 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException " 484 "It is a linked watchpoint; " 485 "rewritten to index %d addr 0x%llx", 486 LoHi[i], (uint64_t)addr); 487 } 488 } 489 490 if (hw_index != INVALID_NUB_HW_INDEX) { 491 m_watchpoint_did_occur = true; 492 m_watchpoint_hw_index = hw_index; 493 exc.exc_data[1] = addr; 494 // Piggyback the hw_index in the exc.data. 495 exc.exc_data.push_back(hw_index); 496 } 497 498 return true; 499 } 500 break; 501 } 502 return false; 503 } 504 505 bool DNBArchMachARM64::ThreadDidStop() { 506 bool success = true; 507 508 m_state.InvalidateAllRegisterStates(); 509 510 if (m_watchpoint_resume_single_step_enabled) { 511 // Great! We now disable the hardware single step as well as re-enable the 512 // hardware watchpoint. 513 // See also ThreadWillResume(). 
514 if (EnableHardwareSingleStep(false) == KERN_SUCCESS) { 515 if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) { 516 ReenableHardwareWatchpoint(m_watchpoint_hw_index); 517 m_watchpoint_resume_single_step_enabled = false; 518 m_watchpoint_did_occur = false; 519 m_watchpoint_hw_index = -1; 520 } else { 521 DNBLogError("internal error detected: m_watchpoint_resume_step_enabled " 522 "is true but (m_watchpoint_did_occur && " 523 "m_watchpoint_hw_index >= 0) does not hold!"); 524 } 525 } else { 526 DNBLogError("internal error detected: m_watchpoint_resume_step_enabled " 527 "is true but unable to disable single step!"); 528 } 529 } 530 531 // Are we stepping a single instruction? 532 if (GetGPRState(true) == KERN_SUCCESS) { 533 // We are single stepping, was this the primary thread? 534 if (m_thread->IsStepping()) { 535 // This was the primary thread, we need to clear the trace 536 // bit if so. 537 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 538 } else { 539 // The MachThread will automatically restore the suspend count 540 // in ThreadDidStop(), so we don't need to do anything here if 541 // we weren't the primary thread the last time 542 } 543 } 544 return success; 545 } 546 547 // Set the single step bit in the processor status register. 
// Toggle the SS (single step) bit in the thread's cached MDSCR_EL1 and
// write the debug state back to the thread (not to the whole task).
kern_return_t DNBArchMachARM64::EnableHardwareSingleStep(bool enable) {
  DNBError err;
  DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);

  // The pc is read only for the log messages below.
  err = GetGPRState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
    return err.Status();
  }

  err = GetDBGState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
    return err.Status();
  }

  if (enable) {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, (uint64_t)m_state.context.gpr.__pc);
    m_state.dbg.__mdscr_el1 |= SS_ENABLE;
  } else {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, (uint64_t)m_state.context.gpr.__pc);
    m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
  }

  return SetDBGState(false);
}

// return 1 if bit "BIT" is set in "value"
static inline uint32_t bit(uint32_t value, uint32_t bit) {
  return (value >> bit) & 1u;
}

// return the bitfield "value[msbit:lsbit]".
static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit) {
  assert(msbit >= lsbit);
  uint64_t shift_left = sizeof(value) * 8 - 1 - msbit;
  value <<=
      shift_left; // shift anything above the msbit off of the unsigned edge
  value >>= shift_left + lsbit; // shift it back again down to the lsbit
                                // (including undoing any shift from above)
  return value; // return our result
}

// Return the number of hardware watchpoint register pairs available,
// detected once via sysctl (or, on 32-bit ARM, by reading DBGDIDR) and
// cached in a function-local static for subsequent calls.
uint32_t DNBArchMachARM64::NumSupportedHardwareWatchpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many watchpoints are supported dynamically...
  static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
  if (g_num_supported_hw_watchpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW breakpoints
    g_num_supported_hw_watchpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_watchpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
    } else {
// For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in
// EL0 so it can't
// access that reg. The kernel should have filled in the sysctls based on it
// though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is
      // zero
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_watchpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw watchpoints via asm(): %d",
                       g_num_supported_hw_watchpoints);
#endif
    }
  }
  return g_num_supported_hw_watchpoints;
}

// Program a hardware watchpoint covering [addr, addr+size) for read and/or
// write access. Returns the hardware slot index used, or
// INVALID_NUB_HW_INDEX on failure. A watch that straddles an 8-byte
// boundary is split into two hardware slots, recorded in LoHi[lo] = hi.
uint32_t DNBArchMachARM64::EnableHardwareWatchpoint(nub_addr_t addr,
                                                    nub_size_t size, bool read,
                                                    bool write,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint(addr = "
                   "0x%8.8llx, size = %zu, read = %u, write = %u)",
                   (uint64_t)addr, size, read, write);

  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  // Can't watch zero bytes
  if (size == 0)
    return INVALID_NUB_HW_INDEX;

  // We must watch for either read or write
  if (read == false && write == false)
    return INVALID_NUB_HW_INDEX;

  // Otherwise, can't watch more than 8 bytes per WVR/WCR pair
  if (size > 8)
    return INVALID_NUB_HW_INDEX;

  // arm64 watchpoints really have an 8-byte alignment requirement. You can
  // put a watchpoint on a 4-byte offset address but you can only watch 4
  // bytes with that watchpoint.

  // arm64 watchpoints on an 8-byte (double word) aligned addr can watch any
  // bytes in that 8-byte long region of memory. They can watch the 1st byte,
  // the 2nd byte, 3rd byte, etc, or any combination therein by setting the
  // bits in the BAS [12:5] (Byte Address Select) field of the DBGWCRn_EL1
  // reg for the watchpoint.

  // If the MASK [28:24] bits in the DBGWCRn_EL1 allow a single watchpoint to
  // monitor a larger region of memory (16 bytes, 32 bytes, or 2GB) but the
  // Byte Address Select bitfield then selects a larger range of bytes,
  // instead of individual bytes. See the ARMv8 Debug Architecture manual for
  // details. This implementation does not currently use the MASK bits; the
  // largest single region watched by a single watchpoint right now is
  // 8-bytes.

  nub_addr_t aligned_wp_address = addr & ~0x7;
  uint32_t addr_dword_offset = addr & 0x7;

  // Do we need to split up this logical watchpoint into two hardware
  // watchpoint registers?
  // e.g. a watchpoint of length 4 on address 6. We need do this with
  // one watchpoint on address 0 with bytes 6 & 7 being monitored
  // one watchpoint on address 8 with bytes 0, 1, 2, 3 being monitored

  if (addr_dword_offset + size > 8) {
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                      "EnableHardwareWatchpoint(addr = "
                                      "0x%8.8llx, size = %zu) needs two "
                                      "hardware watchpoints slots to monitor",
                     (uint64_t)addr, size);
    int low_watchpoint_size = 8 - addr_dword_offset;
    int high_watchpoint_size = addr_dword_offset + size - 8;

    // Recurse: each half fits within one dword, so neither recursion splits
    // again.
    uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read,
                                           write, also_set_on_task);
    if (lo == INVALID_NUB_HW_INDEX)
      return INVALID_NUB_HW_INDEX;
    uint32_t hi =
        EnableHardwareWatchpoint(aligned_wp_address + 8, high_watchpoint_size,
                                 read, write, also_set_on_task);
    if (hi == INVALID_NUB_HW_INDEX) {
      // Couldn't get a second slot; roll back the first half.
      DisableHardwareWatchpoint(lo, also_set_on_task);
      return INVALID_NUB_HW_INDEX;
    }
    // Tag this lo->hi mapping in our database.
    LoHi[lo] = hi;
    return lo;
  }

  // At this point
  //  1 aligned_wp_address is the requested address rounded down to 8-byte
  //    alignment
  //  2 addr_dword_offset is the offset into that double word (8-byte) region
  //    that we are watching
  //  3 size is the number of bytes within that 8-byte region that we are
  //    watching

  // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the
  // above. The bit shift and negation operation will give us 0b11 for 2,
  // 0b1111 for 4, etc, up to 0b11111111 for 8. Then we shift those bits left
  // by the offset into this dword that we are interested in. e.g. if we are
  // watching bytes 4,5,6,7 in a dword we want a BAS of 0b11110000.
  uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset;

  // Read the debug state
  kern_return_t kret = GetDBGState(false);

  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    for (i = 0; i < num_hw_watchpoints; ++i) {
      if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
        break; // We found an available hw watchpoint slot (in i)
    }

    // See if we found an available hw watchpoint slot above
    if (i < num_hw_watchpoints) {
      // DumpDBGState(m_state.dbg);

      // Clear any previous LoHi joined-watchpoint that may have been in use
      LoHi[i] = 0;

      // shift our Byte Address Select bits up to the correct bit range for
      // the DBGWCRn_EL1
      byte_address_select = byte_address_select << 5;

      // Make sure bits 1:0 are clear in our address
      m_state.dbg.__wvr[i] = aligned_wp_address; // DVA (Data Virtual Address)
      m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow
                                                   // the DVA that we will watch
                             S_USER |                // Stop only in user mode
                             (read ? WCR_LOAD : 0) | // Stop on read access?
                             (write ? WCR_STORE : 0) | // Stop on write access?
                             WCR_ENABLE; // Enable this watchpoint;

      DNBLogThreadedIf(
          LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint() "
                           "adding watchpoint on address 0x%llx with control "
                           "register value 0x%x",
          (uint64_t)m_state.dbg.__wvr[i], (uint32_t)m_state.dbg.__wcr[i]);

      // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
      // automatically, don't need to do it here.

      kret = SetDBGState(also_set_on_task);
      // DumpDBGState(m_state.dbg);

      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint() "
                                        "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint(): All "
                                        "hardware resources (%u) are in use.",
                       num_hw_watchpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}

// Re-enable a (possibly split lo/hi) watchpoint that was temporarily
// disabled, restoring the saved WVR/WCR values for each half.
bool DNBArchMachARM64::ReenableHardwareWatchpoint(uint32_t hw_index) {
  // If this logical watchpoint # is actually implemented using
  // two hardware watchpoint registers, re-enable both of them.

  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return ReenableHardwareWatchpoint_helper(hw_index) &&
           ReenableHardwareWatchpoint_helper(LoHi[hw_index]);
  } else {
    return ReenableHardwareWatchpoint_helper(hw_index);
  }
}

// Restore one hardware slot's WVR/WCR from m_disabled_watchpoints and write
// the debug state back to the thread only (not the task).
bool DNBArchMachARM64::ReenableHardwareWatchpoint_helper(uint32_t hw_index) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  // Values saved by DisableHardwareWatchpoint_helper().
  m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
  m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;

  DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                    "EnableHardwareWatchpoint( %u ) - WVR%u = "
                                    "0x%8.8llx  WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
  // automatically, don't need to do it here.

  kret = SetDBGState(false);

  return (kret == KERN_SUCCESS);
}

// Disable a (possibly split lo/hi) hardware watchpoint; both halves are
// disabled when LoHi records a pairing.
bool DNBArchMachARM64::DisableHardwareWatchpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task) &&
           DisableHardwareWatchpoint_helper(LoHi[hw_index], also_set_on_task);
  } else {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task);
  }
}

// Save one slot's WVR/WCR (so Reenable can restore them), clear the enable
// bit, and write the debug state back.
bool DNBArchMachARM64::DisableHardwareWatchpoint_helper(uint32_t hw_index,
                                                        bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  // Remember the exact register values so the watchpoint can be re-enabled
  // later with identical semantics.
  m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
  m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];

  m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
  DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                    "DisableHardwareWatchpoint( %u ) - WVR%u = "
                                    "0x%8.8llx  WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  kret = SetDBGState(also_set_on_task);

  return (kret == KERN_SUCCESS);
}

// This is for checking the Byte Address Select bits in the DBRWCRn_EL1
// control register.
// Returns -1 if the trailing bit patterns are not one of:
// { 0b???????1, 0b??????10, 0b?????100, 0b????1000, 0b???10000, 0b??100000,
//   0b?1000000, 0b10000000 }.
static inline int32_t LowestBitSet(uint32_t val) {
  for (unsigned i = 0; i < 8; ++i) {
    if (bit(val, i))
      return i;
  }
  return -1;
}

// Iterate through the debug registers; return the index of the first
// watchpoint whose address matches.
880 // As a side effect, the starting address as understood by the debugger is 881 // returned which could be 882 // different from 'addr' passed as an in/out argument. 883 uint32_t DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr) { 884 // Read the debug state 885 kern_return_t kret = GetDBGState(true); 886 // DumpDBGState(m_state.dbg); 887 DNBLogThreadedIf( 888 LOG_WATCHPOINTS, 889 "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", 890 kret); 891 DNBLogThreadedIf(LOG_WATCHPOINTS, 892 "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx", 893 (uint64_t)addr); 894 895 // This is the watchpoint value to match against, i.e., word address. 896 nub_addr_t wp_val = addr & ~((nub_addr_t)3); 897 if (kret == KERN_SUCCESS) { 898 DBG &debug_state = m_state.dbg; 899 uint32_t i, num = NumSupportedHardwareWatchpoints(); 900 for (i = 0; i < num; ++i) { 901 nub_addr_t wp_addr = GetWatchAddress(debug_state, i); 902 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::" 903 "GetHardwareWatchpointHit() slot: %u " 904 "(addr = 0x%llx).", 905 i, (uint64_t)wp_addr); 906 if (wp_val == wp_addr) { 907 uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5); 908 909 // Sanity check the byte_mask, first. 910 if (LowestBitSet(byte_mask) < 0) 911 continue; 912 913 // Check that the watchpoint is enabled. 914 if (!IsWatchpointEnabled(debug_state, i)) 915 continue; 916 917 // Compute the starting address (from the point of view of the 918 // debugger). 
919 addr = wp_addr + LowestBitSet(byte_mask); 920 return i; 921 } 922 } 923 } 924 return INVALID_NUB_HW_INDEX; 925 } 926 927 nub_addr_t DNBArchMachARM64::GetWatchpointAddressByIndex(uint32_t hw_index) { 928 kern_return_t kret = GetDBGState(true); 929 if (kret != KERN_SUCCESS) 930 return INVALID_NUB_ADDRESS; 931 const uint32_t num = NumSupportedHardwareWatchpoints(); 932 if (hw_index >= num) 933 return INVALID_NUB_ADDRESS; 934 if (IsWatchpointEnabled(m_state.dbg, hw_index)) 935 return GetWatchAddress(m_state.dbg, hw_index); 936 return INVALID_NUB_ADDRESS; 937 } 938 939 bool DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state, 940 uint32_t hw_index) { 941 // Watchpoint Control Registers, bitfield definitions 942 // ... 943 // Bits Value Description 944 // [0] 0 Watchpoint disabled 945 // 1 Watchpoint enabled. 946 return (debug_state.__wcr[hw_index] & 1u); 947 } 948 949 nub_addr_t DNBArchMachARM64::GetWatchAddress(const DBG &debug_state, 950 uint32_t hw_index) { 951 // Watchpoint Value Registers, bitfield definitions 952 // Bits Description 953 // [31:2] Watchpoint value (word address, i.e., 4-byte aligned) 954 // [1:0] RAZ/SBZP 955 return bits(debug_state.__wvr[hw_index], 63, 0); 956 } 957 958 // Register information definitions for 64 bit ARMv8. 
// debugserver-internal GPR register numbers.  The w<n> entries are
// pseudo-registers aliasing the low 32 bits of the corresponding x<n>.
enum gpr_regnums {
  gpr_x0 = 0, gpr_x1, gpr_x2, gpr_x3, gpr_x4, gpr_x5, gpr_x6, gpr_x7,
  gpr_x8, gpr_x9, gpr_x10, gpr_x11, gpr_x12, gpr_x13, gpr_x14, gpr_x15,
  gpr_x16, gpr_x17, gpr_x18, gpr_x19, gpr_x20, gpr_x21, gpr_x22, gpr_x23,
  gpr_x24, gpr_x25, gpr_x26, gpr_x27, gpr_x28,
  gpr_fp, gpr_x29 = gpr_fp,   // frame pointer is x29
  gpr_lr, gpr_x30 = gpr_lr,   // link register is x30
  gpr_sp, gpr_x31 = gpr_sp,   // stack pointer is x31
  gpr_pc, gpr_cpsr,
  // 32-bit w<n> pseudo-registers (low half of x<n>).
  gpr_w0, gpr_w1, gpr_w2, gpr_w3, gpr_w4, gpr_w5, gpr_w6, gpr_w7,
  gpr_w8, gpr_w9, gpr_w10, gpr_w11, gpr_w12, gpr_w13, gpr_w14, gpr_w15,
  gpr_w16, gpr_w17, gpr_w18, gpr_w19, gpr_w20, gpr_w21, gpr_w22, gpr_w23,
  gpr_w24, gpr_w25, gpr_w26, gpr_w27, gpr_w28

};

// VFP/NEON register numbers: 128-bit v registers, status/control registers,
// then the s (32-bit) and d (64-bit) pseudo-register views of v<n>.
enum {
  vfp_v0 = 0, vfp_v1, vfp_v2, vfp_v3, vfp_v4, vfp_v5, vfp_v6, vfp_v7,
  vfp_v8, vfp_v9, vfp_v10, vfp_v11, vfp_v12, vfp_v13, vfp_v14, vfp_v15,
  vfp_v16, vfp_v17, vfp_v18, vfp_v19, vfp_v20, vfp_v21, vfp_v22, vfp_v23,
  vfp_v24, vfp_v25, vfp_v26, vfp_v27, vfp_v28, vfp_v29, vfp_v30, vfp_v31,
  vfp_fpsr, vfp_fpcr,

  // lower 32 bits of the corresponding vfp_v<n> reg.
  vfp_s0, vfp_s1, vfp_s2, vfp_s3, vfp_s4, vfp_s5, vfp_s6, vfp_s7,
  vfp_s8, vfp_s9, vfp_s10, vfp_s11, vfp_s12, vfp_s13, vfp_s14, vfp_s15,
  vfp_s16, vfp_s17, vfp_s18, vfp_s19, vfp_s20, vfp_s21, vfp_s22, vfp_s23,
  vfp_s24, vfp_s25, vfp_s26, vfp_s27, vfp_s28, vfp_s29, vfp_s30, vfp_s31,

  // lower 64 bits of the corresponding vfp_v<n> reg.
  vfp_d0, vfp_d1, vfp_d2, vfp_d3, vfp_d4, vfp_d5, vfp_d6, vfp_d7,
  vfp_d8, vfp_d9, vfp_d10, vfp_d11, vfp_d12, vfp_d13, vfp_d14, vfp_d15,
  vfp_d16, vfp_d17, vfp_d18, vfp_d19, vfp_d20, vfp_d21, vfp_d22, vfp_d23,
  vfp_d24, vfp_d25, vfp_d26, vfp_d27, vfp_d28, vfp_d29, vfp_d30, vfp_d31
};

// Exception-state register numbers (fault address, syndrome, exception #).
enum { exc_far = 0, exc_esr, exc_exception };

// These numbers from the "DWARF for the ARM 64-bit Architecture (AArch64)"
// document.

enum {
  dwarf_x0 = 0, dwarf_x1, dwarf_x2, dwarf_x3, dwarf_x4, dwarf_x5, dwarf_x6,
  dwarf_x7, dwarf_x8, dwarf_x9, dwarf_x10, dwarf_x11, dwarf_x12, dwarf_x13,
  dwarf_x14, dwarf_x15, dwarf_x16, dwarf_x17, dwarf_x18, dwarf_x19, dwarf_x20,
  dwarf_x21, dwarf_x22, dwarf_x23, dwarf_x24, dwarf_x25, dwarf_x26, dwarf_x27,
  dwarf_x28, dwarf_x29, dwarf_x30, dwarf_x31,
  dwarf_pc = 32,
  dwarf_elr_mode = 33,
  dwarf_fp = dwarf_x29,
  dwarf_lr = dwarf_x30,
  dwarf_sp = dwarf_x31,
  // 34-63 reserved

  // V0-V31 (128 bit vector registers)
  dwarf_v0 = 64, dwarf_v1, dwarf_v2, dwarf_v3, dwarf_v4, dwarf_v5, dwarf_v6,
  dwarf_v7, dwarf_v8, dwarf_v9, dwarf_v10, dwarf_v11, dwarf_v12, dwarf_v13,
  dwarf_v14, dwarf_v15, dwarf_v16, dwarf_v17, dwarf_v18, dwarf_v19, dwarf_v20,
  dwarf_v21, dwarf_v22, dwarf_v23, dwarf_v24, dwarf_v25, dwarf_v26, dwarf_v27,
  dwarf_v28, dwarf_v29, dwarf_v30, dwarf_v31

  // 96-127 reserved
};

// Register numbers as reported over the remote (gdb-remote) protocol.
enum {
  debugserver_gpr_x0 = 0, debugserver_gpr_x1, debugserver_gpr_x2,
  debugserver_gpr_x3, debugserver_gpr_x4, debugserver_gpr_x5,
  debugserver_gpr_x6, debugserver_gpr_x7, debugserver_gpr_x8,
  debugserver_gpr_x9, debugserver_gpr_x10, debugserver_gpr_x11,
  debugserver_gpr_x12, debugserver_gpr_x13, debugserver_gpr_x14,
  debugserver_gpr_x15, debugserver_gpr_x16, debugserver_gpr_x17,
  debugserver_gpr_x18, debugserver_gpr_x19, debugserver_gpr_x20,
  debugserver_gpr_x21, debugserver_gpr_x22, debugserver_gpr_x23,
  debugserver_gpr_x24, debugserver_gpr_x25, debugserver_gpr_x26,
  debugserver_gpr_x27, debugserver_gpr_x28,
  debugserver_gpr_fp, // x29
  debugserver_gpr_lr, // x30
  debugserver_gpr_sp, // sp aka xsp
  debugserver_gpr_pc,
  debugserver_gpr_cpsr,
  debugserver_vfp_v0, debugserver_vfp_v1, debugserver_vfp_v2,
  debugserver_vfp_v3, debugserver_vfp_v4, debugserver_vfp_v5,
  debugserver_vfp_v6, debugserver_vfp_v7, debugserver_vfp_v8,
  debugserver_vfp_v9, debugserver_vfp_v10, debugserver_vfp_v11,
  debugserver_vfp_v12, debugserver_vfp_v13, debugserver_vfp_v14,
  debugserver_vfp_v15, debugserver_vfp_v16, debugserver_vfp_v17,
  debugserver_vfp_v18, debugserver_vfp_v19, debugserver_vfp_v20,
  debugserver_vfp_v21, debugserver_vfp_v22, debugserver_vfp_v23,
  debugserver_vfp_v24, debugserver_vfp_v25, debugserver_vfp_v26,
  debugserver_vfp_v27, debugserver_vfp_v28, debugserver_vfp_v29,
  debugserver_vfp_v30, debugserver_vfp_v31,
  debugserver_vfp_fpsr, debugserver_vfp_fpcr
};

// "Contained-in" lists: each w<n> pseudo-register lives inside x<n>.
const char *g_contained_x0[]{"x0", NULL};
const char *g_contained_x1[]{"x1", NULL};
const char *g_contained_x2[]{"x2", NULL};
const char *g_contained_x3[]{"x3", NULL};
const char *g_contained_x4[]{"x4", NULL};
const char *g_contained_x5[]{"x5", NULL};
const char *g_contained_x6[]{"x6", NULL};
const char *g_contained_x7[]{"x7", NULL};
const char *g_contained_x8[]{"x8", NULL};
const char *g_contained_x9[]{"x9", NULL};
const char *g_contained_x10[]{"x10", NULL};
const char *g_contained_x11[]{"x11", NULL};
const char *g_contained_x12[]{"x12", NULL};
const char *g_contained_x13[]{"x13", NULL};
const char *g_contained_x14[]{"x14", NULL};
const char *g_contained_x15[]{"x15", NULL};
const char *g_contained_x16[]{"x16", NULL};
const char *g_contained_x17[]{"x17", NULL};
const char *g_contained_x18[]{"x18", NULL};
const char *g_contained_x19[]{"x19", NULL};
const char *g_contained_x20[]{"x20", NULL};
const char *g_contained_x21[]{"x21", NULL};
const char *g_contained_x22[]{"x22", NULL};
const char *g_contained_x23[]{"x23", NULL};
const char *g_contained_x24[]{"x24", NULL};
const char *g_contained_x25[]{"x25", NULL};
const char *g_contained_x26[]{"x26", NULL};
const char *g_contained_x27[]{"x27", NULL};
const char *g_contained_x28[]{"x28", NULL};

// Invalidation lists: writing x<n> invalidates the cached w<n> (and vice
// versa), since they share storage.
const char *g_invalidate_x0[]{"x0", "w0", NULL};
const char *g_invalidate_x1[]{"x1", "w1", NULL};
const char *g_invalidate_x2[]{"x2", "w2", NULL};
const char *g_invalidate_x3[]{"x3", "w3", NULL};
const char *g_invalidate_x4[]{"x4", "w4", NULL};
const char *g_invalidate_x5[]{"x5", "w5", NULL};
const char *g_invalidate_x6[]{"x6", "w6", NULL};
const char *g_invalidate_x7[]{"x7", "w7", NULL};
const char *g_invalidate_x8[]{"x8", "w8", NULL};
const char *g_invalidate_x9[]{"x9", "w9", NULL};
const char *g_invalidate_x10[]{"x10", "w10", NULL};
const char *g_invalidate_x11[]{"x11", "w11", NULL};
const char *g_invalidate_x12[]{"x12", "w12", NULL};
const char *g_invalidate_x13[]{"x13", "w13", NULL};
const char *g_invalidate_x14[]{"x14", "w14", NULL};
const char *g_invalidate_x15[]{"x15", "w15", NULL};
const char *g_invalidate_x16[]{"x16", "w16", NULL};
const char *g_invalidate_x17[]{"x17", "w17", NULL};
const char *g_invalidate_x18[]{"x18", "w18", NULL};
const char *g_invalidate_x19[]{"x19", "w19", NULL};
const char *g_invalidate_x20[]{"x20", "w20", NULL};
const char *g_invalidate_x21[]{"x21", "w21", NULL};
const char *g_invalidate_x22[]{"x22", "w22", NULL};
const char *g_invalidate_x23[]{"x23", "w23", NULL};
const char *g_invalidate_x24[]{"x24", "w24", NULL};
const char *g_invalidate_x25[]{"x25", "w25", NULL};
const char *g_invalidate_x26[]{"x26", "w26", NULL};
const char *g_invalidate_x27[]{"x27", "w27", NULL};
const char *g_invalidate_x28[]{"x28", "w28", NULL};

// Byte offset of __x[idx] within the GPR thread-state structure.
#define GPR_OFFSET_IDX(idx) (offsetof(DNBArchMachARM64::GPR, __x[idx]))

// Byte offset of a named GPR field (fp/lr/sp/pc/cpsr).
#define GPR_OFFSET_NAME(reg) (offsetof(DNBArchMachARM64::GPR, __##reg))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.
#define DEFINE_GPR_IDX(idx, reg, alt, gen)                                     \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx),      \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL,            \
        g_invalidate_x##idx                                                    \
  }
#define DEFINE_GPR_NAME(reg, alt, gen)                                         \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg),     \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL       \
  }
// Pseudo-registers (w<n>) have no storage offset of their own; they are
// resolved through their g_contained_x<n> parent.
#define DEFINE_PSEUDO_GPR_IDX(idx, reg)                                        \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM,   \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        g_contained_x##idx, g_invalidate_x##idx                                \
  }

//_STRUCT_ARM_THREAD_STATE64
//{
//	uint64_t    x[29];	/* General purpose registers x0-x28 */
//	uint64_t    fp;		/* Frame pointer x29 */
//	uint64_t    lr;		/* Link register x30 */
//	uint64_t    sp;		/* Stack pointer x31 */
//	uint64_t    pc;		/* Program counter */
//	uint32_t    cpsr;	/* Current program status register */
//};

// General purpose registers
const DNBRegisterInfo DNBArchMachARM64::g_gpr_registers[] = {
    DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1),
    DEFINE_GPR_IDX(1, x1, "arg2", GENERIC_REGNUM_ARG2),
    DEFINE_GPR_IDX(2, x2, "arg3", GENERIC_REGNUM_ARG3),
    DEFINE_GPR_IDX(3, x3, "arg4", GENERIC_REGNUM_ARG4),
    DEFINE_GPR_IDX(4, x4, "arg5", GENERIC_REGNUM_ARG5),
    DEFINE_GPR_IDX(5, x5, "arg6", GENERIC_REGNUM_ARG6),
    DEFINE_GPR_IDX(6, x6, "arg7", GENERIC_REGNUM_ARG7),
    DEFINE_GPR_IDX(7, x7, "arg8", GENERIC_REGNUM_ARG8),
    DEFINE_GPR_IDX(8, x8, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(9, x9, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(10, x10, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(11, x11, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(12, x12, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(13, x13, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(14, x14, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(15, x15, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(16, x16, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(17, x17, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(18, x18, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(19, x19, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(20, x20, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(21, x21, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(22, x22, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(23, x23, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(24, x24, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(25, x25, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(26, x26, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(27, x27, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(28, x28, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_NAME(fp, "x29", GENERIC_REGNUM_FP),
    DEFINE_GPR_NAME(lr, "x30", GENERIC_REGNUM_RA),
    DEFINE_GPR_NAME(sp, "xsp", GENERIC_REGNUM_SP),
    DEFINE_GPR_NAME(pc, NULL, GENERIC_REGNUM_PC),

    // in armv7 we specify that writing to the CPSR should invalidate r8-12, sp,
    // lr.
    // this should be specified for arm64 too even though debugserver is only
    // used for
    // userland debugging.
    {e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4,
     GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, INVALID_NUB_REGNUM,
     debugserver_gpr_cpsr, NULL, NULL},

    DEFINE_PSEUDO_GPR_IDX(0, w0),
    DEFINE_PSEUDO_GPR_IDX(1, w1),
    DEFINE_PSEUDO_GPR_IDX(2, w2),
    DEFINE_PSEUDO_GPR_IDX(3, w3),
    DEFINE_PSEUDO_GPR_IDX(4, w4),
    DEFINE_PSEUDO_GPR_IDX(5, w5),
    DEFINE_PSEUDO_GPR_IDX(6, w6),
    DEFINE_PSEUDO_GPR_IDX(7, w7),
    DEFINE_PSEUDO_GPR_IDX(8, w8),
    DEFINE_PSEUDO_GPR_IDX(9, w9),
    DEFINE_PSEUDO_GPR_IDX(10, w10),
    DEFINE_PSEUDO_GPR_IDX(11, w11),
    DEFINE_PSEUDO_GPR_IDX(12, w12),
    DEFINE_PSEUDO_GPR_IDX(13, w13),
    DEFINE_PSEUDO_GPR_IDX(14, w14),
    DEFINE_PSEUDO_GPR_IDX(15, w15),
    DEFINE_PSEUDO_GPR_IDX(16, w16),
    DEFINE_PSEUDO_GPR_IDX(17, w17),
    DEFINE_PSEUDO_GPR_IDX(18, w18),
    DEFINE_PSEUDO_GPR_IDX(19, w19),
    DEFINE_PSEUDO_GPR_IDX(20, w20),
    DEFINE_PSEUDO_GPR_IDX(21, w21),
    DEFINE_PSEUDO_GPR_IDX(22, w22),
    DEFINE_PSEUDO_GPR_IDX(23, w23),
    DEFINE_PSEUDO_GPR_IDX(24, w24),
    DEFINE_PSEUDO_GPR_IDX(25, w25),
    DEFINE_PSEUDO_GPR_IDX(26, w26),
    DEFINE_PSEUDO_GPR_IDX(27, w27),
    DEFINE_PSEUDO_GPR_IDX(28, w28)};

// "Contained-in" lists for the s<n>/d<n> pseudo-registers: both live inside
// the 128-bit v<n>.
const char *g_contained_v0[]{"v0", NULL};
const char *g_contained_v1[]{"v1", NULL};
const char *g_contained_v2[]{"v2", NULL};
const char *g_contained_v3[]{"v3", NULL};
const char *g_contained_v4[]{"v4", NULL};
const char *g_contained_v5[]{"v5", NULL};
const char *g_contained_v6[]{"v6", NULL};
const char *g_contained_v7[]{"v7", NULL};
const char *g_contained_v8[]{"v8", NULL};
const char *g_contained_v9[]{"v9", NULL};
const char *g_contained_v10[]{"v10", NULL};
const char *g_contained_v11[]{"v11", NULL};
const char *g_contained_v12[]{"v12", NULL};
const char *g_contained_v13[]{"v13", NULL};
const char *g_contained_v14[]{"v14", NULL};
const char *g_contained_v15[]{"v15", NULL};
const char *g_contained_v16[]{"v16", NULL};
const char *g_contained_v17[]{"v17", NULL};
const char *g_contained_v18[]{"v18", NULL};
const char *g_contained_v19[]{"v19", NULL};
const char *g_contained_v20[]{"v20", NULL};
const char *g_contained_v21[]{"v21", NULL};
const char *g_contained_v22[]{"v22", NULL};
const char *g_contained_v23[]{"v23", NULL};
const char *g_contained_v24[]{"v24", NULL};
const char *g_contained_v25[]{"v25", NULL};
const char *g_contained_v26[]{"v26", NULL};
const char *g_contained_v27[]{"v27", NULL};
const char *g_contained_v28[]{"v28", NULL};
const char *g_contained_v29[]{"v29", NULL};
const char *g_contained_v30[]{"v30", NULL};
const char *g_contained_v31[]{"v31", NULL};

// Writing any of v<n>/d<n>/s<n> invalidates all three aliased views.
const char *g_invalidate_v0[]{"v0", "d0", "s0", NULL};
const char *g_invalidate_v1[]{"v1", "d1", "s1", NULL};
const char *g_invalidate_v2[]{"v2", "d2", "s2", NULL};
const char *g_invalidate_v3[]{"v3", "d3", "s3", NULL};
const char *g_invalidate_v4[]{"v4", "d4", "s4", NULL};
const char *g_invalidate_v5[]{"v5", "d5", "s5", NULL};
const char *g_invalidate_v6[]{"v6", "d6", "s6", NULL};
const char *g_invalidate_v7[]{"v7", "d7", "s7", NULL};
const char *g_invalidate_v8[]{"v8", "d8", "s8", NULL};
const char *g_invalidate_v9[]{"v9", "d9", "s9", NULL};
const char *g_invalidate_v10[]{"v10", "d10", "s10", NULL};
const char *g_invalidate_v11[]{"v11", "d11", "s11", NULL};
const char *g_invalidate_v12[]{"v12", "d12", "s12", NULL};
const char *g_invalidate_v13[]{"v13", "d13", "s13", NULL};
const char *g_invalidate_v14[]{"v14", "d14", "s14", NULL};
const char *g_invalidate_v15[]{"v15", "d15", "s15", NULL};
const char *g_invalidate_v16[]{"v16", "d16", "s16", NULL};
const char *g_invalidate_v17[]{"v17", "d17", "s17", NULL};
const char *g_invalidate_v18[]{"v18", "d18", "s18", NULL};
const char *g_invalidate_v19[]{"v19", "d19", "s19", NULL};
const char *g_invalidate_v20[]{"v20", "d20", "s20", NULL};
const char *g_invalidate_v21[]{"v21", "d21", "s21", NULL};
const char *g_invalidate_v22[]{"v22", "d22", "s22", NULL};
const char *g_invalidate_v23[]{"v23", "d23", "s23", NULL};
const char *g_invalidate_v24[]{"v24", "d24", "s24", NULL};
const char *g_invalidate_v25[]{"v25", "d25", "s25", NULL};
const char *g_invalidate_v26[]{"v26", "d26", "s26", NULL};
const char *g_invalidate_v27[]{"v27", "d27", "s27", NULL};
const char *g_invalidate_v28[]{"v28", "d28", "s28", NULL};
const char *g_invalidate_v29[]{"v29", "d29", "s29", NULL};
const char *g_invalidate_v30[]{"v30", "d30", "s30", NULL};
const char *g_invalidate_v31[]{"v31", "d31", "s31", NULL};

// Offset of the idx'th 16-byte v register inside the whole Context
// (FPU state is stored as __v[] when building for arm64, otherwise as an
// opaque byte blob).
#if defined(__arm64__) || defined(__aarch64__)
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, __v) + (idx * 16) +                         \
   offsetof(DNBArchMachARM64::Context, vfp))
#else
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, opaque) + (idx * 16) +                      \
   offsetof(DNBArchMachARM64::Context, vfp))
#endif
#define VFP_OFFSET_NAME(reg)                                                   \
  (offsetof(DNBArchMachARM64::FPU, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, vfp))
#define EXC_OFFSET(reg)                                                        \
  (offsetof(DNBArchMachARM64::EXC, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, exc))

//#define FLOAT_FORMAT Float
#define DEFINE_VFP_V_IDX(idx)                                                  \
  {                                                                            \
    e_regSetVFP, vfp_v##idx, "v" #idx, "q" #idx, Vector, VectorOfUInt8, 16,    \
        VFP_V_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_v##idx,               \
        INVALID_NUB_REGNUM, debugserver_vfp_v##idx, NULL, g_invalidate_v##idx  \
  }
// s<n>/d<n> are pseudo-registers with no storage offset; they resolve
// through g_contained_v<n>.
#define DEFINE_PSEUDO_VFP_S_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, Float, 4, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
#define DEFINE_PSEUDO_VFP_D_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, Float, 8, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }

// Floating point registers
const DNBRegisterInfo DNBArchMachARM64::g_vfp_registers[] = {
    DEFINE_VFP_V_IDX(0),
    DEFINE_VFP_V_IDX(1),
    DEFINE_VFP_V_IDX(2),
    DEFINE_VFP_V_IDX(3),
    DEFINE_VFP_V_IDX(4),
    DEFINE_VFP_V_IDX(5),
    DEFINE_VFP_V_IDX(6),
    DEFINE_VFP_V_IDX(7),
    DEFINE_VFP_V_IDX(8),
    DEFINE_VFP_V_IDX(9),
    DEFINE_VFP_V_IDX(10),
    DEFINE_VFP_V_IDX(11),
    DEFINE_VFP_V_IDX(12),
    DEFINE_VFP_V_IDX(13),
    DEFINE_VFP_V_IDX(14),
    DEFINE_VFP_V_IDX(15),
    DEFINE_VFP_V_IDX(16),
    DEFINE_VFP_V_IDX(17),
    DEFINE_VFP_V_IDX(18),
    DEFINE_VFP_V_IDX(19),
    DEFINE_VFP_V_IDX(20),
    DEFINE_VFP_V_IDX(21),
    DEFINE_VFP_V_IDX(22),
    DEFINE_VFP_V_IDX(23),
    DEFINE_VFP_V_IDX(24),
    DEFINE_VFP_V_IDX(25),
    DEFINE_VFP_V_IDX(26),
    DEFINE_VFP_V_IDX(27),
    DEFINE_VFP_V_IDX(28),
    DEFINE_VFP_V_IDX(29),
    DEFINE_VFP_V_IDX(30),
    DEFINE_VFP_V_IDX(31),
    // fpsr/fpcr live immediately after the 32 v registers (offset of the
    // would-be v32), fpsr first then fpcr 4 bytes later.
    {e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    DEFINE_PSEUDO_VFP_S_IDX(0),
    DEFINE_PSEUDO_VFP_S_IDX(1),
    DEFINE_PSEUDO_VFP_S_IDX(2),
    DEFINE_PSEUDO_VFP_S_IDX(3),
    DEFINE_PSEUDO_VFP_S_IDX(4),
    DEFINE_PSEUDO_VFP_S_IDX(5),
    DEFINE_PSEUDO_VFP_S_IDX(6),
    DEFINE_PSEUDO_VFP_S_IDX(7),
    DEFINE_PSEUDO_VFP_S_IDX(8),
    DEFINE_PSEUDO_VFP_S_IDX(9),
    DEFINE_PSEUDO_VFP_S_IDX(10),
    DEFINE_PSEUDO_VFP_S_IDX(11),
    DEFINE_PSEUDO_VFP_S_IDX(12),
    DEFINE_PSEUDO_VFP_S_IDX(13),
    DEFINE_PSEUDO_VFP_S_IDX(14),
    DEFINE_PSEUDO_VFP_S_IDX(15),
    DEFINE_PSEUDO_VFP_S_IDX(16),
    DEFINE_PSEUDO_VFP_S_IDX(17),
    DEFINE_PSEUDO_VFP_S_IDX(18),
    DEFINE_PSEUDO_VFP_S_IDX(19),
    DEFINE_PSEUDO_VFP_S_IDX(20),
    DEFINE_PSEUDO_VFP_S_IDX(21),
    DEFINE_PSEUDO_VFP_S_IDX(22),
    DEFINE_PSEUDO_VFP_S_IDX(23),
    DEFINE_PSEUDO_VFP_S_IDX(24),
    DEFINE_PSEUDO_VFP_S_IDX(25),
    DEFINE_PSEUDO_VFP_S_IDX(26),
    DEFINE_PSEUDO_VFP_S_IDX(27),
    DEFINE_PSEUDO_VFP_S_IDX(28),
    DEFINE_PSEUDO_VFP_S_IDX(29),
    DEFINE_PSEUDO_VFP_S_IDX(30),
    DEFINE_PSEUDO_VFP_S_IDX(31),

    DEFINE_PSEUDO_VFP_D_IDX(0),
    DEFINE_PSEUDO_VFP_D_IDX(1),
    DEFINE_PSEUDO_VFP_D_IDX(2),
    DEFINE_PSEUDO_VFP_D_IDX(3),
    DEFINE_PSEUDO_VFP_D_IDX(4),
    DEFINE_PSEUDO_VFP_D_IDX(5),
    DEFINE_PSEUDO_VFP_D_IDX(6),
    DEFINE_PSEUDO_VFP_D_IDX(7),
    DEFINE_PSEUDO_VFP_D_IDX(8),
    DEFINE_PSEUDO_VFP_D_IDX(9),
    DEFINE_PSEUDO_VFP_D_IDX(10),
    DEFINE_PSEUDO_VFP_D_IDX(11),
    DEFINE_PSEUDO_VFP_D_IDX(12),
    DEFINE_PSEUDO_VFP_D_IDX(13),
    DEFINE_PSEUDO_VFP_D_IDX(14),
    DEFINE_PSEUDO_VFP_D_IDX(15),
    DEFINE_PSEUDO_VFP_D_IDX(16),
    DEFINE_PSEUDO_VFP_D_IDX(17),
    DEFINE_PSEUDO_VFP_D_IDX(18),
    DEFINE_PSEUDO_VFP_D_IDX(19),
    DEFINE_PSEUDO_VFP_D_IDX(20),
    DEFINE_PSEUDO_VFP_D_IDX(21),
    DEFINE_PSEUDO_VFP_D_IDX(22),
    DEFINE_PSEUDO_VFP_D_IDX(23),
    DEFINE_PSEUDO_VFP_D_IDX(24),
    DEFINE_PSEUDO_VFP_D_IDX(25),
    DEFINE_PSEUDO_VFP_D_IDX(26),
    DEFINE_PSEUDO_VFP_D_IDX(27),
    DEFINE_PSEUDO_VFP_D_IDX(28),
    DEFINE_PSEUDO_VFP_D_IDX(29),
    DEFINE_PSEUDO_VFP_D_IDX(30),
    DEFINE_PSEUDO_VFP_D_IDX(31)

};

//_STRUCT_ARM_EXCEPTION_STATE64
//{
//	uint64_t	far; /* Virtual Fault Address */
//	uint32_t	esr; /* Exception syndrome */
//	uint32_t	exception; /* number of arm exception taken */
//};

// Exception registers
const DNBRegisterInfo DNBArchMachARM64::g_exc_registers[] = {
    {e_regSetEXC, exc_far, "far", NULL, Uint, Hex, 8, EXC_OFFSET(__far),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_esr, "esr", NULL, Uint, Hex, 4, EXC_OFFSET(__esr),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_exception, "exception", NULL, Uint, Hex, 4,
     EXC_OFFSET(__exception), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};

// Number of registers in each register set
const size_t DNBArchMachARM64::k_num_gpr_registers =
    sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_vfp_registers =
    sizeof(g_vfp_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_exc_registers =
    sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_all_registers =
    k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;

// Register set definitions. The first definitions at register set index
// of zero is for all registers, followed by other registers sets. The
// register information for the all register set need not be filled in.
const DNBRegisterSetInfo DNBArchMachARM64::g_reg_sets[] = {
    {"ARM64 Registers", NULL, k_num_all_registers},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_vfp_registers, k_num_vfp_registers},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
// Total number of register sets for this architecture
const size_t DNBArchMachARM64::k_num_register_sets =
    sizeof(g_reg_sets) / sizeof(DNBRegisterSetInfo);

// Return the table of register sets and, via num_reg_sets, its length.
const DNBRegisterSetInfo *
DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  return g_reg_sets;
}

// Translate a (REGISTER_SET_GENERIC, GENERIC_REGNUM_*) pair into the
// concrete (e_regSetGPR, gpr_*) pair in place.  Returns false only for an
// unknown generic register; non-generic sets pass through untouched.
bool DNBArchMachARM64::FixGenericRegisterNumber(uint32_t &set, uint32_t &reg) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_pc;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_sp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_fp;
      break;

    case GENERIC_REGNUM_RA: // Return Address
      set = e_regSetGPR;
      reg = gpr_lr;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_cpsr;
      break;

    case GENERIC_REGNUM_ARG1:
    case GENERIC_REGNUM_ARG2:
    case GENERIC_REGNUM_ARG3:
    case GENERIC_REGNUM_ARG4:
    case GENERIC_REGNUM_ARG5:
    case GENERIC_REGNUM_ARG6:
      set = e_regSetGPR;
      reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
      break;

    default:
      return false;
    }
  }
  return true;
}
// Read one register's current value into *value (also fills value->info).
// Refreshes the cached thread state for the register's set first.
// Returns false for unknown registers or if the state fetch fails.
bool DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg,
                                        DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    value->info = *regInfo;
    switch (set) {
    case e_regSetGPR:
      // x0..x28, fp, lr, sp, pc are contiguous uint64_t fields, so indexing
      // __x[] past 28 deliberately reaches fp/lr/sp/pc.
      if (reg <= gpr_pc) {
        value->value.uint64 = m_state.context.gpr.__x[reg];
        return true;
      } else if (reg == gpr_cpsr) {
        value->value.uint32 = m_state.context.gpr.__cpsr;
        return true;
      }
      break;

    case e_regSetVFP:

      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0],
               16);
#else
        // Cross-build: FPU state is an opaque blob; v regs are 16 bytes each.
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               16);
#endif
        return true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
#endif
        return true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
#endif
        return true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
        // s<n> = low 4 bytes of v<n>.
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0],
               4);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               4);
#endif
        return true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
        // d<n> = low 8 bytes of v<n>.
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0],
               8);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               8);
#endif
        return true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        value->value.uint64 = m_state.context.exc.__far;
        return true;
      } else if (reg == exc_esr) {
        value->value.uint32 = m_state.context.exc.__esr;
        return true;
      } else if (reg == exc_exception) {
        value->value.uint32 = m_state.context.exc.__exception;
        return true;
      }
      break;
    }
  }
  return false;
}

// Write one register's value into the cached context and push it back to
// the thread via SetRegisterState().  Mirrors GetRegisterValue.
bool DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg,
                                        const DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  bool success = false;
  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
        m_state.context.gpr.__x[reg] = value->value.uint64;
        success = true;
      } else if (reg == gpr_cpsr) {
        m_state.context.gpr.__cpsr = value->value.uint32;
        success = true;
      }
      break;

    case e_regSetVFP:
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8,
               16);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               &value->value.v_uint8, 16);
#endif
        success = true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
#else
        // NOTE(review): unlike every sibling branch, this cast lacks the '&'
        // before m_state.context.vfp.opaque.  If 'opaque' is an array the two
        // spellings decay to the same address; if it is a scalar/struct this
        // is a different pointer — confirm against the FPU declaration.
        memcpy(((uint8_t *)m_state.context.vfp.opaque) + (32 * 16) + 4,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8,
               4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               &value->value.v_uint8, 4);
#endif
        success = true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8,
               8);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               &value->value.v_uint8, 8);
#endif
        success = true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        m_state.context.exc.__far = value->value.uint64;
        success = true;
      } else if (reg == exc_esr) {
        m_state.context.exc.__esr = value->value.uint32;
        success = true;
      } else if (reg == exc_exception) {
        m_state.context.exc.__exception = value->value.uint32;
        success = true;
      }
      break;
    }
  }
  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}

// Fetch the thread state for one register set (or all sets).  The kern
// results are bitwise-OR'ed for e_regSetALL: KERN_SUCCESS is 0, so the OR
// is non-zero iff any fetch failed.
kern_return_t DNBArchMachARM64::GetRegisterState(int set, bool force) {
  switch (set) {
  case e_regSetALL:
    return GetGPRState(force) | GetVFPState(force) | GetEXCState(force) |
           GetDBGState(force);
  case e_regSetGPR:
    return GetGPRState(force);
  case e_regSetVFP:
    return GetVFPState(force);
  case e_regSetEXC:
    return GetEXCState(force);
  case e_regSetDBG:
    return GetDBGState(force);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

// Push the cached state for one register set (or all) back to the thread.
kern_return_t DNBArchMachARM64::SetRegisterState(int set) {
  // Make sure we have a valid context to set.
  kern_return_t err = GetRegisterState(set, false);
  if (err != KERN_SUCCESS)
    return err;

  switch (set) {
  case e_regSetALL:
    return SetGPRState() | SetVFPState() | SetEXCState() | SetDBGState(false);
  case e_regSetGPR:
    return SetGPRState();
  case e_regSetVFP:
    return SetVFPState();
  case e_regSetEXC:
    return SetEXCState();
  case e_regSetDBG:
    return SetDBGState(false);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

bool DNBArchMachARM64::RegisterSetStateIsValid(int set) const {
  return m_state.RegsAreValid(set);
}

// Serialize GPR+VFP+EXC state into buf (packed, no inter-struct padding).
// Always returns the full size needed, even when buf is NULL, so callers
// can size their buffer first.  Returns 0 if refreshing the state fails.
nub_size_t DNBArchMachARM64::GetRegisterContext(void *buf, nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf && buf_len) {
    if (size > buf_len)
      size = buf_len;

    bool force = false;
    // Non-zero OR means at least one state fetch failed (KERN_SUCCESS == 0).
    if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
      return 0;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = (uint8_t *)buf;
    ::memcpy(p, &m_state.context.gpr, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(p, &m_state.context.vfp, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(p, &m_state.context.exc, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - (uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  // Return the size of the register context even if NULL was passed in
  return size;
}

nub_size_t DNBArchMachARM64::SetRegisterContext(const void *buf,
                                                nub_size_t buf_len) {
2020 nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) + 2021 sizeof(m_state.context.exc); 2022 2023 if (buf == NULL || buf_len == 0) 2024 size = 0; 2025 2026 if (size) { 2027 if (size > buf_len) 2028 size = buf_len; 2029 2030 // Copy each struct individually to avoid any padding that might be between 2031 // the structs in m_state.context 2032 uint8_t *p = (uint8_t *)buf; 2033 ::memcpy(&m_state.context.gpr, p, sizeof(m_state.context.gpr)); 2034 p += sizeof(m_state.context.gpr); 2035 ::memcpy(&m_state.context.vfp, p, sizeof(m_state.context.vfp)); 2036 p += sizeof(m_state.context.vfp); 2037 ::memcpy(&m_state.context.exc, p, sizeof(m_state.context.exc)); 2038 p += sizeof(m_state.context.exc); 2039 2040 size_t bytes_written = p - (uint8_t *)buf; 2041 UNUSED_IF_ASSERT_DISABLED(bytes_written); 2042 assert(bytes_written == size); 2043 SetGPRState(); 2044 SetVFPState(); 2045 SetEXCState(); 2046 } 2047 DNBLogThreadedIf( 2048 LOG_THREAD, 2049 "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf, 2050 buf_len, size); 2051 return size; 2052 } 2053 2054 uint32_t DNBArchMachARM64::SaveRegisterState() { 2055 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber()); 2056 DNBLogThreadedIf( 2057 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u " 2058 "(SetGPRState() for stop_count = %u)", 2059 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount()); 2060 2061 // Always re-read the registers because above we call thread_abort_safely(); 2062 bool force = true; 2063 2064 if ((kret = GetGPRState(force)) != KERN_SUCCESS) { 2065 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () " 2066 "error: GPR regs failed to read: %u ", 2067 kret); 2068 } else if ((kret = GetVFPState(force)) != KERN_SUCCESS) { 2069 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () " 2070 "error: %s regs failed to read: %u", 2071 "VFP", kret); 2072 } else { 2073 const uint32_t save_id = 
GetNextRegisterStateSaveID(); 2074 m_saved_register_states[save_id] = m_state.context; 2075 return save_id; 2076 } 2077 return UINT32_MAX; 2078 } 2079 2080 bool DNBArchMachARM64::RestoreRegisterState(uint32_t save_id) { 2081 SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id); 2082 if (pos != m_saved_register_states.end()) { 2083 m_state.context.gpr = pos->second.gpr; 2084 m_state.context.vfp = pos->second.vfp; 2085 kern_return_t kret; 2086 bool success = true; 2087 if ((kret = SetGPRState()) != KERN_SUCCESS) { 2088 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState " 2089 "(save_id = %u) error: GPR regs failed to " 2090 "write: %u", 2091 save_id, kret); 2092 success = false; 2093 } else if ((kret = SetVFPState()) != KERN_SUCCESS) { 2094 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState " 2095 "(save_id = %u) error: %s regs failed to " 2096 "write: %u", 2097 save_id, "VFP", kret); 2098 success = false; 2099 } 2100 m_saved_register_states.erase(pos); 2101 return success; 2102 } 2103 return false; 2104 } 2105 2106 #endif // #if defined (ARM_THREAD_STATE64_COUNT) 2107 #endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__) 2108