//===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined(__i386__) || defined(__x86_64__)

#include <sys/cdefs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include "DNBLog.h"
#include "MacOSX/x86_64/DNBArchImplX86_64.h"
#include "MachProcess.h"
#include "MachThread.h"
#include <cstdlib>
#include <mach/mach.h>

#if defined(LLDB_DEBUGSERVER_RELEASE) || defined(LLDB_DEBUGSERVER_DEBUG)
enum debugState { debugStateUnknown, debugStateOff, debugStateOn };

static debugState sFPUDebugState = debugStateUnknown;
static debugState sAVXForceState = debugStateUnknown;

static bool DebugFPURegs() {
  if (sFPUDebugState == debugStateUnknown) {
    if (getenv("DNB_DEBUG_FPU_REGS"))
      sFPUDebugState = debugStateOn;
    else
      sFPUDebugState = debugStateOff;
  }

  return (sFPUDebugState == debugStateOn);
}

static bool ForceAVXRegs() {
  if (sAVXForceState == debugStateUnknown) {
    if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
      sAVXForceState = debugStateOn;
    else
      sAVXForceState = debugStateOff;
  }

  return (sAVXForceState == debugStateOn);
}

#define DEBUG_FPU_REGS (DebugFPURegs())
#define FORCE_AVX_REGS (ForceAVXRegs())
#else
#define DEBUG_FPU_REGS (0)
#define FORCE_AVX_REGS (0)
#endif

bool DetectHardwareFeature(const char *feature) {
  int answer = 0;
  size_t answer_size = sizeof(answer);
  int error = ::sysctlbyname(feature, &answer, &answer_size, NULL, 0);
  return error == 0 && answer != 0;
}

enum AVXPresence { eAVXUnknown = -1, eAVXNotPresent = 0, eAVXPresent = 1 };

bool LogAVXAndReturn(AVXPresence has_avx, int err, const char *os_ver) {
  DNBLogThreadedIf(LOG_THREAD,
                   "CPUHasAVX(): g_has_avx = %i (err = %i, os_ver = %s)",
                   has_avx, err, os_ver);
  return (has_avx == eAVXPresent);
}

extern "C" bool CPUHasAVX() {
  static AVXPresence g_has_avx = eAVXUnknown;
  if (g_has_avx != eAVXUnknown)
    return LogAVXAndReturn(g_has_avx, 0, "");

  g_has_avx = eAVXNotPresent;

  // OS X 10.7.3 and earlier have a bug in thread_get_state that truncates the
  // size of the return. To work around this we have to disable AVX debugging
  // on hosts prior to 10.7.4 (<rdar://problem/10122874>).
  int mib[2];
  char buffer[1024];
  size_t length = sizeof(buffer);
  mib[0] = CTL_KERN;
  mib[1] = KERN_OSVERSION;

  // KERN_OSVERSION returns the build number, which is a number signifying the
  // major version, a capital letter signifying the minor version, and numbers
  // signifying the build (ex: on 10.12.3, the returned value is 16D32).
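  //
  // As a concrete illustration of the parsing below (the specific build
  // strings here are hypothetical examples, not values read from any
  // particular host): a buffer of "11E53" would yield major_ver == 11 and
  // letter == 'E', which passes the 10.7.4 ("11E") gate further down, while a
  // 10.7.3-style build such as "11D50" would fail it and leave g_has_avx at
  // eAVXNotPresent.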
  int err = ::sysctl(mib, 2, &buffer, &length, NULL, 0);
  if (err != 0)
    return LogAVXAndReturn(g_has_avx, err, "");

  size_t first_letter = 0;
  for (; first_letter < length; ++first_letter) {
    // This is looking for the first uppercase letter
    if (isupper(buffer[first_letter]))
      break;
  }
  char letter = buffer[first_letter];
  buffer[first_letter] = '\0';
  auto major_ver = strtoull(buffer, NULL, 0);
  buffer[first_letter] = letter;

  // In this check we're looking to see that our major and minor version number
  // was >= 11E, which is the 10.7.4 release.
  if (major_ver < 11 || (major_ver == 11 && letter < 'E'))
    return LogAVXAndReturn(g_has_avx, err, buffer);
  if (DetectHardwareFeature("hw.optional.avx1_0"))
    g_has_avx = eAVXPresent;

  return LogAVXAndReturn(g_has_avx, err, buffer);
}

extern "C" bool CPUHasAVX512f() {
  static AVXPresence g_has_avx512f = eAVXUnknown;
  if (g_has_avx512f != eAVXUnknown)
    return g_has_avx512f == eAVXPresent;

  g_has_avx512f = DetectHardwareFeature("hw.optional.avx512f") ? eAVXPresent
                                                               : eAVXNotPresent;

  return (g_has_avx512f == eAVXPresent);
}

uint64_t DNBArchImplX86_64::GetPC(uint64_t failValue) {
  // Get program counter
  if (GetGPRState(false) == KERN_SUCCESS)
    return m_state.context.gpr.__rip;
  return failValue;
}

kern_return_t DNBArchImplX86_64::SetPC(uint64_t value) {
  // Set program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
    m_state.context.gpr.__rip = value;
    err = SetGPRState();
  }
  return err == KERN_SUCCESS;
}

uint64_t DNBArchImplX86_64::GetSP(uint64_t failValue) {
  // Get stack pointer
  if (GetGPRState(false) == KERN_SUCCESS)
    return m_state.context.gpr.__rsp;
  return failValue;
}

// Uncomment the value below to verify the values in the debugger.
157 //#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 158 159 kern_return_t DNBArchImplX86_64::GetGPRState(bool force) { 160 if (force || m_state.GetError(e_regSetGPR, Read)) { 161 #if DEBUG_GPR_VALUES 162 m_state.context.gpr.__rax = ('a' << 8) + 'x'; 163 m_state.context.gpr.__rbx = ('b' << 8) + 'x'; 164 m_state.context.gpr.__rcx = ('c' << 8) + 'x'; 165 m_state.context.gpr.__rdx = ('d' << 8) + 'x'; 166 m_state.context.gpr.__rdi = ('d' << 8) + 'i'; 167 m_state.context.gpr.__rsi = ('s' << 8) + 'i'; 168 m_state.context.gpr.__rbp = ('b' << 8) + 'p'; 169 m_state.context.gpr.__rsp = ('s' << 8) + 'p'; 170 m_state.context.gpr.__r8 = ('r' << 8) + '8'; 171 m_state.context.gpr.__r9 = ('r' << 8) + '9'; 172 m_state.context.gpr.__r10 = ('r' << 8) + 'a'; 173 m_state.context.gpr.__r11 = ('r' << 8) + 'b'; 174 m_state.context.gpr.__r12 = ('r' << 8) + 'c'; 175 m_state.context.gpr.__r13 = ('r' << 8) + 'd'; 176 m_state.context.gpr.__r14 = ('r' << 8) + 'e'; 177 m_state.context.gpr.__r15 = ('r' << 8) + 'f'; 178 m_state.context.gpr.__rip = ('i' << 8) + 'p'; 179 m_state.context.gpr.__rflags = ('f' << 8) + 'l'; 180 m_state.context.gpr.__cs = ('c' << 8) + 's'; 181 m_state.context.gpr.__fs = ('f' << 8) + 's'; 182 m_state.context.gpr.__gs = ('g' << 8) + 's'; 183 m_state.SetError(e_regSetGPR, Read, 0); 184 #else 185 mach_msg_type_number_t count = e_regSetWordSizeGPR; 186 m_state.SetError( 187 e_regSetGPR, Read, 188 ::thread_get_state(m_thread->MachPortNumber(), __x86_64_THREAD_STATE, 189 (thread_state_t)&m_state.context.gpr, &count)); 190 DNBLogThreadedIf( 191 LOG_THREAD, 192 "::thread_get_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x" 193 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx" 194 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx" 195 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx" 196 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx" 197 "\n\trip = %16.16llx" 198 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx", 199 m_thread->MachPortNumber(), x86_THREAD_STATE64, 200 x86_THREAD_STATE64_COUNT, m_state.GetError(e_regSetGPR, Read), 201 m_state.context.gpr.__rax, m_state.context.gpr.__rbx, 202 m_state.context.gpr.__rcx, m_state.context.gpr.__rdx, 203 m_state.context.gpr.__rdi, m_state.context.gpr.__rsi, 204 m_state.context.gpr.__rbp, m_state.context.gpr.__rsp, 205 m_state.context.gpr.__r8, m_state.context.gpr.__r9, 206 m_state.context.gpr.__r10, m_state.context.gpr.__r11, 207 m_state.context.gpr.__r12, m_state.context.gpr.__r13, 208 m_state.context.gpr.__r14, m_state.context.gpr.__r15, 209 m_state.context.gpr.__rip, m_state.context.gpr.__rflags, 210 m_state.context.gpr.__cs, m_state.context.gpr.__fs, 211 m_state.context.gpr.__gs); 212 213 // DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u) 214 // => 0x%8.8x" 215 // "\n\trax = %16.16llx" 216 // "\n\trbx = %16.16llx" 217 // "\n\trcx = %16.16llx" 218 // "\n\trdx = %16.16llx" 219 // "\n\trdi = %16.16llx" 220 // "\n\trsi = %16.16llx" 221 // "\n\trbp = %16.16llx" 222 // "\n\trsp = %16.16llx" 223 // "\n\t r8 = %16.16llx" 224 // "\n\t r9 = %16.16llx" 225 // "\n\tr10 = %16.16llx" 226 // "\n\tr11 = %16.16llx" 227 // "\n\tr12 = %16.16llx" 228 // "\n\tr13 = %16.16llx" 229 // "\n\tr14 = %16.16llx" 230 // "\n\tr15 = %16.16llx" 231 // "\n\trip = %16.16llx" 232 // "\n\tflg = %16.16llx" 233 // "\n\t cs = %16.16llx" 234 // "\n\t fs = %16.16llx" 235 // "\n\t gs = %16.16llx", 236 // m_thread->MachPortNumber(), 237 // x86_THREAD_STATE64, 238 // 
x86_THREAD_STATE64_COUNT, 239 // m_state.GetError(e_regSetGPR, Read), 240 // m_state.context.gpr.__rax, 241 // m_state.context.gpr.__rbx, 242 // m_state.context.gpr.__rcx, 243 // m_state.context.gpr.__rdx, 244 // m_state.context.gpr.__rdi, 245 // m_state.context.gpr.__rsi, 246 // m_state.context.gpr.__rbp, 247 // m_state.context.gpr.__rsp, 248 // m_state.context.gpr.__r8, 249 // m_state.context.gpr.__r9, 250 // m_state.context.gpr.__r10, 251 // m_state.context.gpr.__r11, 252 // m_state.context.gpr.__r12, 253 // m_state.context.gpr.__r13, 254 // m_state.context.gpr.__r14, 255 // m_state.context.gpr.__r15, 256 // m_state.context.gpr.__rip, 257 // m_state.context.gpr.__rflags, 258 // m_state.context.gpr.__cs, 259 // m_state.context.gpr.__fs, 260 // m_state.context.gpr.__gs); 261 #endif 262 } 263 return m_state.GetError(e_regSetGPR, Read); 264 } 265 266 // Uncomment the value below to verify the values in the debugger. 267 //#define DEBUG_FPU_REGS 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 268 269 kern_return_t DNBArchImplX86_64::GetFPUState(bool force) { 270 if (force || m_state.GetError(e_regSetFPU, Read)) { 271 if (DEBUG_FPU_REGS) { 272 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1; 273 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1; 274 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234; 275 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678; 276 m_state.context.fpu.no_avx.__fpu_ftw = 1; 277 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX; 278 m_state.context.fpu.no_avx.__fpu_fop = 2; 279 m_state.context.fpu.no_avx.__fpu_ip = 3; 280 m_state.context.fpu.no_avx.__fpu_cs = 4; 281 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5; 282 m_state.context.fpu.no_avx.__fpu_dp = 6; 283 m_state.context.fpu.no_avx.__fpu_ds = 7; 284 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX; 285 m_state.context.fpu.no_avx.__fpu_mxcsr = 8; 286 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9; 287 for (int i = 0; i < 16; ++i) { 288 if (i < 10) { 289 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 290 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 291 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 292 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 293 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 294 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 295 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 296 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 297 } else { 298 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 299 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 300 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 301 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 302 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 303 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 304 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 305 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 306 } 307 308 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0'; 309 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1'; 310 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2'; 311 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3'; 312 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4'; 313 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5'; 314 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6'; 315 
m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7'; 316 m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8'; 317 m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9'; 318 m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A'; 319 m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B'; 320 m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C'; 321 m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D'; 322 m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E'; 323 m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F'; 324 } 325 for (int i = 0; i < sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i) 326 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN; 327 m_state.context.fpu.no_avx.__fpu_reserved1 = -1; 328 329 if (CPUHasAVX() || FORCE_AVX_REGS) { 330 for (int i = 0; i < 16; ++i) { 331 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0' + i; 332 m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1' + i; 333 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2' + i; 334 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3' + i; 335 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4' + i; 336 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5' + i; 337 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6' + i; 338 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7' + i; 339 m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8' + i; 340 m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9' + i; 341 m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A' + i; 342 m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B' + i; 343 m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C' + i; 344 m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D' + i; 345 m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E' + i; 346 m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F' + i; 347 } 348 for (int i = 0; i < sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i) 349 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN; 350 } 351 if (CPUHasAVX512f() || FORCE_AVX_REGS) { 352 for (int i = 0; i < 8; ++i) { 353 m_state.context.fpu.avx512f.__fpu_k0.__opmask_reg[i] = '0'; 354 m_state.context.fpu.avx512f.__fpu_k1.__opmask_reg[i] = '1'; 355 m_state.context.fpu.avx512f.__fpu_k2.__opmask_reg[i] = '2'; 356 m_state.context.fpu.avx512f.__fpu_k3.__opmask_reg[i] = '3'; 357 m_state.context.fpu.avx512f.__fpu_k4.__opmask_reg[i] = '4'; 358 m_state.context.fpu.avx512f.__fpu_k5.__opmask_reg[i] = '5'; 359 m_state.context.fpu.avx512f.__fpu_k6.__opmask_reg[i] = '6'; 360 m_state.context.fpu.avx512f.__fpu_k7.__opmask_reg[i] = '7'; 361 } 362 363 for (int i = 0; i < 32; ++i) { 364 m_state.context.fpu.avx512f.__fpu_zmmh0.__ymm_reg[i] = '0'; 365 m_state.context.fpu.avx512f.__fpu_zmmh1.__ymm_reg[i] = '1'; 366 m_state.context.fpu.avx512f.__fpu_zmmh2.__ymm_reg[i] = '2'; 367 m_state.context.fpu.avx512f.__fpu_zmmh3.__ymm_reg[i] = '3'; 368 m_state.context.fpu.avx512f.__fpu_zmmh4.__ymm_reg[i] = '4'; 369 m_state.context.fpu.avx512f.__fpu_zmmh5.__ymm_reg[i] = '5'; 370 m_state.context.fpu.avx512f.__fpu_zmmh6.__ymm_reg[i] = '6'; 371 m_state.context.fpu.avx512f.__fpu_zmmh7.__ymm_reg[i] = '7'; 372 m_state.context.fpu.avx512f.__fpu_zmmh8.__ymm_reg[i] = '8'; 373 m_state.context.fpu.avx512f.__fpu_zmmh9.__ymm_reg[i] = '9'; 374 m_state.context.fpu.avx512f.__fpu_zmmh10.__ymm_reg[i] = 'A'; 375 m_state.context.fpu.avx512f.__fpu_zmmh11.__ymm_reg[i] = 'B'; 376 m_state.context.fpu.avx512f.__fpu_zmmh12.__ymm_reg[i] = 'C'; 377 m_state.context.fpu.avx512f.__fpu_zmmh13.__ymm_reg[i] = 'D'; 378 
m_state.context.fpu.avx512f.__fpu_zmmh14.__ymm_reg[i] = 'E'; 379 m_state.context.fpu.avx512f.__fpu_zmmh15.__ymm_reg[i] = 'F'; 380 } 381 for (int i = 0; i < 64; ++i) { 382 m_state.context.fpu.avx512f.__fpu_zmm16.__zmm_reg[i] = 'G'; 383 m_state.context.fpu.avx512f.__fpu_zmm17.__zmm_reg[i] = 'H'; 384 m_state.context.fpu.avx512f.__fpu_zmm18.__zmm_reg[i] = 'I'; 385 m_state.context.fpu.avx512f.__fpu_zmm19.__zmm_reg[i] = 'J'; 386 m_state.context.fpu.avx512f.__fpu_zmm20.__zmm_reg[i] = 'K'; 387 m_state.context.fpu.avx512f.__fpu_zmm21.__zmm_reg[i] = 'L'; 388 m_state.context.fpu.avx512f.__fpu_zmm22.__zmm_reg[i] = 'M'; 389 m_state.context.fpu.avx512f.__fpu_zmm23.__zmm_reg[i] = 'N'; 390 m_state.context.fpu.avx512f.__fpu_zmm24.__zmm_reg[i] = 'O'; 391 m_state.context.fpu.avx512f.__fpu_zmm25.__zmm_reg[i] = 'P'; 392 m_state.context.fpu.avx512f.__fpu_zmm26.__zmm_reg[i] = 'Q'; 393 m_state.context.fpu.avx512f.__fpu_zmm27.__zmm_reg[i] = 'R'; 394 m_state.context.fpu.avx512f.__fpu_zmm28.__zmm_reg[i] = 'S'; 395 m_state.context.fpu.avx512f.__fpu_zmm29.__zmm_reg[i] = 'T'; 396 m_state.context.fpu.avx512f.__fpu_zmm30.__zmm_reg[i] = 'U'; 397 m_state.context.fpu.avx512f.__fpu_zmm31.__zmm_reg[i] = 'V'; 398 } 399 } 400 m_state.SetError(e_regSetFPU, Read, 0); 401 } else { 402 mach_msg_type_number_t count = e_regSetWordSizeFPU; 403 int flavor = __x86_64_FLOAT_STATE; 404 // On a machine with the AVX512 register set, a process only gets a 405 // full AVX512 register context after it uses the AVX512 registers; 406 // if the process has not yet triggered this change, trying to fetch 407 // the AVX512 registers will fail. Fall through to fetching the AVX 408 // registers. 409 if (CPUHasAVX512f() || FORCE_AVX_REGS) { 410 count = e_regSetWordSizeAVX512f; 411 flavor = __x86_64_AVX512F_STATE; 412 m_state.SetError(e_regSetFPU, Read, 413 ::thread_get_state(m_thread->MachPortNumber(), flavor, 414 (thread_state_t)&m_state.context.fpu, 415 &count)); 416 DNBLogThreadedIf(LOG_THREAD, 417 "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x", 418 m_thread->MachPortNumber(), flavor, (uint32_t)count, 419 m_state.GetError(e_regSetFPU, Read)); 420 421 if (m_state.GetError(e_regSetFPU, Read) == KERN_SUCCESS) 422 return m_state.GetError(e_regSetFPU, Read); 423 else 424 DNBLogThreadedIf(LOG_THREAD, 425 "::thread_get_state attempted fetch of avx512 fpu regctx failed, will try fetching avx"); 426 } 427 if (CPUHasAVX() || FORCE_AVX_REGS) { 428 count = e_regSetWordSizeAVX; 429 flavor = __x86_64_AVX_STATE; 430 } 431 m_state.SetError(e_regSetFPU, Read, 432 ::thread_get_state(m_thread->MachPortNumber(), flavor, 433 (thread_state_t)&m_state.context.fpu, 434 &count)); 435 DNBLogThreadedIf(LOG_THREAD, 436 "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x", 437 m_thread->MachPortNumber(), flavor, (uint32_t)count, 438 m_state.GetError(e_regSetFPU, Read)); 439 } 440 } 441 return m_state.GetError(e_regSetFPU, Read); 442 } 443 444 kern_return_t DNBArchImplX86_64::GetEXCState(bool force) { 445 if (force || m_state.GetError(e_regSetEXC, Read)) { 446 mach_msg_type_number_t count = e_regSetWordSizeEXC; 447 m_state.SetError( 448 e_regSetEXC, Read, 449 ::thread_get_state(m_thread->MachPortNumber(), __x86_64_EXCEPTION_STATE, 450 (thread_state_t)&m_state.context.exc, &count)); 451 } 452 return m_state.GetError(e_regSetEXC, Read); 453 } 454 455 kern_return_t DNBArchImplX86_64::SetGPRState() { 456 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber()); 457 DNBLogThreadedIf( 458 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u " 
459 "(SetGPRState() for stop_count = %u)", 460 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount()); 461 462 m_state.SetError(e_regSetGPR, Write, 463 ::thread_set_state(m_thread->MachPortNumber(), 464 __x86_64_THREAD_STATE, 465 (thread_state_t)&m_state.context.gpr, 466 e_regSetWordSizeGPR)); 467 DNBLogThreadedIf( 468 LOG_THREAD, 469 "::thread_set_state (0x%4.4x, %u, &gpr, %u) => 0x%8.8x" 470 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx" 471 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx" 472 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx" 473 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx" 474 "\n\trip = %16.16llx" 475 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx", 476 m_thread->MachPortNumber(), __x86_64_THREAD_STATE, e_regSetWordSizeGPR, 477 m_state.GetError(e_regSetGPR, Write), m_state.context.gpr.__rax, 478 m_state.context.gpr.__rbx, m_state.context.gpr.__rcx, 479 m_state.context.gpr.__rdx, m_state.context.gpr.__rdi, 480 m_state.context.gpr.__rsi, m_state.context.gpr.__rbp, 481 m_state.context.gpr.__rsp, m_state.context.gpr.__r8, 482 m_state.context.gpr.__r9, m_state.context.gpr.__r10, 483 m_state.context.gpr.__r11, m_state.context.gpr.__r12, 484 m_state.context.gpr.__r13, m_state.context.gpr.__r14, 485 m_state.context.gpr.__r15, m_state.context.gpr.__rip, 486 m_state.context.gpr.__rflags, m_state.context.gpr.__cs, 487 m_state.context.gpr.__fs, m_state.context.gpr.__gs); 488 return m_state.GetError(e_regSetGPR, Write); 489 } 490 491 kern_return_t DNBArchImplX86_64::SetFPUState() { 492 if (DEBUG_FPU_REGS) { 493 m_state.SetError(e_regSetFPU, Write, 0); 494 return m_state.GetError(e_regSetFPU, Write); 495 } else { 496 int flavor = __x86_64_FLOAT_STATE; 497 mach_msg_type_number_t count = e_regSetWordSizeFPU; 498 if (CPUHasAVX512f() || FORCE_AVX_REGS) { 499 count = e_regSetWordSizeAVX512f; 500 flavor = __x86_64_AVX512F_STATE; 501 m_state.SetError( 502 e_regSetFPU, Write, 503 ::thread_set_state(m_thread->MachPortNumber(), flavor, 504 (thread_state_t)&m_state.context.fpu, count)); 505 if (m_state.GetError(e_regSetFPU, Write) == KERN_SUCCESS) 506 return m_state.GetError(e_regSetFPU, Write); 507 else 508 DNBLogThreadedIf(LOG_THREAD, 509 "::thread_get_state attempted save of avx512 fpu regctx failed, will try saving avx regctx"); 510 } 511 512 if (CPUHasAVX() || FORCE_AVX_REGS) { 513 flavor = __x86_64_AVX_STATE; 514 count = e_regSetWordSizeAVX; 515 } 516 m_state.SetError( 517 e_regSetFPU, Write, 518 ::thread_set_state(m_thread->MachPortNumber(), flavor, 519 (thread_state_t)&m_state.context.fpu, count)); 520 return m_state.GetError(e_regSetFPU, Write); 521 } 522 } 523 524 kern_return_t DNBArchImplX86_64::SetEXCState() { 525 m_state.SetError(e_regSetEXC, Write, 526 ::thread_set_state(m_thread->MachPortNumber(), 527 __x86_64_EXCEPTION_STATE, 528 (thread_state_t)&m_state.context.exc, 529 e_regSetWordSizeEXC)); 530 return m_state.GetError(e_regSetEXC, Write); 531 } 532 533 kern_return_t DNBArchImplX86_64::GetDBGState(bool force) { 534 if (force || m_state.GetError(e_regSetDBG, Read)) { 535 mach_msg_type_number_t count = e_regSetWordSizeDBG; 536 m_state.SetError( 537 e_regSetDBG, Read, 538 ::thread_get_state(m_thread->MachPortNumber(), __x86_64_DEBUG_STATE, 539 (thread_state_t)&m_state.context.dbg, &count)); 540 } 541 return m_state.GetError(e_regSetDBG, Read); 542 } 543 544 kern_return_t DNBArchImplX86_64::SetDBGState(bool also_set_on_task) { 545 m_state.SetError(e_regSetDBG, 
Write, 546 ::thread_set_state(m_thread->MachPortNumber(), 547 __x86_64_DEBUG_STATE, 548 (thread_state_t)&m_state.context.dbg, 549 e_regSetWordSizeDBG)); 550 if (also_set_on_task) { 551 kern_return_t kret = ::task_set_state( 552 m_thread->Process()->Task().TaskPort(), __x86_64_DEBUG_STATE, 553 (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG); 554 if (kret != KERN_SUCCESS) 555 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::SetDBGState failed " 556 "to set debug control register state: " 557 "0x%8.8x.", 558 kret); 559 } 560 return m_state.GetError(e_regSetDBG, Write); 561 } 562 563 void DNBArchImplX86_64::ThreadWillResume() { 564 // Do we need to step this thread? If so, let the mach thread tell us so. 565 if (m_thread->IsStepping()) { 566 // This is the primary thread, let the arch do anything it needs 567 EnableHardwareSingleStep(true); 568 } 569 570 // Reset the debug status register, if necessary, before we resume. 571 kern_return_t kret = GetDBGState(false); 572 DNBLogThreadedIf( 573 LOG_WATCHPOINTS, 574 "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret); 575 if (kret != KERN_SUCCESS) 576 return; 577 578 DBG &debug_state = m_state.context.dbg; 579 bool need_reset = false; 580 uint32_t i, num = NumSupportedHardwareWatchpoints(); 581 for (i = 0; i < num; ++i) 582 if (IsWatchpointHit(debug_state, i)) 583 need_reset = true; 584 585 if (need_reset) { 586 ClearWatchpointHits(debug_state); 587 kret = SetDBGState(false); 588 DNBLogThreadedIf( 589 LOG_WATCHPOINTS, 590 "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.", 591 kret); 592 } 593 } 594 595 bool DNBArchImplX86_64::ThreadDidStop() { 596 bool success = true; 597 598 m_state.InvalidateAllRegisterStates(); 599 600 // Are we stepping a single instruction? 601 if (GetGPRState(true) == KERN_SUCCESS) { 602 // We are single stepping, was this the primary thread? 603 if (m_thread->IsStepping()) { 604 // This was the primary thread, we need to clear the trace 605 // bit if so. 606 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 607 } else { 608 // The MachThread will automatically restore the suspend count 609 // in ThreadDidStop(), so we don't need to do anything here if 610 // we weren't the primary thread the last time 611 } 612 } 613 return success; 614 } 615 616 bool DNBArchImplX86_64::NotifyException(MachException::Data &exc) { 617 switch (exc.exc_type) { 618 case EXC_BAD_ACCESS: 619 break; 620 case EXC_BAD_INSTRUCTION: 621 break; 622 case EXC_ARITHMETIC: 623 break; 624 case EXC_EMULATION: 625 break; 626 case EXC_SOFTWARE: 627 break; 628 case EXC_BREAKPOINT: 629 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) { 630 // exc_code = EXC_I386_BPT 631 // 632 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS); 633 if (pc != INVALID_NUB_ADDRESS && pc > 0) { 634 pc -= 1; 635 // Check for a breakpoint at one byte prior to the current PC value 636 // since the PC will be just past the trap. 637 638 DNBBreakpoint *bp = 639 m_thread->Process()->Breakpoints().FindByAddress(pc); 640 if (bp) { 641 // Backup the PC for i386 since the trap was taken and the PC 642 // is at the address following the single byte trap instruction. 643 if (m_state.context.gpr.__rip > 0) { 644 m_state.context.gpr.__rip = pc; 645 // Write the new PC back out 646 SetGPRState(); 647 } 648 } 649 return true; 650 } 651 } else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1) { 652 // exc_code = EXC_I386_SGL 653 // 654 // Check whether this corresponds to a watchpoint hit event. 
655 // If yes, set the exc_sub_code to the data break address. 656 nub_addr_t addr = 0; 657 uint32_t hw_index = GetHardwareWatchpointHit(addr); 658 if (hw_index != INVALID_NUB_HW_INDEX) { 659 exc.exc_data[1] = addr; 660 // Piggyback the hw_index in the exc.data. 661 exc.exc_data.push_back(hw_index); 662 } 663 664 return true; 665 } 666 break; 667 case EXC_SYSCALL: 668 break; 669 case EXC_MACH_SYSCALL: 670 break; 671 case EXC_RPC_ALERT: 672 break; 673 } 674 return false; 675 } 676 677 uint32_t DNBArchImplX86_64::NumSupportedHardwareWatchpoints() { 678 // Available debug address registers: dr0, dr1, dr2, dr3. 679 return 4; 680 } 681 682 uint32_t DNBArchImplX86_64::NumSupportedHardwareBreakpoints() { 683 DNBLogThreadedIf(LOG_BREAKPOINTS, 684 "DNBArchImplX86_64::NumSupportedHardwareBreakpoints"); 685 return 4; 686 } 687 688 static uint32_t size_and_rw_bits(nub_size_t size, bool read, bool write) { 689 uint32_t rw; 690 if (read) { 691 rw = 0x3; // READ or READ/WRITE 692 } else if (write) { 693 rw = 0x1; // WRITE 694 } else { 695 assert(0 && "read and write cannot both be false"); 696 } 697 698 switch (size) { 699 case 1: 700 return rw; 701 case 2: 702 return (0x1 << 2) | rw; 703 case 4: 704 return (0x3 << 2) | rw; 705 case 8: 706 return (0x2 << 2) | rw; 707 } 708 assert(0 && "invalid size, must be one of 1, 2, 4, or 8"); 709 return 0; 710 } 711 void DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index, 712 nub_addr_t addr, nub_size_t size, 713 bool read, bool write) { 714 // Set both dr7 (debug control register) and dri (debug address register). 715 716 // dr7{7-0} encodes the local/gloabl enable bits: 717 // global enable --. .-- local enable 718 // | | 719 // v v 720 // dr0 -> bits{1-0} 721 // dr1 -> bits{3-2} 722 // dr2 -> bits{5-4} 723 // dr3 -> bits{7-6} 724 // 725 // dr7{31-16} encodes the rw/len bits: 726 // b_x+3, b_x+2, b_x+1, b_x 727 // where bits{x+1, x} => rw 728 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io 729 // read-or-write (unused) 730 // and bits{x+3, x+2} => len 731 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte 732 // 733 // dr0 -> bits{19-16} 734 // dr1 -> bits{23-20} 735 // dr2 -> bits{27-24} 736 // dr3 -> bits{31-28} 737 debug_state.__dr7 |= 738 (1 << (2 * hw_index) | 739 size_and_rw_bits(size, read, write) << (16 + 4 * hw_index)); 740 switch (hw_index) { 741 case 0: 742 debug_state.__dr0 = addr; 743 break; 744 case 1: 745 debug_state.__dr1 = addr; 746 break; 747 case 2: 748 debug_state.__dr2 = addr; 749 break; 750 case 3: 751 debug_state.__dr3 = addr; 752 break; 753 default: 754 assert(0 && 755 "invalid hardware register index, must be one of 0, 1, 2, or 3"); 756 } 757 return; 758 } 759 760 void DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index) { 761 debug_state.__dr7 &= ~(3 << (2 * hw_index)); 762 switch (hw_index) { 763 case 0: 764 debug_state.__dr0 = 0; 765 break; 766 case 1: 767 debug_state.__dr1 = 0; 768 break; 769 case 2: 770 debug_state.__dr2 = 0; 771 break; 772 case 3: 773 debug_state.__dr3 = 0; 774 break; 775 default: 776 assert(0 && 777 "invalid hardware register index, must be one of 0, 1, 2, or 3"); 778 } 779 return; 780 } 781 782 bool DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state, 783 uint32_t hw_index) { 784 // Check dr7 (debug control register) for local/global enable bits: 785 // global enable --. 
.-- local enable 786 // | | 787 // v v 788 // dr0 -> bits{1-0} 789 // dr1 -> bits{3-2} 790 // dr2 -> bits{5-4} 791 // dr3 -> bits{7-6} 792 return (debug_state.__dr7 & (3 << (2 * hw_index))) == 0; 793 } 794 795 // Resets local copy of debug status register to wait for the next debug 796 // exception. 797 void DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state) { 798 // See also IsWatchpointHit(). 799 debug_state.__dr6 = 0; 800 return; 801 } 802 803 bool DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state, 804 uint32_t hw_index) { 805 // Check dr6 (debug status register) whether a watchpoint hits: 806 // is watchpoint hit? 807 // | 808 // v 809 // dr0 -> bits{0} 810 // dr1 -> bits{1} 811 // dr2 -> bits{2} 812 // dr3 -> bits{3} 813 return (debug_state.__dr6 & (1 << hw_index)); 814 } 815 816 nub_addr_t DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state, 817 uint32_t hw_index) { 818 switch (hw_index) { 819 case 0: 820 return debug_state.__dr0; 821 case 1: 822 return debug_state.__dr1; 823 case 2: 824 return debug_state.__dr2; 825 case 3: 826 return debug_state.__dr3; 827 } 828 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 829 return 0; 830 } 831 832 bool DNBArchImplX86_64::StartTransForHWP() { 833 if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back) 834 DNBLogError("%s inconsistent state detected, expected %d or %d, got: %d", 835 __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state); 836 m_2pc_dbg_checkpoint = m_state.context.dbg; 837 m_2pc_trans_state = Trans_Pending; 838 return true; 839 } 840 bool DNBArchImplX86_64::RollbackTransForHWP() { 841 m_state.context.dbg = m_2pc_dbg_checkpoint; 842 if (m_2pc_trans_state != Trans_Pending) 843 DNBLogError("%s inconsistent state detected, expected %d, got: %d", 844 __FUNCTION__, Trans_Pending, m_2pc_trans_state); 845 m_2pc_trans_state = Trans_Rolled_Back; 846 kern_return_t kret = SetDBGState(false); 847 DNBLogThreadedIf( 848 LOG_WATCHPOINTS, 849 "DNBArchImplX86_64::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", 850 kret); 851 852 return kret == KERN_SUCCESS; 853 } 854 bool DNBArchImplX86_64::FinishTransForHWP() { 855 m_2pc_trans_state = Trans_Done; 856 return true; 857 } 858 DNBArchImplX86_64::DBG DNBArchImplX86_64::GetDBGCheckpoint() { 859 return m_2pc_dbg_checkpoint; 860 } 861 862 void DNBArchImplX86_64::SetHardwareBreakpoint(DBG &debug_state, 863 uint32_t hw_index, 864 nub_addr_t addr, 865 nub_size_t size) { 866 // Set both dr7 (debug control register) and dri (debug address register). 867 868 // dr7{7-0} encodes the local/gloabl enable bits: 869 // global enable --. 
.-- local enable 870 // | | 871 // v v 872 // dr0 -> bits{1-0} 873 // dr1 -> bits{3-2} 874 // dr2 -> bits{5-4} 875 // dr3 -> bits{7-6} 876 // 877 // dr7{31-16} encodes the rw/len bits: 878 // b_x+3, b_x+2, b_x+1, b_x 879 // where bits{x+1, x} => rw 880 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io 881 // read-or-write (unused) 882 // and bits{x+3, x+2} => len 883 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte 884 // 885 // dr0 -> bits{19-16} 886 // dr1 -> bits{23-20} 887 // dr2 -> bits{27-24} 888 // dr3 -> bits{31-28} 889 debug_state.__dr7 |= (1 << (2 * hw_index) | 0 << (16 + 4 * hw_index)); 890 891 switch (hw_index) { 892 case 0: 893 debug_state.__dr0 = addr; 894 break; 895 case 1: 896 debug_state.__dr1 = addr; 897 break; 898 case 2: 899 debug_state.__dr2 = addr; 900 break; 901 case 3: 902 debug_state.__dr3 = addr; 903 break; 904 default: 905 assert(0 && 906 "invalid hardware register index, must be one of 0, 1, 2, or 3"); 907 } 908 return; 909 } 910 911 uint32_t DNBArchImplX86_64::EnableHardwareBreakpoint(nub_addr_t addr, 912 nub_size_t size, 913 bool also_set_on_task) { 914 DNBLogThreadedIf(LOG_BREAKPOINTS, 915 "DNBArchImplX86_64::EnableHardwareBreakpoint( addr = " 916 "0x%8.8llx, size = %llu )", 917 (uint64_t)addr, (uint64_t)size); 918 919 const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints(); 920 // Read the debug state 921 kern_return_t kret = GetDBGState(false); 922 923 if (kret != KERN_SUCCESS) { 924 return INVALID_NUB_HW_INDEX; 925 } 926 927 // Check to make sure we have the needed hardware support 928 uint32_t i = 0; 929 930 DBG &debug_state = m_state.context.dbg; 931 for (i = 0; i < num_hw_breakpoints; ++i) { 932 if (IsWatchpointVacant(debug_state, i)) { 933 break; 934 } 935 } 936 937 // See if we found an available hw breakpoint slot above 938 if (i < num_hw_breakpoints) { 939 DNBLogThreadedIf( 940 LOG_BREAKPOINTS, 941 "DNBArchImplX86_64::EnableHardwareBreakpoint( free slot = %u )", i); 942 943 StartTransForHWP(); 944 945 // Modify our local copy of the debug state, first. 946 SetHardwareBreakpoint(debug_state, i, addr, size); 947 // Now set the watch point in the inferior. 948 kret = SetDBGState(also_set_on_task); 949 950 DNBLogThreadedIf(LOG_BREAKPOINTS, 951 "DNBArchImplX86_64::" 952 "EnableHardwareBreakpoint() " 953 "SetDBGState() => 0x%8.8x.", 954 kret); 955 956 if (kret == KERN_SUCCESS) { 957 DNBLogThreadedIf( 958 LOG_BREAKPOINTS, 959 "DNBArchImplX86_64::EnableHardwareBreakpoint( enabled at slot = %u)", 960 i); 961 return i; 962 } 963 // Revert to the previous debug state voluntarily. The transaction 964 // coordinator knows that we have failed. 965 else { 966 m_state.context.dbg = GetDBGCheckpoint(); 967 } 968 } else { 969 DNBLogThreadedIf(LOG_BREAKPOINTS, 970 "DNBArchImplX86_64::EnableHardwareBreakpoint(addr = " 971 "0x%8.8llx, size = %llu) => all hardware breakpoint " 972 "resources are being used.", 973 (uint64_t)addr, (uint64_t)size); 974 } 975 976 return INVALID_NUB_HW_INDEX; 977 } 978 979 bool DNBArchImplX86_64::DisableHardwareBreakpoint(uint32_t hw_index, 980 bool also_set_on_task) { 981 kern_return_t kret = GetDBGState(false); 982 983 const uint32_t num_hw_points = NumSupportedHardwareBreakpoints(); 984 if (kret == KERN_SUCCESS) { 985 DBG &debug_state = m_state.context.dbg; 986 if (hw_index < num_hw_points && 987 !IsWatchpointVacant(debug_state, hw_index)) { 988 989 StartTransForHWP(); 990 991 // Modify our local copy of the debug state, first. 
992 ClearWatchpoint(debug_state, hw_index); 993 // Now disable the watch point in the inferior. 994 kret = SetDBGState(true); 995 DNBLogThreadedIf(LOG_WATCHPOINTS, 996 "DNBArchImplX86_64::DisableHardwareBreakpoint( %u )", 997 hw_index); 998 999 if (kret == KERN_SUCCESS) 1000 return true; 1001 else // Revert to the previous debug state voluntarily. The transaction 1002 // coordinator knows that we have failed. 1003 m_state.context.dbg = GetDBGCheckpoint(); 1004 } 1005 } 1006 return false; 1007 } 1008 1009 uint32_t DNBArchImplX86_64::EnableHardwareWatchpoint(nub_addr_t addr, 1010 nub_size_t size, bool read, 1011 bool write, 1012 bool also_set_on_task) { 1013 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::" 1014 "EnableHardwareWatchpoint(addr = 0x%llx, " 1015 "size = %llu, read = %u, write = %u)", 1016 (uint64_t)addr, (uint64_t)size, read, write); 1017 1018 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints(); 1019 1020 // Can only watch 1, 2, 4, or 8 bytes. 1021 if (!(size == 1 || size == 2 || size == 4 || size == 8)) 1022 return INVALID_NUB_HW_INDEX; 1023 1024 // We must watch for either read or write 1025 if (!read && !write) 1026 return INVALID_NUB_HW_INDEX; 1027 1028 // Read the debug state 1029 kern_return_t kret = GetDBGState(false); 1030 1031 if (kret == KERN_SUCCESS) { 1032 // Check to make sure we have the needed hardware support 1033 uint32_t i = 0; 1034 1035 DBG &debug_state = m_state.context.dbg; 1036 for (i = 0; i < num_hw_watchpoints; ++i) { 1037 if (IsWatchpointVacant(debug_state, i)) 1038 break; 1039 } 1040 1041 // See if we found an available hw breakpoint slot above 1042 if (i < num_hw_watchpoints) { 1043 StartTransForHWP(); 1044 1045 // Modify our local copy of the debug state, first. 1046 SetWatchpoint(debug_state, i, addr, size, read, write); 1047 // Now set the watch point in the inferior. 1048 kret = SetDBGState(also_set_on_task); 1049 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::" 1050 "EnableHardwareWatchpoint() " 1051 "SetDBGState() => 0x%8.8x.", 1052 kret); 1053 1054 if (kret == KERN_SUCCESS) 1055 return i; 1056 else // Revert to the previous debug state voluntarily. The transaction 1057 // coordinator knows that we have failed. 1058 m_state.context.dbg = GetDBGCheckpoint(); 1059 } else { 1060 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::" 1061 "EnableHardwareWatchpoint(): All " 1062 "hardware resources (%u) are in use.", 1063 num_hw_watchpoints); 1064 } 1065 } 1066 return INVALID_NUB_HW_INDEX; 1067 } 1068 1069 bool DNBArchImplX86_64::DisableHardwareWatchpoint(uint32_t hw_index, 1070 bool also_set_on_task) { 1071 kern_return_t kret = GetDBGState(false); 1072 1073 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 1074 if (kret == KERN_SUCCESS) { 1075 DBG &debug_state = m_state.context.dbg; 1076 if (hw_index < num_hw_points && 1077 !IsWatchpointVacant(debug_state, hw_index)) { 1078 StartTransForHWP(); 1079 1080 // Modify our local copy of the debug state, first. 1081 ClearWatchpoint(debug_state, hw_index); 1082 // Now disable the watch point in the inferior. 1083 kret = SetDBGState(also_set_on_task); 1084 DNBLogThreadedIf(LOG_WATCHPOINTS, 1085 "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )", 1086 hw_index); 1087 1088 if (kret == KERN_SUCCESS) 1089 return true; 1090 else // Revert to the previous debug state voluntarily. The transaction 1091 // coordinator knows that we have failed. 
        m_state.context.dbg = GetDBGCheckpoint();
    }
  }
  return false;
}

// Iterate through the debug status register; return the index of the first
// hit.
uint32_t DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr) {
  // Read the debug state
  kern_return_t kret = GetDBGState(true);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
      kret);
  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.context.dbg;
    uint32_t i, num = NumSupportedHardwareWatchpoints();
    for (i = 0; i < num; ++i) {
      if (IsWatchpointHit(debug_state, i)) {
        addr = GetWatchAddress(debug_state, i);
        DNBLogThreadedIf(LOG_WATCHPOINTS,
                         "DNBArchImplX86_64::GetHardwareWatchpointHit() found "
                         "=> %u (addr = 0x%llx).",
                         i, (uint64_t)addr);
        return i;
      }
    }
  }
  return INVALID_NUB_HW_INDEX;
}

// Set the single step bit in the processor status register.
kern_return_t DNBArchImplX86_64::EnableHardwareSingleStep(bool enable) {
  if (GetGPRState(false) == KERN_SUCCESS) {
    const uint32_t trace_bit = 0x100u;
    if (enable)
      m_state.context.gpr.__rflags |= trace_bit;
    else
      m_state.context.gpr.__rflags &= ~trace_bit;
    return SetGPRState();
  }
  return m_state.GetError(e_regSetGPR, Read);
}

// Register information definitions

enum {
  gpr_rax = 0,
  gpr_rbx,
  gpr_rcx,
  gpr_rdx,
  gpr_rdi,
  gpr_rsi,
  gpr_rbp,
  gpr_rsp,
  gpr_r8,
  gpr_r9,
  gpr_r10,
  gpr_r11,
  gpr_r12,
  gpr_r13,
  gpr_r14,
  gpr_r15,
  gpr_rip,
  gpr_rflags,
  gpr_cs,
  gpr_fs,
  gpr_gs,
  gpr_eax,
  gpr_ebx,
  gpr_ecx,
  gpr_edx,
  gpr_edi,
  gpr_esi,
  gpr_ebp,
  gpr_esp,
  gpr_r8d,  // Low 32 bits of r8
  gpr_r9d,  // Low 32 bits of r9
  gpr_r10d, // Low 32 bits of r10
  gpr_r11d, // Low 32 bits of r11
  gpr_r12d, // Low 32 bits of r12
  gpr_r13d, // Low 32 bits of r13
  gpr_r14d, // Low 32 bits of r14
  gpr_r15d, // Low 32 bits of r15
  gpr_ax,
  gpr_bx,
  gpr_cx,
  gpr_dx,
  gpr_di,
  gpr_si,
  gpr_bp,
  gpr_sp,
  gpr_r8w,  // Low 16 bits of r8
  gpr_r9w,  // Low 16 bits of r9
  gpr_r10w, // Low 16 bits of r10
  gpr_r11w, // Low 16 bits of r11
  gpr_r12w, // Low 16 bits of r12
  gpr_r13w, // Low 16 bits of r13
  gpr_r14w, // Low 16 bits of r14
  gpr_r15w, // Low 16 bits of r15
  gpr_ah,
  gpr_bh,
  gpr_ch,
  gpr_dh,
  gpr_al,
  gpr_bl,
  gpr_cl,
  gpr_dl,
  gpr_dil,
  gpr_sil,
  gpr_bpl,
  gpr_spl,
  gpr_r8l,  // Low 8 bits of r8
  gpr_r9l,  // Low 8 bits of r9
  gpr_r10l, // Low 8 bits of r10
  gpr_r11l, // Low 8 bits of r11
  gpr_r12l, // Low 8 bits of r12
  gpr_r13l, // Low 8 bits of r13
  gpr_r14l, // Low 8 bits of r14
  gpr_r15l, // Low 8 bits of r15
  k_num_gpr_regs
};

enum {
  fpu_fcw,
  fpu_fsw,
  fpu_ftw,
  fpu_fop,
  fpu_ip,
  fpu_cs,
  fpu_dp,
  fpu_ds,
  fpu_mxcsr,
  fpu_mxcsrmask,
  fpu_stmm0,
  fpu_stmm1,
  fpu_stmm2,
  fpu_stmm3,
  fpu_stmm4,
  fpu_stmm5,
  fpu_stmm6,
  fpu_stmm7,
  fpu_xmm0,
  fpu_xmm1,
  fpu_xmm2,
  fpu_xmm3,
  fpu_xmm4,
  fpu_xmm5,
  fpu_xmm6,
  fpu_xmm7,
  fpu_xmm8,
  fpu_xmm9,
  fpu_xmm10,
  fpu_xmm11,
  fpu_xmm12,
  fpu_xmm13,
fpu_xmm14, 1249 fpu_xmm15, 1250 fpu_ymm0, 1251 fpu_ymm1, 1252 fpu_ymm2, 1253 fpu_ymm3, 1254 fpu_ymm4, 1255 fpu_ymm5, 1256 fpu_ymm6, 1257 fpu_ymm7, 1258 fpu_ymm8, 1259 fpu_ymm9, 1260 fpu_ymm10, 1261 fpu_ymm11, 1262 fpu_ymm12, 1263 fpu_ymm13, 1264 fpu_ymm14, 1265 fpu_ymm15, 1266 fpu_k0, 1267 fpu_k1, 1268 fpu_k2, 1269 fpu_k3, 1270 fpu_k4, 1271 fpu_k5, 1272 fpu_k6, 1273 fpu_k7, 1274 fpu_zmm0, 1275 fpu_zmm1, 1276 fpu_zmm2, 1277 fpu_zmm3, 1278 fpu_zmm4, 1279 fpu_zmm5, 1280 fpu_zmm6, 1281 fpu_zmm7, 1282 fpu_zmm8, 1283 fpu_zmm9, 1284 fpu_zmm10, 1285 fpu_zmm11, 1286 fpu_zmm12, 1287 fpu_zmm13, 1288 fpu_zmm14, 1289 fpu_zmm15, 1290 fpu_zmm16, 1291 fpu_zmm17, 1292 fpu_zmm18, 1293 fpu_zmm19, 1294 fpu_zmm20, 1295 fpu_zmm21, 1296 fpu_zmm22, 1297 fpu_zmm23, 1298 fpu_zmm24, 1299 fpu_zmm25, 1300 fpu_zmm26, 1301 fpu_zmm27, 1302 fpu_zmm28, 1303 fpu_zmm29, 1304 fpu_zmm30, 1305 fpu_zmm31, 1306 k_num_fpu_regs, 1307 1308 // Aliases 1309 fpu_fctrl = fpu_fcw, 1310 fpu_fstat = fpu_fsw, 1311 fpu_ftag = fpu_ftw, 1312 fpu_fiseg = fpu_cs, 1313 fpu_fioff = fpu_ip, 1314 fpu_foseg = fpu_ds, 1315 fpu_fooff = fpu_dp 1316 }; 1317 1318 enum { 1319 exc_trapno, 1320 exc_err, 1321 exc_faultvaddr, 1322 k_num_exc_regs, 1323 }; 1324 1325 enum ehframe_dwarf_regnums { 1326 ehframe_dwarf_rax = 0, 1327 ehframe_dwarf_rdx = 1, 1328 ehframe_dwarf_rcx = 2, 1329 ehframe_dwarf_rbx = 3, 1330 ehframe_dwarf_rsi = 4, 1331 ehframe_dwarf_rdi = 5, 1332 ehframe_dwarf_rbp = 6, 1333 ehframe_dwarf_rsp = 7, 1334 ehframe_dwarf_r8, 1335 ehframe_dwarf_r9, 1336 ehframe_dwarf_r10, 1337 ehframe_dwarf_r11, 1338 ehframe_dwarf_r12, 1339 ehframe_dwarf_r13, 1340 ehframe_dwarf_r14, 1341 ehframe_dwarf_r15, 1342 ehframe_dwarf_rip, 1343 ehframe_dwarf_xmm0, 1344 ehframe_dwarf_xmm1, 1345 ehframe_dwarf_xmm2, 1346 ehframe_dwarf_xmm3, 1347 ehframe_dwarf_xmm4, 1348 ehframe_dwarf_xmm5, 1349 ehframe_dwarf_xmm6, 1350 ehframe_dwarf_xmm7, 1351 ehframe_dwarf_xmm8, 1352 ehframe_dwarf_xmm9, 1353 ehframe_dwarf_xmm10, 1354 ehframe_dwarf_xmm11, 1355 ehframe_dwarf_xmm12, 1356 ehframe_dwarf_xmm13, 1357 ehframe_dwarf_xmm14, 1358 ehframe_dwarf_xmm15, 1359 ehframe_dwarf_stmm0, 1360 ehframe_dwarf_stmm1, 1361 ehframe_dwarf_stmm2, 1362 ehframe_dwarf_stmm3, 1363 ehframe_dwarf_stmm4, 1364 ehframe_dwarf_stmm5, 1365 ehframe_dwarf_stmm6, 1366 ehframe_dwarf_stmm7, 1367 ehframe_dwarf_ymm0 = ehframe_dwarf_xmm0, 1368 ehframe_dwarf_ymm1 = ehframe_dwarf_xmm1, 1369 ehframe_dwarf_ymm2 = ehframe_dwarf_xmm2, 1370 ehframe_dwarf_ymm3 = ehframe_dwarf_xmm3, 1371 ehframe_dwarf_ymm4 = ehframe_dwarf_xmm4, 1372 ehframe_dwarf_ymm5 = ehframe_dwarf_xmm5, 1373 ehframe_dwarf_ymm6 = ehframe_dwarf_xmm6, 1374 ehframe_dwarf_ymm7 = ehframe_dwarf_xmm7, 1375 ehframe_dwarf_ymm8 = ehframe_dwarf_xmm8, 1376 ehframe_dwarf_ymm9 = ehframe_dwarf_xmm9, 1377 ehframe_dwarf_ymm10 = ehframe_dwarf_xmm10, 1378 ehframe_dwarf_ymm11 = ehframe_dwarf_xmm11, 1379 ehframe_dwarf_ymm12 = ehframe_dwarf_xmm12, 1380 ehframe_dwarf_ymm13 = ehframe_dwarf_xmm13, 1381 ehframe_dwarf_ymm14 = ehframe_dwarf_xmm14, 1382 ehframe_dwarf_ymm15 = ehframe_dwarf_xmm15, 1383 ehframe_dwarf_zmm0 = ehframe_dwarf_xmm0, 1384 ehframe_dwarf_zmm1 = ehframe_dwarf_xmm1, 1385 ehframe_dwarf_zmm2 = ehframe_dwarf_xmm2, 1386 ehframe_dwarf_zmm3 = ehframe_dwarf_xmm3, 1387 ehframe_dwarf_zmm4 = ehframe_dwarf_xmm4, 1388 ehframe_dwarf_zmm5 = ehframe_dwarf_xmm5, 1389 ehframe_dwarf_zmm6 = ehframe_dwarf_xmm6, 1390 ehframe_dwarf_zmm7 = ehframe_dwarf_xmm7, 1391 ehframe_dwarf_zmm8 = ehframe_dwarf_xmm8, 1392 ehframe_dwarf_zmm9 = ehframe_dwarf_xmm9, 1393 ehframe_dwarf_zmm10 = ehframe_dwarf_xmm10, 1394 
ehframe_dwarf_zmm11 = ehframe_dwarf_xmm11, 1395 ehframe_dwarf_zmm12 = ehframe_dwarf_xmm12, 1396 ehframe_dwarf_zmm13 = ehframe_dwarf_xmm13, 1397 ehframe_dwarf_zmm14 = ehframe_dwarf_xmm14, 1398 ehframe_dwarf_zmm15 = ehframe_dwarf_xmm15, 1399 ehframe_dwarf_zmm16 = 67, 1400 ehframe_dwarf_zmm17, 1401 ehframe_dwarf_zmm18, 1402 ehframe_dwarf_zmm19, 1403 ehframe_dwarf_zmm20, 1404 ehframe_dwarf_zmm21, 1405 ehframe_dwarf_zmm22, 1406 ehframe_dwarf_zmm23, 1407 ehframe_dwarf_zmm24, 1408 ehframe_dwarf_zmm25, 1409 ehframe_dwarf_zmm26, 1410 ehframe_dwarf_zmm27, 1411 ehframe_dwarf_zmm28, 1412 ehframe_dwarf_zmm29, 1413 ehframe_dwarf_zmm30, 1414 ehframe_dwarf_zmm31, 1415 ehframe_dwarf_k0 = 118, 1416 ehframe_dwarf_k1, 1417 ehframe_dwarf_k2, 1418 ehframe_dwarf_k3, 1419 ehframe_dwarf_k4, 1420 ehframe_dwarf_k5, 1421 ehframe_dwarf_k6, 1422 ehframe_dwarf_k7, 1423 }; 1424 1425 enum debugserver_regnums { 1426 debugserver_rax = 0, 1427 debugserver_rbx = 1, 1428 debugserver_rcx = 2, 1429 debugserver_rdx = 3, 1430 debugserver_rsi = 4, 1431 debugserver_rdi = 5, 1432 debugserver_rbp = 6, 1433 debugserver_rsp = 7, 1434 debugserver_r8 = 8, 1435 debugserver_r9 = 9, 1436 debugserver_r10 = 10, 1437 debugserver_r11 = 11, 1438 debugserver_r12 = 12, 1439 debugserver_r13 = 13, 1440 debugserver_r14 = 14, 1441 debugserver_r15 = 15, 1442 debugserver_rip = 16, 1443 debugserver_rflags = 17, 1444 debugserver_cs = 18, 1445 debugserver_ss = 19, 1446 debugserver_ds = 20, 1447 debugserver_es = 21, 1448 debugserver_fs = 22, 1449 debugserver_gs = 23, 1450 debugserver_stmm0 = 24, 1451 debugserver_stmm1 = 25, 1452 debugserver_stmm2 = 26, 1453 debugserver_stmm3 = 27, 1454 debugserver_stmm4 = 28, 1455 debugserver_stmm5 = 29, 1456 debugserver_stmm6 = 30, 1457 debugserver_stmm7 = 31, 1458 debugserver_fctrl = 32, 1459 debugserver_fcw = debugserver_fctrl, 1460 debugserver_fstat = 33, 1461 debugserver_fsw = debugserver_fstat, 1462 debugserver_ftag = 34, 1463 debugserver_ftw = debugserver_ftag, 1464 debugserver_fiseg = 35, 1465 debugserver_fpu_cs = debugserver_fiseg, 1466 debugserver_fioff = 36, 1467 debugserver_ip = debugserver_fioff, 1468 debugserver_foseg = 37, 1469 debugserver_fpu_ds = debugserver_foseg, 1470 debugserver_fooff = 38, 1471 debugserver_dp = debugserver_fooff, 1472 debugserver_fop = 39, 1473 debugserver_xmm0 = 40, 1474 debugserver_xmm1 = 41, 1475 debugserver_xmm2 = 42, 1476 debugserver_xmm3 = 43, 1477 debugserver_xmm4 = 44, 1478 debugserver_xmm5 = 45, 1479 debugserver_xmm6 = 46, 1480 debugserver_xmm7 = 47, 1481 debugserver_xmm8 = 48, 1482 debugserver_xmm9 = 49, 1483 debugserver_xmm10 = 50, 1484 debugserver_xmm11 = 51, 1485 debugserver_xmm12 = 52, 1486 debugserver_xmm13 = 53, 1487 debugserver_xmm14 = 54, 1488 debugserver_xmm15 = 55, 1489 debugserver_mxcsr = 56, 1490 debugserver_ymm0 = debugserver_xmm0, 1491 debugserver_ymm1 = debugserver_xmm1, 1492 debugserver_ymm2 = debugserver_xmm2, 1493 debugserver_ymm3 = debugserver_xmm3, 1494 debugserver_ymm4 = debugserver_xmm4, 1495 debugserver_ymm5 = debugserver_xmm5, 1496 debugserver_ymm6 = debugserver_xmm6, 1497 debugserver_ymm7 = debugserver_xmm7, 1498 debugserver_ymm8 = debugserver_xmm8, 1499 debugserver_ymm9 = debugserver_xmm9, 1500 debugserver_ymm10 = debugserver_xmm10, 1501 debugserver_ymm11 = debugserver_xmm11, 1502 debugserver_ymm12 = debugserver_xmm12, 1503 debugserver_ymm13 = debugserver_xmm13, 1504 debugserver_ymm14 = debugserver_xmm14, 1505 debugserver_ymm15 = debugserver_xmm15, 1506 debugserver_zmm0 = debugserver_xmm0, 1507 debugserver_zmm1 = debugserver_xmm1, 1508 debugserver_zmm2 
= debugserver_xmm2, 1509 debugserver_zmm3 = debugserver_xmm3, 1510 debugserver_zmm4 = debugserver_xmm4, 1511 debugserver_zmm5 = debugserver_xmm5, 1512 debugserver_zmm6 = debugserver_xmm6, 1513 debugserver_zmm7 = debugserver_xmm7, 1514 debugserver_zmm8 = debugserver_xmm8, 1515 debugserver_zmm9 = debugserver_xmm9, 1516 debugserver_zmm10 = debugserver_xmm10, 1517 debugserver_zmm11 = debugserver_xmm11, 1518 debugserver_zmm12 = debugserver_xmm12, 1519 debugserver_zmm13 = debugserver_xmm13, 1520 debugserver_zmm14 = debugserver_xmm14, 1521 debugserver_zmm15 = debugserver_xmm15, 1522 debugserver_zmm16 = 67, 1523 debugserver_zmm17 = 68, 1524 debugserver_zmm18 = 69, 1525 debugserver_zmm19 = 70, 1526 debugserver_zmm20 = 71, 1527 debugserver_zmm21 = 72, 1528 debugserver_zmm22 = 73, 1529 debugserver_zmm23 = 74, 1530 debugserver_zmm24 = 75, 1531 debugserver_zmm25 = 76, 1532 debugserver_zmm26 = 77, 1533 debugserver_zmm27 = 78, 1534 debugserver_zmm28 = 79, 1535 debugserver_zmm29 = 80, 1536 debugserver_zmm30 = 81, 1537 debugserver_zmm31 = 82, 1538 debugserver_k0 = 118, 1539 debugserver_k1 = 119, 1540 debugserver_k2 = 120, 1541 debugserver_k3 = 121, 1542 debugserver_k4 = 122, 1543 debugserver_k5 = 123, 1544 debugserver_k6 = 124, 1545 debugserver_k7 = 125, 1546 }; 1547 1548 #define GPR_OFFSET(reg) (offsetof(DNBArchImplX86_64::GPR, __##reg)) 1549 #define FPU_OFFSET(reg) \ 1550 (offsetof(DNBArchImplX86_64::FPU, __fpu_##reg) + \ 1551 offsetof(DNBArchImplX86_64::Context, fpu.no_avx)) 1552 #define AVX_OFFSET(reg) \ 1553 (offsetof(DNBArchImplX86_64::AVX, __fpu_##reg) + \ 1554 offsetof(DNBArchImplX86_64::Context, fpu.avx)) 1555 #define AVX512F_OFFSET(reg) \ 1556 (offsetof(DNBArchImplX86_64::AVX512F, __fpu_##reg) + \ 1557 offsetof(DNBArchImplX86_64::Context, fpu.avx512f)) 1558 #define EXC_OFFSET(reg) \ 1559 (offsetof(DNBArchImplX86_64::EXC, __##reg) + \ 1560 offsetof(DNBArchImplX86_64::Context, exc)) 1561 #define AVX_OFFSET_YMM(n) (AVX_OFFSET(ymmh0) + (32 * n)) 1562 #define AVX512F_OFFSET_ZMM(n) (AVX512F_OFFSET(zmmh0) + (64 * n)) 1563 1564 #define GPR_SIZE(reg) (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg)) 1565 #define FPU_SIZE_UINT(reg) \ 1566 (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg)) 1567 #define FPU_SIZE_MMST(reg) \ 1568 (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg)) 1569 #define FPU_SIZE_XMM(reg) \ 1570 (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg)) 1571 #define FPU_SIZE_YMM(reg) (32) 1572 #define FPU_SIZE_ZMM(reg) (64) 1573 #define EXC_SIZE(reg) (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg)) 1574 1575 // These macros will auto define the register name, alt name, register size, 1576 // register offset, encoding, format and native register. This ensures that 1577 // the register state structures are defined correctly and have the correct 1578 // sizes and offsets. 
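//
// For illustration only (a sketch of how the first macro below is used, not an
// exhaustive expansion): DEFINE_GPR(rax) produces a DNBRegisterInfo
// initializer roughly equivalent to
//
//   { e_regSetGPR, gpr_rax, "rax", NULL, Uint, Hex, GPR_SIZE(rax),
//     GPR_OFFSET(rax), ehframe_dwarf_rax, ehframe_dwarf_rax,
//     INVALID_NUB_REGNUM, debugserver_rax, NULL, g_invalidate_rax }
//
// so the register name string, size, offset, and register-number mappings all
// stay in sync with the GPR context structure and the enums defined above.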
1579 #define DEFINE_GPR(reg) \ 1580 { \ 1581 e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), \ 1582 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, \ 1583 INVALID_NUB_REGNUM, debugserver_##reg, NULL, g_invalidate_##reg \ 1584 } 1585 #define DEFINE_GPR_ALT(reg, alt, gen) \ 1586 { \ 1587 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \ 1588 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, gen, \ 1589 debugserver_##reg, NULL, g_invalidate_##reg \ 1590 } 1591 #define DEFINE_GPR_ALT2(reg, alt) \ 1592 { \ 1593 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \ 1594 GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1595 INVALID_NUB_REGNUM, debugserver_##reg, NULL, NULL \ 1596 } 1597 #define DEFINE_GPR_ALT3(reg, alt, gen) \ 1598 { \ 1599 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \ 1600 GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, \ 1601 debugserver_##reg, NULL, NULL \ 1602 } 1603 #define DEFINE_GPR_ALT4(reg, alt, gen) \ 1604 { \ 1605 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \ 1606 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, gen, \ 1607 debugserver_##reg, NULL, NULL \ 1608 } 1609 1610 #define DEFINE_GPR_PSEUDO_32(reg32, reg64) \ 1611 { \ 1612 e_regSetGPR, gpr_##reg32, #reg32, NULL, Uint, Hex, 4, 0, \ 1613 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1614 INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 \ 1615 } 1616 #define DEFINE_GPR_PSEUDO_16(reg16, reg64) \ 1617 { \ 1618 e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, 0, \ 1619 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1620 INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 \ 1621 } 1622 #define DEFINE_GPR_PSEUDO_8H(reg8, reg64) \ 1623 { \ 1624 e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 1, INVALID_NUB_REGNUM, \ 1625 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1626 g_contained_##reg64, g_invalidate_##reg64 \ 1627 } 1628 #define DEFINE_GPR_PSEUDO_8L(reg8, reg64) \ 1629 { \ 1630 e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 0, INVALID_NUB_REGNUM, \ 1631 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1632 g_contained_##reg64, g_invalidate_##reg64 \ 1633 } 1634 1635 // General purpose registers for 64 bit 1636 1637 const char *g_contained_rax[] = {"rax", NULL}; 1638 const char *g_contained_rbx[] = {"rbx", NULL}; 1639 const char *g_contained_rcx[] = {"rcx", NULL}; 1640 const char *g_contained_rdx[] = {"rdx", NULL}; 1641 const char *g_contained_rdi[] = {"rdi", NULL}; 1642 const char *g_contained_rsi[] = {"rsi", NULL}; 1643 const char *g_contained_rbp[] = {"rbp", NULL}; 1644 const char *g_contained_rsp[] = {"rsp", NULL}; 1645 const char *g_contained_r8[] = {"r8", NULL}; 1646 const char *g_contained_r9[] = {"r9", NULL}; 1647 const char *g_contained_r10[] = {"r10", NULL}; 1648 const char *g_contained_r11[] = {"r11", NULL}; 1649 const char *g_contained_r12[] = {"r12", NULL}; 1650 const char *g_contained_r13[] = {"r13", NULL}; 1651 const char *g_contained_r14[] = {"r14", NULL}; 1652 const char *g_contained_r15[] = {"r15", NULL}; 1653 1654 const char *g_invalidate_rax[] = {"rax", "eax", "ax", "ah", "al", NULL}; 1655 const char *g_invalidate_rbx[] = {"rbx", "ebx", "bx", "bh", "bl", NULL}; 1656 const char *g_invalidate_rcx[] = {"rcx", "ecx", "cx", "ch", "cl", NULL}; 1657 const char *g_invalidate_rdx[] = {"rdx", "edx", "dx", "dh", "dl", NULL}; 1658 const char *g_invalidate_rdi[] = {"rdi", "edi", "di", "dil", 
NULL}; 1659 const char *g_invalidate_rsi[] = {"rsi", "esi", "si", "sil", NULL}; 1660 const char *g_invalidate_rbp[] = {"rbp", "ebp", "bp", "bpl", NULL}; 1661 const char *g_invalidate_rsp[] = {"rsp", "esp", "sp", "spl", NULL}; 1662 const char *g_invalidate_r8[] = {"r8", "r8d", "r8w", "r8l", NULL}; 1663 const char *g_invalidate_r9[] = {"r9", "r9d", "r9w", "r9l", NULL}; 1664 const char *g_invalidate_r10[] = {"r10", "r10d", "r10w", "r10l", NULL}; 1665 const char *g_invalidate_r11[] = {"r11", "r11d", "r11w", "r11l", NULL}; 1666 const char *g_invalidate_r12[] = {"r12", "r12d", "r12w", "r12l", NULL}; 1667 const char *g_invalidate_r13[] = {"r13", "r13d", "r13w", "r13l", NULL}; 1668 const char *g_invalidate_r14[] = {"r14", "r14d", "r14w", "r14l", NULL}; 1669 const char *g_invalidate_r15[] = {"r15", "r15d", "r15w", "r15l", NULL}; 1670 1671 const DNBRegisterInfo DNBArchImplX86_64::g_gpr_registers[] = { 1672 DEFINE_GPR(rax), 1673 DEFINE_GPR(rbx), 1674 DEFINE_GPR_ALT(rcx, "arg4", GENERIC_REGNUM_ARG4), 1675 DEFINE_GPR_ALT(rdx, "arg3", GENERIC_REGNUM_ARG3), 1676 DEFINE_GPR_ALT(rdi, "arg1", GENERIC_REGNUM_ARG1), 1677 DEFINE_GPR_ALT(rsi, "arg2", GENERIC_REGNUM_ARG2), 1678 DEFINE_GPR_ALT(rbp, "fp", GENERIC_REGNUM_FP), 1679 DEFINE_GPR_ALT(rsp, "sp", GENERIC_REGNUM_SP), 1680 DEFINE_GPR_ALT(r8, "arg5", GENERIC_REGNUM_ARG5), 1681 DEFINE_GPR_ALT(r9, "arg6", GENERIC_REGNUM_ARG6), 1682 DEFINE_GPR(r10), 1683 DEFINE_GPR(r11), 1684 DEFINE_GPR(r12), 1685 DEFINE_GPR(r13), 1686 DEFINE_GPR(r14), 1687 DEFINE_GPR(r15), 1688 DEFINE_GPR_ALT4(rip, "pc", GENERIC_REGNUM_PC), 1689 DEFINE_GPR_ALT3(rflags, "flags", GENERIC_REGNUM_FLAGS), 1690 DEFINE_GPR_ALT2(cs, NULL), 1691 DEFINE_GPR_ALT2(fs, NULL), 1692 DEFINE_GPR_ALT2(gs, NULL), 1693 DEFINE_GPR_PSEUDO_32(eax, rax), 1694 DEFINE_GPR_PSEUDO_32(ebx, rbx), 1695 DEFINE_GPR_PSEUDO_32(ecx, rcx), 1696 DEFINE_GPR_PSEUDO_32(edx, rdx), 1697 DEFINE_GPR_PSEUDO_32(edi, rdi), 1698 DEFINE_GPR_PSEUDO_32(esi, rsi), 1699 DEFINE_GPR_PSEUDO_32(ebp, rbp), 1700 DEFINE_GPR_PSEUDO_32(esp, rsp), 1701 DEFINE_GPR_PSEUDO_32(r8d, r8), 1702 DEFINE_GPR_PSEUDO_32(r9d, r9), 1703 DEFINE_GPR_PSEUDO_32(r10d, r10), 1704 DEFINE_GPR_PSEUDO_32(r11d, r11), 1705 DEFINE_GPR_PSEUDO_32(r12d, r12), 1706 DEFINE_GPR_PSEUDO_32(r13d, r13), 1707 DEFINE_GPR_PSEUDO_32(r14d, r14), 1708 DEFINE_GPR_PSEUDO_32(r15d, r15), 1709 DEFINE_GPR_PSEUDO_16(ax, rax), 1710 DEFINE_GPR_PSEUDO_16(bx, rbx), 1711 DEFINE_GPR_PSEUDO_16(cx, rcx), 1712 DEFINE_GPR_PSEUDO_16(dx, rdx), 1713 DEFINE_GPR_PSEUDO_16(di, rdi), 1714 DEFINE_GPR_PSEUDO_16(si, rsi), 1715 DEFINE_GPR_PSEUDO_16(bp, rbp), 1716 DEFINE_GPR_PSEUDO_16(sp, rsp), 1717 DEFINE_GPR_PSEUDO_16(r8w, r8), 1718 DEFINE_GPR_PSEUDO_16(r9w, r9), 1719 DEFINE_GPR_PSEUDO_16(r10w, r10), 1720 DEFINE_GPR_PSEUDO_16(r11w, r11), 1721 DEFINE_GPR_PSEUDO_16(r12w, r12), 1722 DEFINE_GPR_PSEUDO_16(r13w, r13), 1723 DEFINE_GPR_PSEUDO_16(r14w, r14), 1724 DEFINE_GPR_PSEUDO_16(r15w, r15), 1725 DEFINE_GPR_PSEUDO_8H(ah, rax), 1726 DEFINE_GPR_PSEUDO_8H(bh, rbx), 1727 DEFINE_GPR_PSEUDO_8H(ch, rcx), 1728 DEFINE_GPR_PSEUDO_8H(dh, rdx), 1729 DEFINE_GPR_PSEUDO_8L(al, rax), 1730 DEFINE_GPR_PSEUDO_8L(bl, rbx), 1731 DEFINE_GPR_PSEUDO_8L(cl, rcx), 1732 DEFINE_GPR_PSEUDO_8L(dl, rdx), 1733 DEFINE_GPR_PSEUDO_8L(dil, rdi), 1734 DEFINE_GPR_PSEUDO_8L(sil, rsi), 1735 DEFINE_GPR_PSEUDO_8L(bpl, rbp), 1736 DEFINE_GPR_PSEUDO_8L(spl, rsp), 1737 DEFINE_GPR_PSEUDO_8L(r8l, r8), 1738 DEFINE_GPR_PSEUDO_8L(r9l, r9), 1739 DEFINE_GPR_PSEUDO_8L(r10l, r10), 1740 DEFINE_GPR_PSEUDO_8L(r11l, r11), 1741 DEFINE_GPR_PSEUDO_8L(r12l, r12), 1742 
DEFINE_GPR_PSEUDO_8L(r13l, r13), 1743 DEFINE_GPR_PSEUDO_8L(r14l, r14), 1744 DEFINE_GPR_PSEUDO_8L(r15l, r15)}; 1745 1746 // Floating point registers 64 bit 1747 const DNBRegisterInfo DNBArchImplX86_64::g_fpu_registers_no_avx[] = { 1748 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw), 1749 FPU_OFFSET(fcw), -1U, -1U, -1U, -1U, NULL, NULL}, 1750 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw), 1751 FPU_OFFSET(fsw), -1U, -1U, -1U, -1U, NULL, NULL}, 1752 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */, 1753 FPU_OFFSET(ftw), -1U, -1U, -1U, -1U, NULL, NULL}, 1754 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop), 1755 FPU_OFFSET(fop), -1U, -1U, -1U, -1U, NULL, NULL}, 1756 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip), 1757 FPU_OFFSET(ip), -1U, -1U, -1U, -1U, NULL, NULL}, 1758 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs), 1759 FPU_OFFSET(cs), -1U, -1U, -1U, -1U, NULL, NULL}, 1760 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp), 1761 FPU_OFFSET(dp), -1U, -1U, -1U, -1U, NULL, NULL}, 1762 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds), 1763 FPU_OFFSET(ds), -1U, -1U, -1U, -1U, NULL, NULL}, 1764 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr), 1765 FPU_OFFSET(mxcsr), -1U, -1U, -1U, -1U, NULL, NULL}, 1766 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex, 1767 FPU_SIZE_UINT(mxcsrmask), FPU_OFFSET(mxcsrmask), -1U, -1U, -1U, -1U, NULL, 1768 NULL}, 1769 1770 {e_regSetFPU, fpu_stmm0, "stmm0", "st0", Vector, VectorOfUInt8, 1771 FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), ehframe_dwarf_stmm0, 1772 ehframe_dwarf_stmm0, -1U, debugserver_stmm0, NULL, NULL}, 1773 {e_regSetFPU, fpu_stmm1, "stmm1", "st1", Vector, VectorOfUInt8, 1774 FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), ehframe_dwarf_stmm1, 1775 ehframe_dwarf_stmm1, -1U, debugserver_stmm1, NULL, NULL}, 1776 {e_regSetFPU, fpu_stmm2, "stmm2", "st2", Vector, VectorOfUInt8, 1777 FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), ehframe_dwarf_stmm2, 1778 ehframe_dwarf_stmm2, -1U, debugserver_stmm2, NULL, NULL}, 1779 {e_regSetFPU, fpu_stmm3, "stmm3", "st3", Vector, VectorOfUInt8, 1780 FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), ehframe_dwarf_stmm3, 1781 ehframe_dwarf_stmm3, -1U, debugserver_stmm3, NULL, NULL}, 1782 {e_regSetFPU, fpu_stmm4, "stmm4", "st4", Vector, VectorOfUInt8, 1783 FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), ehframe_dwarf_stmm4, 1784 ehframe_dwarf_stmm4, -1U, debugserver_stmm4, NULL, NULL}, 1785 {e_regSetFPU, fpu_stmm5, "stmm5", "st5", Vector, VectorOfUInt8, 1786 FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), ehframe_dwarf_stmm5, 1787 ehframe_dwarf_stmm5, -1U, debugserver_stmm5, NULL, NULL}, 1788 {e_regSetFPU, fpu_stmm6, "stmm6", "st6", Vector, VectorOfUInt8, 1789 FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), ehframe_dwarf_stmm6, 1790 ehframe_dwarf_stmm6, -1U, debugserver_stmm6, NULL, NULL}, 1791 {e_regSetFPU, fpu_stmm7, "stmm7", "st7", Vector, VectorOfUInt8, 1792 FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), ehframe_dwarf_stmm7, 1793 ehframe_dwarf_stmm7, -1U, debugserver_stmm7, NULL, NULL}, 1794 1795 {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, 1796 FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), ehframe_dwarf_xmm0, 1797 ehframe_dwarf_xmm0, -1U, debugserver_xmm0, NULL, NULL}, 1798 {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, 1799 FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), ehframe_dwarf_xmm1, 1800 ehframe_dwarf_xmm1, -1U, debugserver_xmm1, NULL, NULL}, 1801 
{e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, 1802 FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), ehframe_dwarf_xmm2, 1803 ehframe_dwarf_xmm2, -1U, debugserver_xmm2, NULL, NULL}, 1804 {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, 1805 FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), ehframe_dwarf_xmm3, 1806 ehframe_dwarf_xmm3, -1U, debugserver_xmm3, NULL, NULL}, 1807 {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, 1808 FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), ehframe_dwarf_xmm4, 1809 ehframe_dwarf_xmm4, -1U, debugserver_xmm4, NULL, NULL}, 1810 {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, 1811 FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), ehframe_dwarf_xmm5, 1812 ehframe_dwarf_xmm5, -1U, debugserver_xmm5, NULL, NULL}, 1813 {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, 1814 FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), ehframe_dwarf_xmm6, 1815 ehframe_dwarf_xmm6, -1U, debugserver_xmm6, NULL, NULL}, 1816 {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, 1817 FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), ehframe_dwarf_xmm7, 1818 ehframe_dwarf_xmm7, -1U, debugserver_xmm7, NULL, NULL}, 1819 {e_regSetFPU, fpu_xmm8, "xmm8", NULL, Vector, VectorOfUInt8, 1820 FPU_SIZE_XMM(xmm8), FPU_OFFSET(xmm8), ehframe_dwarf_xmm8, 1821 ehframe_dwarf_xmm8, -1U, debugserver_xmm8, NULL, NULL}, 1822 {e_regSetFPU, fpu_xmm9, "xmm9", NULL, Vector, VectorOfUInt8, 1823 FPU_SIZE_XMM(xmm9), FPU_OFFSET(xmm9), ehframe_dwarf_xmm9, 1824 ehframe_dwarf_xmm9, -1U, debugserver_xmm9, NULL, NULL}, 1825 {e_regSetFPU, fpu_xmm10, "xmm10", NULL, Vector, VectorOfUInt8, 1826 FPU_SIZE_XMM(xmm10), FPU_OFFSET(xmm10), ehframe_dwarf_xmm10, 1827 ehframe_dwarf_xmm10, -1U, debugserver_xmm10, NULL, NULL}, 1828 {e_regSetFPU, fpu_xmm11, "xmm11", NULL, Vector, VectorOfUInt8, 1829 FPU_SIZE_XMM(xmm11), FPU_OFFSET(xmm11), ehframe_dwarf_xmm11, 1830 ehframe_dwarf_xmm11, -1U, debugserver_xmm11, NULL, NULL}, 1831 {e_regSetFPU, fpu_xmm12, "xmm12", NULL, Vector, VectorOfUInt8, 1832 FPU_SIZE_XMM(xmm12), FPU_OFFSET(xmm12), ehframe_dwarf_xmm12, 1833 ehframe_dwarf_xmm12, -1U, debugserver_xmm12, NULL, NULL}, 1834 {e_regSetFPU, fpu_xmm13, "xmm13", NULL, Vector, VectorOfUInt8, 1835 FPU_SIZE_XMM(xmm13), FPU_OFFSET(xmm13), ehframe_dwarf_xmm13, 1836 ehframe_dwarf_xmm13, -1U, debugserver_xmm13, NULL, NULL}, 1837 {e_regSetFPU, fpu_xmm14, "xmm14", NULL, Vector, VectorOfUInt8, 1838 FPU_SIZE_XMM(xmm14), FPU_OFFSET(xmm14), ehframe_dwarf_xmm14, 1839 ehframe_dwarf_xmm14, -1U, debugserver_xmm14, NULL, NULL}, 1840 {e_regSetFPU, fpu_xmm15, "xmm15", NULL, Vector, VectorOfUInt8, 1841 FPU_SIZE_XMM(xmm15), FPU_OFFSET(xmm15), ehframe_dwarf_xmm15, 1842 ehframe_dwarf_xmm15, -1U, debugserver_xmm15, NULL, NULL}, 1843 }; 1844 1845 static const char *g_contained_ymm0[] = {"ymm0", NULL}; 1846 static const char *g_contained_ymm1[] = {"ymm1", NULL}; 1847 static const char *g_contained_ymm2[] = {"ymm2", NULL}; 1848 static const char *g_contained_ymm3[] = {"ymm3", NULL}; 1849 static const char *g_contained_ymm4[] = {"ymm4", NULL}; 1850 static const char *g_contained_ymm5[] = {"ymm5", NULL}; 1851 static const char *g_contained_ymm6[] = {"ymm6", NULL}; 1852 static const char *g_contained_ymm7[] = {"ymm7", NULL}; 1853 static const char *g_contained_ymm8[] = {"ymm8", NULL}; 1854 static const char *g_contained_ymm9[] = {"ymm9", NULL}; 1855 static const char *g_contained_ymm10[] = {"ymm10", NULL}; 1856 static const char *g_contained_ymm11[] = {"ymm11", NULL}; 1857 static const char *g_contained_ymm12[] = {"ymm12", NULL}; 1858 static const char *g_contained_ymm13[] = 
{"ymm13", NULL}; 1859 static const char *g_contained_ymm14[] = {"ymm14", NULL}; 1860 static const char *g_contained_ymm15[] = {"ymm15", NULL}; 1861 1862 const DNBRegisterInfo DNBArchImplX86_64::g_fpu_registers_avx[] = { 1863 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw), 1864 AVX_OFFSET(fcw), -1U, -1U, -1U, -1U, NULL, NULL}, 1865 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw), 1866 AVX_OFFSET(fsw), -1U, -1U, -1U, -1U, NULL, NULL}, 1867 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */, 1868 AVX_OFFSET(ftw), -1U, -1U, -1U, -1U, NULL, NULL}, 1869 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop), 1870 AVX_OFFSET(fop), -1U, -1U, -1U, -1U, NULL, NULL}, 1871 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip), 1872 AVX_OFFSET(ip), -1U, -1U, -1U, -1U, NULL, NULL}, 1873 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs), 1874 AVX_OFFSET(cs), -1U, -1U, -1U, -1U, NULL, NULL}, 1875 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp), 1876 AVX_OFFSET(dp), -1U, -1U, -1U, -1U, NULL, NULL}, 1877 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds), 1878 AVX_OFFSET(ds), -1U, -1U, -1U, -1U, NULL, NULL}, 1879 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr), 1880 AVX_OFFSET(mxcsr), -1U, -1U, -1U, -1U, NULL, NULL}, 1881 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex, 1882 FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), -1U, -1U, -1U, -1U, NULL, 1883 NULL}, 1884 1885 {e_regSetFPU, fpu_stmm0, "stmm0", "st0", Vector, VectorOfUInt8, 1886 FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), ehframe_dwarf_stmm0, 1887 ehframe_dwarf_stmm0, -1U, debugserver_stmm0, NULL, NULL}, 1888 {e_regSetFPU, fpu_stmm1, "stmm1", "st1", Vector, VectorOfUInt8, 1889 FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), ehframe_dwarf_stmm1, 1890 ehframe_dwarf_stmm1, -1U, debugserver_stmm1, NULL, NULL}, 1891 {e_regSetFPU, fpu_stmm2, "stmm2", "st2", Vector, VectorOfUInt8, 1892 FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), ehframe_dwarf_stmm2, 1893 ehframe_dwarf_stmm2, -1U, debugserver_stmm2, NULL, NULL}, 1894 {e_regSetFPU, fpu_stmm3, "stmm3", "st3", Vector, VectorOfUInt8, 1895 FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), ehframe_dwarf_stmm3, 1896 ehframe_dwarf_stmm3, -1U, debugserver_stmm3, NULL, NULL}, 1897 {e_regSetFPU, fpu_stmm4, "stmm4", "st4", Vector, VectorOfUInt8, 1898 FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), ehframe_dwarf_stmm4, 1899 ehframe_dwarf_stmm4, -1U, debugserver_stmm4, NULL, NULL}, 1900 {e_regSetFPU, fpu_stmm5, "stmm5", "st5", Vector, VectorOfUInt8, 1901 FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), ehframe_dwarf_stmm5, 1902 ehframe_dwarf_stmm5, -1U, debugserver_stmm5, NULL, NULL}, 1903 {e_regSetFPU, fpu_stmm6, "stmm6", "st6", Vector, VectorOfUInt8, 1904 FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), ehframe_dwarf_stmm6, 1905 ehframe_dwarf_stmm6, -1U, debugserver_stmm6, NULL, NULL}, 1906 {e_regSetFPU, fpu_stmm7, "stmm7", "st7", Vector, VectorOfUInt8, 1907 FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), ehframe_dwarf_stmm7, 1908 ehframe_dwarf_stmm7, -1U, debugserver_stmm7, NULL, NULL}, 1909 1910 {e_regSetFPU, fpu_ymm0, "ymm0", NULL, Vector, VectorOfUInt8, 1911 FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), ehframe_dwarf_ymm0, 1912 ehframe_dwarf_ymm0, -1U, debugserver_ymm0, NULL, NULL}, 1913 {e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8, 1914 FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), ehframe_dwarf_ymm1, 1915 ehframe_dwarf_ymm1, -1U, debugserver_ymm1, NULL, NULL}, 1916 
{e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8, 1917 FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), ehframe_dwarf_ymm2, 1918 ehframe_dwarf_ymm2, -1U, debugserver_ymm2, NULL, NULL}, 1919 {e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8, 1920 FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), ehframe_dwarf_ymm3, 1921 ehframe_dwarf_ymm3, -1U, debugserver_ymm3, NULL, NULL}, 1922 {e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8, 1923 FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), ehframe_dwarf_ymm4, 1924 ehframe_dwarf_ymm4, -1U, debugserver_ymm4, NULL, NULL}, 1925 {e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8, 1926 FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), ehframe_dwarf_ymm5, 1927 ehframe_dwarf_ymm5, -1U, debugserver_ymm5, NULL, NULL}, 1928 {e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8, 1929 FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), ehframe_dwarf_ymm6, 1930 ehframe_dwarf_ymm6, -1U, debugserver_ymm6, NULL, NULL}, 1931 {e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8, 1932 FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), ehframe_dwarf_ymm7, 1933 ehframe_dwarf_ymm7, -1U, debugserver_ymm7, NULL, NULL}, 1934 {e_regSetFPU, fpu_ymm8, "ymm8", NULL, Vector, VectorOfUInt8, 1935 FPU_SIZE_YMM(ymm8), AVX_OFFSET_YMM(8), ehframe_dwarf_ymm8, 1936 ehframe_dwarf_ymm8, -1U, debugserver_ymm8, NULL, NULL}, 1937 {e_regSetFPU, fpu_ymm9, "ymm9", NULL, Vector, VectorOfUInt8, 1938 FPU_SIZE_YMM(ymm9), AVX_OFFSET_YMM(9), ehframe_dwarf_ymm9, 1939 ehframe_dwarf_ymm9, -1U, debugserver_ymm9, NULL, NULL}, 1940 {e_regSetFPU, fpu_ymm10, "ymm10", NULL, Vector, VectorOfUInt8, 1941 FPU_SIZE_YMM(ymm10), AVX_OFFSET_YMM(10), ehframe_dwarf_ymm10, 1942 ehframe_dwarf_ymm10, -1U, debugserver_ymm10, NULL, NULL}, 1943 {e_regSetFPU, fpu_ymm11, "ymm11", NULL, Vector, VectorOfUInt8, 1944 FPU_SIZE_YMM(ymm11), AVX_OFFSET_YMM(11), ehframe_dwarf_ymm11, 1945 ehframe_dwarf_ymm11, -1U, debugserver_ymm11, NULL, NULL}, 1946 {e_regSetFPU, fpu_ymm12, "ymm12", NULL, Vector, VectorOfUInt8, 1947 FPU_SIZE_YMM(ymm12), AVX_OFFSET_YMM(12), ehframe_dwarf_ymm12, 1948 ehframe_dwarf_ymm12, -1U, debugserver_ymm12, NULL, NULL}, 1949 {e_regSetFPU, fpu_ymm13, "ymm13", NULL, Vector, VectorOfUInt8, 1950 FPU_SIZE_YMM(ymm13), AVX_OFFSET_YMM(13), ehframe_dwarf_ymm13, 1951 ehframe_dwarf_ymm13, -1U, debugserver_ymm13, NULL, NULL}, 1952 {e_regSetFPU, fpu_ymm14, "ymm14", NULL, Vector, VectorOfUInt8, 1953 FPU_SIZE_YMM(ymm14), AVX_OFFSET_YMM(14), ehframe_dwarf_ymm14, 1954 ehframe_dwarf_ymm14, -1U, debugserver_ymm14, NULL, NULL}, 1955 {e_regSetFPU, fpu_ymm15, "ymm15", NULL, Vector, VectorOfUInt8, 1956 FPU_SIZE_YMM(ymm15), AVX_OFFSET_YMM(15), ehframe_dwarf_ymm15, 1957 ehframe_dwarf_ymm15, -1U, debugserver_ymm15, NULL, NULL}, 1958 1959 {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, 1960 FPU_SIZE_XMM(xmm0), 0, ehframe_dwarf_xmm0, ehframe_dwarf_xmm0, -1U, 1961 debugserver_xmm0, g_contained_ymm0, NULL}, 1962 {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, 1963 FPU_SIZE_XMM(xmm1), 0, ehframe_dwarf_xmm1, ehframe_dwarf_xmm1, -1U, 1964 debugserver_xmm1, g_contained_ymm1, NULL}, 1965 {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, 1966 FPU_SIZE_XMM(xmm2), 0, ehframe_dwarf_xmm2, ehframe_dwarf_xmm2, -1U, 1967 debugserver_xmm2, g_contained_ymm2, NULL}, 1968 {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, 1969 FPU_SIZE_XMM(xmm3), 0, ehframe_dwarf_xmm3, ehframe_dwarf_xmm3, -1U, 1970 debugserver_xmm3, g_contained_ymm3, NULL}, 1971 {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, 1972 FPU_SIZE_XMM(xmm4), 
0, ehframe_dwarf_xmm4, ehframe_dwarf_xmm4, -1U, 1973 debugserver_xmm4, g_contained_ymm4, NULL}, 1974 {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, 1975 FPU_SIZE_XMM(xmm5), 0, ehframe_dwarf_xmm5, ehframe_dwarf_xmm5, -1U, 1976 debugserver_xmm5, g_contained_ymm5, NULL}, 1977 {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, 1978 FPU_SIZE_XMM(xmm6), 0, ehframe_dwarf_xmm6, ehframe_dwarf_xmm6, -1U, 1979 debugserver_xmm6, g_contained_ymm6, NULL}, 1980 {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, 1981 FPU_SIZE_XMM(xmm7), 0, ehframe_dwarf_xmm7, ehframe_dwarf_xmm7, -1U, 1982 debugserver_xmm7, g_contained_ymm7, NULL}, 1983 {e_regSetFPU, fpu_xmm8, "xmm8", NULL, Vector, VectorOfUInt8, 1984 FPU_SIZE_XMM(xmm8), 0, ehframe_dwarf_xmm8, ehframe_dwarf_xmm8, -1U, 1985 debugserver_xmm8, g_contained_ymm8, NULL}, 1986 {e_regSetFPU, fpu_xmm9, "xmm9", NULL, Vector, VectorOfUInt8, 1987 FPU_SIZE_XMM(xmm9), 0, ehframe_dwarf_xmm9, ehframe_dwarf_xmm9, -1U, 1988 debugserver_xmm9, g_contained_ymm9, NULL}, 1989 {e_regSetFPU, fpu_xmm10, "xmm10", NULL, Vector, VectorOfUInt8, 1990 FPU_SIZE_XMM(xmm10), 0, ehframe_dwarf_xmm10, ehframe_dwarf_xmm10, -1U, 1991 debugserver_xmm10, g_contained_ymm10, NULL}, 1992 {e_regSetFPU, fpu_xmm11, "xmm11", NULL, Vector, VectorOfUInt8, 1993 FPU_SIZE_XMM(xmm11), 0, ehframe_dwarf_xmm11, ehframe_dwarf_xmm11, -1U, 1994 debugserver_xmm11, g_contained_ymm11, NULL}, 1995 {e_regSetFPU, fpu_xmm12, "xmm12", NULL, Vector, VectorOfUInt8, 1996 FPU_SIZE_XMM(xmm12), 0, ehframe_dwarf_xmm12, ehframe_dwarf_xmm12, -1U, 1997 debugserver_xmm12, g_contained_ymm12, NULL}, 1998 {e_regSetFPU, fpu_xmm13, "xmm13", NULL, Vector, VectorOfUInt8, 1999 FPU_SIZE_XMM(xmm13), 0, ehframe_dwarf_xmm13, ehframe_dwarf_xmm13, -1U, 2000 debugserver_xmm13, g_contained_ymm13, NULL}, 2001 {e_regSetFPU, fpu_xmm14, "xmm14", NULL, Vector, VectorOfUInt8, 2002 FPU_SIZE_XMM(xmm14), 0, ehframe_dwarf_xmm14, ehframe_dwarf_xmm14, -1U, 2003 debugserver_xmm14, g_contained_ymm14, NULL}, 2004 {e_regSetFPU, fpu_xmm15, "xmm15", NULL, Vector, VectorOfUInt8, 2005 FPU_SIZE_XMM(xmm15), 0, ehframe_dwarf_xmm15, ehframe_dwarf_xmm15, -1U, 2006 debugserver_xmm15, g_contained_ymm15, NULL} 2007 2008 }; 2009 2010 static const char *g_contained_zmm0[] = {"zmm0", NULL}; 2011 static const char *g_contained_zmm1[] = {"zmm1", NULL}; 2012 static const char *g_contained_zmm2[] = {"zmm2", NULL}; 2013 static const char *g_contained_zmm3[] = {"zmm3", NULL}; 2014 static const char *g_contained_zmm4[] = {"zmm4", NULL}; 2015 static const char *g_contained_zmm5[] = {"zmm5", NULL}; 2016 static const char *g_contained_zmm6[] = {"zmm6", NULL}; 2017 static const char *g_contained_zmm7[] = {"zmm7", NULL}; 2018 static const char *g_contained_zmm8[] = {"zmm8", NULL}; 2019 static const char *g_contained_zmm9[] = {"zmm9", NULL}; 2020 static const char *g_contained_zmm10[] = {"zmm10", NULL}; 2021 static const char *g_contained_zmm11[] = {"zmm11", NULL}; 2022 static const char *g_contained_zmm12[] = {"zmm12", NULL}; 2023 static const char *g_contained_zmm13[] = {"zmm13", NULL}; 2024 static const char *g_contained_zmm14[] = {"zmm14", NULL}; 2025 static const char *g_contained_zmm15[] = {"zmm15", NULL}; 2026 2027 #define STR(s) #s 2028 2029 #define ZMM_REG_DEF(reg) \ 2030 { \ 2031 e_regSetFPU, fpu_zmm##reg, STR(zmm##reg), NULL, Vector, VectorOfUInt8, \ 2032 FPU_SIZE_ZMM(zmm##reg), AVX512F_OFFSET_ZMM(reg), \ 2033 ehframe_dwarf_zmm##reg, ehframe_dwarf_zmm##reg, -1U, \ 2034 debugserver_zmm##reg, NULL, NULL \ 2035 } 2036 2037 #define YMM_REG_ALIAS(reg) \ 
2038 { \ 2039 e_regSetFPU, fpu_ymm##reg, STR(ymm##reg), NULL, Vector, VectorOfUInt8, \ 2040 FPU_SIZE_YMM(ymm##reg), 0, ehframe_dwarf_ymm##reg, \ 2041 ehframe_dwarf_ymm##reg, -1U, debugserver_ymm##reg, \ 2042 g_contained_zmm##reg, NULL \ 2043 } 2044 2045 #define XMM_REG_ALIAS(reg) \ 2046 { \ 2047 e_regSetFPU, fpu_xmm##reg, STR(xmm##reg), NULL, Vector, VectorOfUInt8, \ 2048 FPU_SIZE_XMM(xmm##reg), 0, ehframe_dwarf_xmm##reg, \ 2049 ehframe_dwarf_xmm##reg, -1U, debugserver_xmm##reg, \ 2050 g_contained_zmm##reg, NULL \ 2051 } 2052 2053 #define AVX512_K_REG_DEF(reg) \ 2054 { \ 2055 e_regSetFPU, fpu_k##reg, STR(k##reg), NULL, Vector, VectorOfUInt8, 8, \ 2056 AVX512F_OFFSET(k##reg), ehframe_dwarf_k##reg, ehframe_dwarf_k##reg, \ 2057 -1U, debugserver_k##reg, NULL, NULL \ 2058 } 2059 2060 const DNBRegisterInfo DNBArchImplX86_64::g_fpu_registers_avx512f[] = { 2061 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw), 2062 AVX_OFFSET(fcw), -1U, -1U, -1U, -1U, NULL, NULL}, 2063 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw), 2064 AVX_OFFSET(fsw), -1U, -1U, -1U, -1U, NULL, NULL}, 2065 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */, 2066 AVX_OFFSET(ftw), -1U, -1U, -1U, -1U, NULL, NULL}, 2067 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop), 2068 AVX_OFFSET(fop), -1U, -1U, -1U, -1U, NULL, NULL}, 2069 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip), 2070 AVX_OFFSET(ip), -1U, -1U, -1U, -1U, NULL, NULL}, 2071 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs), 2072 AVX_OFFSET(cs), -1U, -1U, -1U, -1U, NULL, NULL}, 2073 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp), 2074 AVX_OFFSET(dp), -1U, -1U, -1U, -1U, NULL, NULL}, 2075 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds), 2076 AVX_OFFSET(ds), -1U, -1U, -1U, -1U, NULL, NULL}, 2077 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr), 2078 AVX_OFFSET(mxcsr), -1U, -1U, -1U, -1U, NULL, NULL}, 2079 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex, 2080 FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), -1U, -1U, -1U, -1U, NULL, 2081 NULL}, 2082 2083 {e_regSetFPU, fpu_stmm0, "stmm0", "st0", Vector, VectorOfUInt8, 2084 FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), ehframe_dwarf_stmm0, 2085 ehframe_dwarf_stmm0, -1U, debugserver_stmm0, NULL, NULL}, 2086 {e_regSetFPU, fpu_stmm1, "stmm1", "st1", Vector, VectorOfUInt8, 2087 FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), ehframe_dwarf_stmm1, 2088 ehframe_dwarf_stmm1, -1U, debugserver_stmm1, NULL, NULL}, 2089 {e_regSetFPU, fpu_stmm2, "stmm2", "st2", Vector, VectorOfUInt8, 2090 FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), ehframe_dwarf_stmm2, 2091 ehframe_dwarf_stmm2, -1U, debugserver_stmm2, NULL, NULL}, 2092 {e_regSetFPU, fpu_stmm3, "stmm3", "st3", Vector, VectorOfUInt8, 2093 FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), ehframe_dwarf_stmm3, 2094 ehframe_dwarf_stmm3, -1U, debugserver_stmm3, NULL, NULL}, 2095 {e_regSetFPU, fpu_stmm4, "stmm4", "st4", Vector, VectorOfUInt8, 2096 FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), ehframe_dwarf_stmm4, 2097 ehframe_dwarf_stmm4, -1U, debugserver_stmm4, NULL, NULL}, 2098 {e_regSetFPU, fpu_stmm5, "stmm5", "st5", Vector, VectorOfUInt8, 2099 FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), ehframe_dwarf_stmm5, 2100 ehframe_dwarf_stmm5, -1U, debugserver_stmm5, NULL, NULL}, 2101 {e_regSetFPU, fpu_stmm6, "stmm6", "st6", Vector, VectorOfUInt8, 2102 FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), ehframe_dwarf_stmm6, 2103 
ehframe_dwarf_stmm6, -1U, debugserver_stmm6, NULL, NULL}, 2104 {e_regSetFPU, fpu_stmm7, "stmm7", "st7", Vector, VectorOfUInt8, 2105 FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), ehframe_dwarf_stmm7, 2106 ehframe_dwarf_stmm7, -1U, debugserver_stmm7, NULL, NULL}, 2107 2108 AVX512_K_REG_DEF(0), 2109 AVX512_K_REG_DEF(1), 2110 AVX512_K_REG_DEF(2), 2111 AVX512_K_REG_DEF(3), 2112 AVX512_K_REG_DEF(4), 2113 AVX512_K_REG_DEF(5), 2114 AVX512_K_REG_DEF(6), 2115 AVX512_K_REG_DEF(7), 2116 2117 ZMM_REG_DEF(0), 2118 ZMM_REG_DEF(1), 2119 ZMM_REG_DEF(2), 2120 ZMM_REG_DEF(3), 2121 ZMM_REG_DEF(4), 2122 ZMM_REG_DEF(5), 2123 ZMM_REG_DEF(6), 2124 ZMM_REG_DEF(7), 2125 ZMM_REG_DEF(8), 2126 ZMM_REG_DEF(9), 2127 ZMM_REG_DEF(10), 2128 ZMM_REG_DEF(11), 2129 ZMM_REG_DEF(12), 2130 ZMM_REG_DEF(13), 2131 ZMM_REG_DEF(14), 2132 ZMM_REG_DEF(15), 2133 ZMM_REG_DEF(16), 2134 ZMM_REG_DEF(17), 2135 ZMM_REG_DEF(18), 2136 ZMM_REG_DEF(19), 2137 ZMM_REG_DEF(20), 2138 ZMM_REG_DEF(21), 2139 ZMM_REG_DEF(22), 2140 ZMM_REG_DEF(23), 2141 ZMM_REG_DEF(24), 2142 ZMM_REG_DEF(25), 2143 ZMM_REG_DEF(26), 2144 ZMM_REG_DEF(27), 2145 ZMM_REG_DEF(28), 2146 ZMM_REG_DEF(29), 2147 ZMM_REG_DEF(30), 2148 ZMM_REG_DEF(31), 2149 2150 YMM_REG_ALIAS(0), 2151 YMM_REG_ALIAS(1), 2152 YMM_REG_ALIAS(2), 2153 YMM_REG_ALIAS(3), 2154 YMM_REG_ALIAS(4), 2155 YMM_REG_ALIAS(5), 2156 YMM_REG_ALIAS(6), 2157 YMM_REG_ALIAS(7), 2158 YMM_REG_ALIAS(8), 2159 YMM_REG_ALIAS(9), 2160 YMM_REG_ALIAS(10), 2161 YMM_REG_ALIAS(11), 2162 YMM_REG_ALIAS(12), 2163 YMM_REG_ALIAS(13), 2164 YMM_REG_ALIAS(14), 2165 YMM_REG_ALIAS(15), 2166 2167 XMM_REG_ALIAS(0), 2168 XMM_REG_ALIAS(1), 2169 XMM_REG_ALIAS(2), 2170 XMM_REG_ALIAS(3), 2171 XMM_REG_ALIAS(4), 2172 XMM_REG_ALIAS(5), 2173 XMM_REG_ALIAS(6), 2174 XMM_REG_ALIAS(7), 2175 XMM_REG_ALIAS(8), 2176 XMM_REG_ALIAS(9), 2177 XMM_REG_ALIAS(10), 2178 XMM_REG_ALIAS(11), 2179 XMM_REG_ALIAS(12), 2180 XMM_REG_ALIAS(13), 2181 XMM_REG_ALIAS(14), 2182 XMM_REG_ALIAS(15), 2183 2184 }; 2185 2186 2187 // Exception registers 2188 2189 const DNBRegisterInfo DNBArchImplX86_64::g_exc_registers[] = { 2190 {e_regSetEXC, exc_trapno, "trapno", NULL, Uint, Hex, EXC_SIZE(trapno), 2191 EXC_OFFSET(trapno), -1U, -1U, -1U, -1U, NULL, NULL}, 2192 {e_regSetEXC, exc_err, "err", NULL, Uint, Hex, EXC_SIZE(err), 2193 EXC_OFFSET(err), -1U, -1U, -1U, -1U, NULL, NULL}, 2194 {e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex, 2195 EXC_SIZE(faultvaddr), EXC_OFFSET(faultvaddr), -1U, -1U, -1U, -1U, NULL, 2196 NULL}}; 2197 2198 // Number of registers in each register set 2199 const size_t DNBArchImplX86_64::k_num_gpr_registers = 2200 sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo); 2201 const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx = 2202 sizeof(g_fpu_registers_no_avx) / sizeof(DNBRegisterInfo); 2203 const size_t DNBArchImplX86_64::k_num_fpu_registers_avx = 2204 sizeof(g_fpu_registers_avx) / sizeof(DNBRegisterInfo); 2205 const size_t DNBArchImplX86_64::k_num_exc_registers = 2206 sizeof(g_exc_registers) / sizeof(DNBRegisterInfo); 2207 const size_t DNBArchImplX86_64::k_num_all_registers_no_avx = 2208 k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers; 2209 const size_t DNBArchImplX86_64::k_num_all_registers_avx = 2210 k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers; 2211 const size_t DNBArchImplX86_64::k_num_fpu_registers_avx512f = 2212 sizeof(g_fpu_registers_avx512f) / sizeof(DNBRegisterInfo); 2213 const size_t DNBArchImplX86_64::k_num_all_registers_avx512f = 2214 k_num_gpr_registers + k_num_fpu_registers_avx512f + k_num_exc_registers; 
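// The k_num_* constants above are plain element counts of the tables they
// name, computed with the usual sizeof(array) / sizeof(element) idiom. As an
// illustrative sketch only (not part of the debugserver logic), the same idea
// could be expressed as a compile-time check:
//
//   template <typename T, size_t N>
//   constexpr size_t countof(const T (&)[N]) { return N; }
//   static_assert(countof(g_exc_registers) == 3,
//                 "expected trapno, err and faultvaddr");
//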
2215 2216 // Register set definitions. The first definition, at register set index 2217 // zero, is for all registers, followed by the other register sets. The 2218 // register information for the all-registers set need not be filled in. 2219 const DNBRegisterSetInfo DNBArchImplX86_64::g_reg_sets_no_avx[] = { 2220 {"x86_64 Registers", NULL, k_num_all_registers_no_avx}, 2221 {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers}, 2222 {"Floating Point Registers", g_fpu_registers_no_avx, 2223 k_num_fpu_registers_no_avx}, 2224 {"Exception State Registers", g_exc_registers, k_num_exc_registers}}; 2225 2226 const DNBRegisterSetInfo DNBArchImplX86_64::g_reg_sets_avx[] = { 2227 {"x86_64 Registers", NULL, k_num_all_registers_avx}, 2228 {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers}, 2229 {"Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx}, 2230 {"Exception State Registers", g_exc_registers, k_num_exc_registers}}; 2231 2232 const DNBRegisterSetInfo DNBArchImplX86_64::g_reg_sets_avx512f[] = { 2233 {"x86_64 Registers", NULL, k_num_all_registers_avx512f}, 2234 {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers}, 2235 {"Floating Point Registers", g_fpu_registers_avx512f, 2236 k_num_fpu_registers_avx512f}, 2237 {"Exception State Registers", g_exc_registers, k_num_exc_registers}}; 2238 2239 // Total number of register sets for this architecture 2240 const size_t DNBArchImplX86_64::k_num_register_sets = 2241 sizeof(g_reg_sets_avx) / sizeof(DNBRegisterSetInfo); 2242 2243 DNBArchProtocol *DNBArchImplX86_64::Create(MachThread *thread) { 2244 DNBArchImplX86_64 *obj = new DNBArchImplX86_64(thread); 2245 return obj; 2246 } 2247 2248 const uint8_t * 2249 DNBArchImplX86_64::SoftwareBreakpointOpcode(nub_size_t byte_size) { 2250 static const uint8_t g_breakpoint_opcode[] = {0xCC}; 2251 if (byte_size == 1) 2252 return g_breakpoint_opcode; 2253 return NULL; 2254 } 2255 2256 const DNBRegisterSetInfo * 2257 DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets) { 2258 *num_reg_sets = k_num_register_sets; 2259 2260 if (CPUHasAVX512f() || FORCE_AVX_REGS) 2261 return g_reg_sets_avx512f; 2262 if (CPUHasAVX() || FORCE_AVX_REGS) 2263 return g_reg_sets_avx; 2264 else 2265 return g_reg_sets_no_avx; 2266 } 2267 2268 void DNBArchImplX86_64::Initialize() { 2269 DNBArchPluginInfo arch_plugin_info = { 2270 CPU_TYPE_X86_64, DNBArchImplX86_64::Create, 2271 DNBArchImplX86_64::GetRegisterSetInfo, 2272 DNBArchImplX86_64::SoftwareBreakpointOpcode}; 2273 2274 // Register this arch plug-in with the main protocol class 2275 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info); 2276 } 2277 2278 bool DNBArchImplX86_64::GetRegisterValue(uint32_t set, uint32_t reg, 2279 DNBRegisterValue *value) { 2280 if (set == REGISTER_SET_GENERIC) { 2281 switch (reg) { 2282 case GENERIC_REGNUM_PC: // Program Counter 2283 set = e_regSetGPR; 2284 reg = gpr_rip; 2285 break; 2286 2287 case GENERIC_REGNUM_SP: // Stack Pointer 2288 set = e_regSetGPR; 2289 reg = gpr_rsp; 2290 break; 2291 2292 case GENERIC_REGNUM_FP: // Frame Pointer 2293 set = e_regSetGPR; 2294 reg = gpr_rbp; 2295 break; 2296 2297 case GENERIC_REGNUM_FLAGS: // Processor flags register 2298 set = e_regSetGPR; 2299 reg = gpr_rflags; 2300 break; 2301 2302 case GENERIC_REGNUM_RA: // Return Address 2303 default: 2304 return false; 2305 } 2306 } 2307 2308 if (GetRegisterState(set, false) != KERN_SUCCESS) 2309 return false; 2310 2311 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 2312 if (regInfo) { 2313
value->info = *regInfo; 2314 switch (set) { 2315 case e_regSetGPR: 2316 if (reg < k_num_gpr_registers) { 2317 value->value.uint64 = ((uint64_t *)(&m_state.context.gpr))[reg]; 2318 return true; 2319 } 2320 break; 2321 2322 case e_regSetFPU: 2323 if (reg > fpu_xmm15 && !(CPUHasAVX() || FORCE_AVX_REGS)) 2324 return false; 2325 if (reg > fpu_ymm15 && !(CPUHasAVX512f() || FORCE_AVX_REGS)) 2326 return false; 2327 switch (reg) { 2328 2329 case fpu_fcw: 2330 value->value.uint16 = 2331 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); 2332 return true; 2333 case fpu_fsw: 2334 value->value.uint16 = 2335 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); 2336 return true; 2337 case fpu_ftw: 2338 memcpy (&value->value.uint16, &m_state.context.fpu.no_avx.__fpu_ftw, 2); 2339 return true; 2340 case fpu_fop: 2341 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; 2342 return true; 2343 case fpu_ip: 2344 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; 2345 return true; 2346 case fpu_cs: 2347 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; 2348 return true; 2349 case fpu_dp: 2350 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; 2351 return true; 2352 case fpu_ds: 2353 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; 2354 return true; 2355 case fpu_mxcsr: 2356 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; 2357 return true; 2358 case fpu_mxcsrmask: 2359 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; 2360 return true; 2361 2362 case fpu_stmm0: 2363 case fpu_stmm1: 2364 case fpu_stmm2: 2365 case fpu_stmm3: 2366 case fpu_stmm4: 2367 case fpu_stmm5: 2368 case fpu_stmm6: 2369 case fpu_stmm7: 2370 memcpy(&value->value.uint8, 2371 &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10); 2372 return true; 2373 2374 case fpu_xmm0: 2375 case fpu_xmm1: 2376 case fpu_xmm2: 2377 case fpu_xmm3: 2378 case fpu_xmm4: 2379 case fpu_xmm5: 2380 case fpu_xmm6: 2381 case fpu_xmm7: 2382 case fpu_xmm8: 2383 case fpu_xmm9: 2384 case fpu_xmm10: 2385 case fpu_xmm11: 2386 case fpu_xmm12: 2387 case fpu_xmm13: 2388 case fpu_xmm14: 2389 case fpu_xmm15: 2390 memcpy(&value->value.uint8, 2391 &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16); 2392 return true; 2393 2394 case fpu_ymm0: 2395 case fpu_ymm1: 2396 case fpu_ymm2: 2397 case fpu_ymm3: 2398 case fpu_ymm4: 2399 case fpu_ymm5: 2400 case fpu_ymm6: 2401 case fpu_ymm7: 2402 case fpu_ymm8: 2403 case fpu_ymm9: 2404 case fpu_ymm10: 2405 case fpu_ymm11: 2406 case fpu_ymm12: 2407 case fpu_ymm13: 2408 case fpu_ymm14: 2409 case fpu_ymm15: 2410 memcpy(&value->value.uint8, 2411 &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16); 2412 memcpy((&value->value.uint8) + 16, 2413 &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16); 2414 return true; 2415 case fpu_k0: 2416 case fpu_k1: 2417 case fpu_k2: 2418 case fpu_k3: 2419 case fpu_k4: 2420 case fpu_k5: 2421 case fpu_k6: 2422 case fpu_k7: 2423 memcpy((&value->value.uint8), 2424 &m_state.context.fpu.avx512f.__fpu_k0 + (reg - fpu_k0), 8); 2425 return true; 2426 case fpu_zmm0: 2427 case fpu_zmm1: 2428 case fpu_zmm2: 2429 case fpu_zmm3: 2430 case fpu_zmm4: 2431 case fpu_zmm5: 2432 case fpu_zmm6: 2433 case fpu_zmm7: 2434 case fpu_zmm8: 2435 case fpu_zmm9: 2436 case fpu_zmm10: 2437 case fpu_zmm11: 2438 case fpu_zmm12: 2439 case fpu_zmm13: 2440 case fpu_zmm14: 2441 case fpu_zmm15: 2442 memcpy(&value->value.uint8, 2443 &m_state.context.fpu.avx512f.__fpu_xmm0 + (reg - fpu_zmm0), 16); 2444 memcpy((&value->value.uint8) + 16, 2445 
&m_state.context.fpu.avx512f.__fpu_ymmh0 + (reg - fpu_zmm0), 16); 2446 memcpy((&value->value.uint8) + 32, 2447 &m_state.context.fpu.avx512f.__fpu_zmmh0 + (reg - fpu_zmm0), 32); 2448 return true; 2449 case fpu_zmm16: 2450 case fpu_zmm17: 2451 case fpu_zmm18: 2452 case fpu_zmm19: 2453 case fpu_zmm20: 2454 case fpu_zmm21: 2455 case fpu_zmm22: 2456 case fpu_zmm23: 2457 case fpu_zmm24: 2458 case fpu_zmm25: 2459 case fpu_zmm26: 2460 case fpu_zmm27: 2461 case fpu_zmm28: 2462 case fpu_zmm29: 2463 case fpu_zmm30: 2464 case fpu_zmm31: 2465 memcpy(&value->value.uint8, 2466 &m_state.context.fpu.avx512f.__fpu_zmm16 + (reg - fpu_zmm16), 64); 2467 return true; 2468 } 2469 break; 2470 2471 case e_regSetEXC: 2472 switch (reg) { 2473 case exc_trapno: 2474 value->value.uint32 = m_state.context.exc.__trapno; 2475 return true; 2476 case exc_err: 2477 value->value.uint32 = m_state.context.exc.__err; 2478 return true; 2479 case exc_faultvaddr: 2480 value->value.uint64 = m_state.context.exc.__faultvaddr; 2481 return true; 2482 } 2483 break; 2484 } 2485 } 2486 return false; 2487 } 2488 2489 bool DNBArchImplX86_64::SetRegisterValue(uint32_t set, uint32_t reg, 2490 const DNBRegisterValue *value) { 2491 if (set == REGISTER_SET_GENERIC) { 2492 switch (reg) { 2493 case GENERIC_REGNUM_PC: // Program Counter 2494 set = e_regSetGPR; 2495 reg = gpr_rip; 2496 break; 2497 2498 case GENERIC_REGNUM_SP: // Stack Pointer 2499 set = e_regSetGPR; 2500 reg = gpr_rsp; 2501 break; 2502 2503 case GENERIC_REGNUM_FP: // Frame Pointer 2504 set = e_regSetGPR; 2505 reg = gpr_rbp; 2506 break; 2507 2508 case GENERIC_REGNUM_FLAGS: // Processor flags register 2509 set = e_regSetGPR; 2510 reg = gpr_rflags; 2511 break; 2512 2513 case GENERIC_REGNUM_RA: // Return Address 2514 default: 2515 return false; 2516 } 2517 } 2518 2519 if (GetRegisterState(set, false) != KERN_SUCCESS) 2520 return false; 2521 2522 bool success = false; 2523 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 2524 if (regInfo) { 2525 switch (set) { 2526 case e_regSetGPR: 2527 if (reg < k_num_gpr_registers) { 2528 ((uint64_t *)(&m_state.context.gpr))[reg] = value->value.uint64; 2529 success = true; 2530 } 2531 break; 2532 case e_regSetFPU: 2533 if (reg > fpu_xmm15 && !(CPUHasAVX() || FORCE_AVX_REGS)) 2534 return false; 2535 if (reg > fpu_ymm15 && !(CPUHasAVX512f() || FORCE_AVX_REGS)) 2536 return false; 2537 switch (reg) { 2538 case fpu_fcw: 2539 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = 2540 value->value.uint16; 2541 success = true; 2542 break; 2543 case fpu_fsw: 2544 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) = 2545 value->value.uint16; 2546 success = true; 2547 break; 2548 case fpu_ftw: 2549 memcpy(&m_state.context.fpu.no_avx.__fpu_ftw, &value->value.uint8, 2); 2550 success = true; 2551 break; 2552 case fpu_fop: 2553 m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; 2554 success = true; 2555 break; 2556 case fpu_ip: 2557 m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; 2558 success = true; 2559 break; 2560 case fpu_cs: 2561 m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; 2562 success = true; 2563 break; 2564 case fpu_dp: 2565 m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; 2566 success = true; 2567 break; 2568 case fpu_ds: 2569 m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; 2570 success = true; 2571 break; 2572 case fpu_mxcsr: 2573 m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; 2574 success = true; 2575 break; 2576 case fpu_mxcsrmask: 2577
m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; 2578 success = true; 2579 break; 2580 2581 case fpu_stmm0: 2582 case fpu_stmm1: 2583 case fpu_stmm2: 2584 case fpu_stmm3: 2585 case fpu_stmm4: 2586 case fpu_stmm5: 2587 case fpu_stmm6: 2588 case fpu_stmm7: 2589 memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 2590 &value->value.uint8, 10); 2591 success = true; 2592 break; 2593 2594 case fpu_xmm0: 2595 case fpu_xmm1: 2596 case fpu_xmm2: 2597 case fpu_xmm3: 2598 case fpu_xmm4: 2599 case fpu_xmm5: 2600 case fpu_xmm6: 2601 case fpu_xmm7: 2602 case fpu_xmm8: 2603 case fpu_xmm9: 2604 case fpu_xmm10: 2605 case fpu_xmm11: 2606 case fpu_xmm12: 2607 case fpu_xmm13: 2608 case fpu_xmm14: 2609 case fpu_xmm15: 2610 memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 2611 &value->value.uint8, 16); 2612 success = true; 2613 break; 2614 2615 case fpu_ymm0: 2616 case fpu_ymm1: 2617 case fpu_ymm2: 2618 case fpu_ymm3: 2619 case fpu_ymm4: 2620 case fpu_ymm5: 2621 case fpu_ymm6: 2622 case fpu_ymm7: 2623 case fpu_ymm8: 2624 case fpu_ymm9: 2625 case fpu_ymm10: 2626 case fpu_ymm11: 2627 case fpu_ymm12: 2628 case fpu_ymm13: 2629 case fpu_ymm14: 2630 case fpu_ymm15: 2631 memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 2632 &value->value.uint8, 16); 2633 memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 2634 (&value->value.uint8) + 16, 16); 2635 return true; 2636 case fpu_k0: 2637 case fpu_k1: 2638 case fpu_k2: 2639 case fpu_k3: 2640 case fpu_k4: 2641 case fpu_k5: 2642 case fpu_k6: 2643 case fpu_k7: 2644 memcpy(&m_state.context.fpu.avx512f.__fpu_k0 + (reg - fpu_k0), 2645 &value->value.uint8, 8); 2646 return true; 2647 case fpu_zmm0: 2648 case fpu_zmm1: 2649 case fpu_zmm2: 2650 case fpu_zmm3: 2651 case fpu_zmm4: 2652 case fpu_zmm5: 2653 case fpu_zmm6: 2654 case fpu_zmm7: 2655 case fpu_zmm8: 2656 case fpu_zmm9: 2657 case fpu_zmm10: 2658 case fpu_zmm11: 2659 case fpu_zmm12: 2660 case fpu_zmm13: 2661 case fpu_zmm14: 2662 case fpu_zmm15: 2663 memcpy(&m_state.context.fpu.avx512f.__fpu_xmm0 + (reg - fpu_zmm0), 2664 &value->value.uint8, 16); 2665 memcpy(&m_state.context.fpu.avx512f.__fpu_ymmh0 + (reg - fpu_zmm0), 2666 &value->value.uint8 + 16, 16); 2667 memcpy(&m_state.context.fpu.avx512f.__fpu_zmmh0 + (reg - fpu_zmm0), 2668 &value->value.uint8 + 32, 32); 2669 return true; 2670 case fpu_zmm16: 2671 case fpu_zmm17: 2672 case fpu_zmm18: 2673 case fpu_zmm19: 2674 case fpu_zmm20: 2675 case fpu_zmm21: 2676 case fpu_zmm22: 2677 case fpu_zmm23: 2678 case fpu_zmm24: 2679 case fpu_zmm25: 2680 case fpu_zmm26: 2681 case fpu_zmm27: 2682 case fpu_zmm28: 2683 case fpu_zmm29: 2684 case fpu_zmm30: 2685 case fpu_zmm31: 2686 memcpy(&m_state.context.fpu.avx512f.__fpu_zmm16 + (reg - fpu_zmm16), 2687 &value->value.uint8, 64); 2688 return true; 2689 } 2690 break; 2691 2692 case e_regSetEXC: 2693 switch (reg) { 2694 case exc_trapno: 2695 m_state.context.exc.__trapno = value->value.uint32; 2696 success = true; 2697 break; 2698 case exc_err: 2699 m_state.context.exc.__err = value->value.uint32; 2700 success = true; 2701 break; 2702 case exc_faultvaddr: 2703 m_state.context.exc.__faultvaddr = value->value.uint64; 2704 success = true; 2705 break; 2706 } 2707 break; 2708 } 2709 } 2710 2711 if (success) 2712 return SetRegisterState(set) == KERN_SUCCESS; 2713 return false; 2714 } 2715 2716 uint32_t DNBArchImplX86_64::GetRegisterContextSize() { 2717 static uint32_t g_cached_size = 0; 2718 if (g_cached_size == 0) { 2719 if (CPUHasAVX512f() || FORCE_AVX_REGS) { 2720 for (size_t i = 
0; i < k_num_fpu_registers_avx512f; ++i) { 2721 if (g_fpu_registers_avx512f[i].value_regs == NULL) 2722 g_cached_size += g_fpu_registers_avx512f[i].size; 2723 } 2724 } else if (CPUHasAVX() || FORCE_AVX_REGS) { 2725 for (size_t i = 0; i < k_num_fpu_registers_avx; ++i) { 2726 if (g_fpu_registers_avx[i].value_regs == NULL) 2727 g_cached_size += g_fpu_registers_avx[i].size; 2728 } 2729 } else { 2730 for (size_t i = 0; i < k_num_fpu_registers_no_avx; ++i) { 2731 if (g_fpu_registers_no_avx[i].value_regs == NULL) 2732 g_cached_size += g_fpu_registers_no_avx[i].size; 2733 } 2734 } 2735 DNBLogThreaded("DNBArchImplX86_64::GetRegisterContextSize() - GPR = %zu, " 2736 "FPU = %u, EXC = %zu", 2737 sizeof(GPR), g_cached_size, sizeof(EXC)); 2738 g_cached_size += sizeof(GPR); 2739 g_cached_size += sizeof(EXC); 2740 DNBLogThreaded( 2741 "DNBArchImplX86_64::GetRegisterContextSize() - GPR + FPU + EXC = %u", 2742 g_cached_size); 2743 } 2744 return g_cached_size; 2745 } 2746 2747 nub_size_t DNBArchImplX86_64::GetRegisterContext(void *buf, 2748 nub_size_t buf_len) { 2749 uint32_t size = GetRegisterContextSize(); 2750 2751 if (buf && buf_len) { 2752 bool force = false; 2753 kern_return_t kret; 2754 2755 if ((kret = GetGPRState(force)) != KERN_SUCCESS) { 2756 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf " 2757 "= %p, len = %llu) error: GPR regs failed " 2758 "to read: %u ", 2759 buf, (uint64_t)buf_len, kret); 2760 size = 0; 2761 } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) { 2762 DNBLogThreadedIf( 2763 LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = " 2764 "%llu) error: %s regs failed to read: %u", 2765 buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret); 2766 size = 0; 2767 } else if ((kret = GetEXCState(force)) != KERN_SUCCESS) { 2768 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf " 2769 "= %p, len = %llu) error: EXC regs failed " 2770 "to read: %u", 2771 buf, (uint64_t)buf_len, kret); 2772 size = 0; 2773 } else { 2774 uint8_t *p = (uint8_t *)buf; 2775 // Copy the GPR registers 2776 memcpy(p, &m_state.context.gpr, sizeof(GPR)); 2777 p += sizeof(GPR); 2778 2779 // Walk around the gaps in the FPU regs 2780 memcpy(p, &m_state.context.fpu.no_avx.__fpu_fcw, 5); 2781 // We read 5 bytes, but we skip 6 to account for __fpu_rsrv1 2782 // to match the g_fpu_registers_* tables. 
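      // Worked byte map for the packed scalar FPU fields assembled below,
      // using the field widths from the g_fpu_registers_* tables above:
      // fcw(2) + fsw(2) + ftw(1) + 1 reserved byte = 6, fop(2) + ip(4) +
      // cs(2) = 8, dp(4) + ds(2) = 6, mxcsr(4) + mxcsrmask(4) = 8, i.e.
      // 28 bytes of scalar FPU state ahead of the stmm values.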
2783 p += 6; 2784 memcpy(p, &m_state.context.fpu.no_avx.__fpu_fop, 8); 2785 p += 8; 2786 memcpy(p, &m_state.context.fpu.no_avx.__fpu_dp, 6); 2787 p += 6; 2788 memcpy(p, &m_state.context.fpu.no_avx.__fpu_mxcsr, 8); 2789 p += 8; 2790 2791 // Work around the padding between the stmm registers as they are 16 2792 // byte structs with 10 bytes of the value in each 2793 for (size_t i = 0; i < 8; ++i) { 2794 memcpy(p, &m_state.context.fpu.no_avx.__fpu_stmm0 + i, 10); 2795 p += 10; 2796 } 2797 2798 if(CPUHasAVX512f() || FORCE_AVX_REGS) { 2799 for (size_t i = 0; i < 8; ++i) { 2800 memcpy(p, &m_state.context.fpu.avx512f.__fpu_k0 + i, 8); 2801 p += 8; 2802 } 2803 } 2804 2805 if (CPUHasAVX() || FORCE_AVX_REGS) { 2806 // Interleave the XMM and YMMH registers to make the YMM registers 2807 for (size_t i = 0; i < 16; ++i) { 2808 memcpy(p, &m_state.context.fpu.avx.__fpu_xmm0 + i, 16); 2809 p += 16; 2810 memcpy(p, &m_state.context.fpu.avx.__fpu_ymmh0 + i, 16); 2811 p += 16; 2812 } 2813 if(CPUHasAVX512f() || FORCE_AVX_REGS) { 2814 for (size_t i = 0; i < 16; ++i) { 2815 memcpy(p, &m_state.context.fpu.avx512f.__fpu_zmmh0 + i, 32); 2816 p += 32; 2817 } 2818 for (size_t i = 0; i < 16; ++i) { 2819 memcpy(p, &m_state.context.fpu.avx512f.__fpu_zmm16 + i, 64); 2820 p += 64; 2821 } 2822 } 2823 } else { 2824 // Copy the XMM registers in a single block 2825 memcpy(p, &m_state.context.fpu.no_avx.__fpu_xmm0, 16 * 16); 2826 p += 16 * 16; 2827 } 2828 2829 // Copy the exception registers 2830 memcpy(p, &m_state.context.exc, sizeof(EXC)); 2831 p += sizeof(EXC); 2832 2833 // make sure we end up with exactly what we think we should have 2834 size_t bytes_written = p - (uint8_t *)buf; 2835 UNUSED_IF_ASSERT_DISABLED(bytes_written); 2836 assert(bytes_written == size); 2837 } 2838 } 2839 2840 DNBLogThreadedIf( 2841 LOG_THREAD, 2842 "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) => %u", buf, 2843 (uint64_t)buf_len, size); 2844 // Return the size of the register context even if NULL was passed in 2845 return size; 2846 } 2847 2848 nub_size_t DNBArchImplX86_64::SetRegisterContext(const void *buf, 2849 nub_size_t buf_len) { 2850 uint32_t size = GetRegisterContextSize(); 2851 if (buf == NULL || buf_len == 0) 2852 size = 0; 2853 2854 if (size) { 2855 if (size > buf_len) 2856 size = static_cast<uint32_t>(buf_len); 2857 2858 const uint8_t *p = (const uint8_t *)buf; 2859 // Copy the GPR registers 2860 memcpy(&m_state.context.gpr, p, sizeof(GPR)); 2861 p += sizeof(GPR); 2862 2863 // Copy fcw through mxcsrmask as there is no padding 2864 memcpy(&m_state.context.fpu.no_avx.__fpu_fcw, p, 5); 2865 // We wrote 5 bytes, but we skip 6 to account for __fpu_rsrv1 2866 // to match the g_fpu_registers_* tables. 
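    // This consumes the same packed layout that GetRegisterContext() produces
    // above: 6 + 8 + 6 + 8 scalar bytes, then eight 10-byte stmm values, then
    // the remaining mask/vector registers, in the same order.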
2867 p += 6; 2868 memcpy(&m_state.context.fpu.no_avx.__fpu_fop, p, 8); 2869 p += 8; 2870 memcpy(&m_state.context.fpu.no_avx.__fpu_dp, p, 6); 2871 p += 6; 2872 memcpy(&m_state.context.fpu.no_avx.__fpu_mxcsr, p, 8); 2873 p += 8; 2874 2875 // Work around the padding between the stmm registers as they are 16 2876 // byte structs with 10 bytes of the value in each 2877 for (size_t i = 0; i < 8; ++i) { 2878 memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + i, p, 10); 2879 p += 10; 2880 } 2881 2882 if(CPUHasAVX512f() || FORCE_AVX_REGS) { 2883 for (size_t i = 0; i < 8; ++i) { 2884 memcpy(&m_state.context.fpu.avx512f.__fpu_k0 + i, p, 8); 2885 p += 8; 2886 } 2887 } 2888 2889 if (CPUHasAVX() || FORCE_AVX_REGS) { 2890 // Interleave the XMM and YMMH registers to make the YMM registers 2891 for (size_t i = 0; i < 16; ++i) { 2892 memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + i, p, 16); 2893 p += 16; 2894 memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + i, p, 16); 2895 p += 16; 2896 } 2897 if(CPUHasAVX512f() || FORCE_AVX_REGS) { 2898 for (size_t i = 0; i < 16; ++i) { 2899 memcpy(&m_state.context.fpu.avx512f.__fpu_zmmh0 + i, p, 32); 2900 p += 32; 2901 } 2902 for (size_t i = 0; i < 16; ++i) { 2903 memcpy(&m_state.context.fpu.avx512f.__fpu_zmm16 + i, p, 64); 2904 p += 64; 2905 } 2906 } 2907 } else { 2908 // Copy the XMM registers in a single block 2909 memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0, p, 16 * 16); 2910 p += 16 * 16; 2911 } 2912 2913 // Copy the exception registers 2914 memcpy(&m_state.context.exc, p, sizeof(EXC)); 2915 p += sizeof(EXC); 2916 2917 // make sure we end up with exactly what we think we should have 2918 size_t bytes_written = p - (const uint8_t *)buf; 2919 UNUSED_IF_ASSERT_DISABLED(bytes_written); 2920 assert(bytes_written == size); 2921 2922 kern_return_t kret; 2923 if ((kret = SetGPRState()) != KERN_SUCCESS) 2924 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf " 2925 "= %p, len = %llu) error: GPR regs failed " 2926 "to write: %u", 2927 buf, (uint64_t)buf_len, kret); 2928 if ((kret = SetFPUState()) != KERN_SUCCESS) 2929 DNBLogThreadedIf( 2930 LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = " 2931 "%llu) error: %s regs failed to write: %u", 2932 buf, (uint64_t)buf_len, CPUHasAVX() ? 
"AVX" : "FPU", kret); 2933 if ((kret = SetEXCState()) != KERN_SUCCESS) 2934 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf " 2935 "= %p, len = %llu) error: EXP regs failed " 2936 "to write: %u", 2937 buf, (uint64_t)buf_len, kret); 2938 } 2939 DNBLogThreadedIf( 2940 LOG_THREAD, 2941 "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) => %llu", 2942 buf, (uint64_t)buf_len, (uint64_t)size); 2943 return size; 2944 } 2945 2946 uint32_t DNBArchImplX86_64::SaveRegisterState() { 2947 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber()); 2948 DNBLogThreadedIf( 2949 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u " 2950 "(SetGPRState() for stop_count = %u)", 2951 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount()); 2952 2953 // Always re-read the registers because above we call thread_abort_safely(); 2954 bool force = true; 2955 2956 if ((kret = GetGPRState(force)) != KERN_SUCCESS) { 2957 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::SaveRegisterState () " 2958 "error: GPR regs failed to read: %u ", 2959 kret); 2960 } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) { 2961 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::SaveRegisterState () " 2962 "error: %s regs failed to read: %u", 2963 CPUHasAVX() ? "AVX" : "FPU", kret); 2964 } else { 2965 const uint32_t save_id = GetNextRegisterStateSaveID(); 2966 m_saved_register_states[save_id] = m_state.context; 2967 return save_id; 2968 } 2969 return 0; 2970 } 2971 bool DNBArchImplX86_64::RestoreRegisterState(uint32_t save_id) { 2972 SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id); 2973 if (pos != m_saved_register_states.end()) { 2974 m_state.context.gpr = pos->second.gpr; 2975 m_state.context.fpu = pos->second.fpu; 2976 m_state.SetError(e_regSetGPR, Read, 0); 2977 m_state.SetError(e_regSetFPU, Read, 0); 2978 kern_return_t kret; 2979 bool success = true; 2980 if ((kret = SetGPRState()) != KERN_SUCCESS) { 2981 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::RestoreRegisterState " 2982 "(save_id = %u) error: GPR regs failed to " 2983 "write: %u", 2984 save_id, kret); 2985 success = false; 2986 } else if ((kret = SetFPUState()) != KERN_SUCCESS) { 2987 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::RestoreRegisterState " 2988 "(save_id = %u) error: %s regs failed to " 2989 "write: %u", 2990 save_id, CPUHasAVX() ? "AVX" : "FPU", kret); 2991 success = false; 2992 } 2993 m_saved_register_states.erase(pos); 2994 return success; 2995 } 2996 return false; 2997 } 2998 2999 kern_return_t DNBArchImplX86_64::GetRegisterState(int set, bool force) { 3000 switch (set) { 3001 case e_regSetALL: 3002 return GetGPRState(force) | GetFPUState(force) | GetEXCState(force); 3003 case e_regSetGPR: 3004 return GetGPRState(force); 3005 case e_regSetFPU: 3006 return GetFPUState(force); 3007 case e_regSetEXC: 3008 return GetEXCState(force); 3009 default: 3010 break; 3011 } 3012 return KERN_INVALID_ARGUMENT; 3013 } 3014 3015 kern_return_t DNBArchImplX86_64::SetRegisterState(int set) { 3016 // Make sure we have a valid context to set. 
3017 if (RegisterSetStateIsValid(set)) { 3018 switch (set) { 3019 case e_regSetALL: 3020 return SetGPRState() | SetFPUState() | SetEXCState(); 3021 case e_regSetGPR: 3022 return SetGPRState(); 3023 case e_regSetFPU: 3024 return SetFPUState(); 3025 case e_regSetEXC: 3026 return SetEXCState(); 3027 default: 3028 break; 3029 } 3030 } 3031 return KERN_INVALID_ARGUMENT; 3032 } 3033 3034 bool DNBArchImplX86_64::RegisterSetStateIsValid(int set) const { 3035 return m_state.RegsAreValid(set); 3036 } 3037 3038 #endif // #if defined (__i386__) || defined (__x86_64__) 3039
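// Illustrative usage sketch only (not part of this file): a consumer of the
// register tables above would typically treat the set at index 0 as the
// "all registers" summary and walk the remaining sets returned by
// GetRegisterSetInfo(). The field names used below (registers,
// num_registers, name) are assumptions based on how the tables are
// initialized in this file, not a documented API:
//
//   nub_size_t num_sets = 0;
//   const DNBRegisterSetInfo *sets =
//       DNBArchImplX86_64::GetRegisterSetInfo(&num_sets);
//   for (nub_size_t set_idx = 1; set_idx < num_sets; ++set_idx)
//     for (size_t i = 0; i < sets[set_idx].num_registers; ++i)
//       DNBLogThreaded("%s", sets[set_idx].registers[i].name);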