1 //===-- DNBArchImplI386.cpp -------------------------------------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Created by Greg Clayton on 6/25/07. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #if defined(__i386__) || defined(__x86_64__) 15 16 #include <sys/cdefs.h> 17 18 #include "DNBLog.h" 19 #include "MacOSX/i386/DNBArchImplI386.h" 20 #include "MachProcess.h" 21 #include "MachThread.h" 22 23 extern "C" bool CPUHasAVX(); // Defined over in DNBArchImplX86_64.cpp 24 25 #if defined(LLDB_DEBUGSERVER_RELEASE) || defined(LLDB_DEBUGSERVER_DEBUG) 26 enum debugState { debugStateUnknown, debugStateOff, debugStateOn }; 27 28 static debugState sFPUDebugState = debugStateUnknown; 29 static debugState sAVXForceState = debugStateUnknown; 30 31 static bool DebugFPURegs() { 32 if (sFPUDebugState == debugStateUnknown) { 33 if (getenv("DNB_DEBUG_FPU_REGS")) 34 sFPUDebugState = debugStateOn; 35 else 36 sFPUDebugState = debugStateOff; 37 } 38 39 return (sFPUDebugState == debugStateOn); 40 } 41 42 static bool ForceAVXRegs() { 43 if (sFPUDebugState == debugStateUnknown) { 44 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS")) 45 sAVXForceState = debugStateOn; 46 else 47 sAVXForceState = debugStateOff; 48 } 49 50 return (sAVXForceState == debugStateOn); 51 } 52 53 #define DEBUG_FPU_REGS (DebugFPURegs()) 54 #define FORCE_AVX_REGS (ForceAVXRegs()) 55 #else 56 #define DEBUG_FPU_REGS (0) 57 #define FORCE_AVX_REGS (0) 58 #endif 59 60 enum { 61 gpr_eax = 0, 62 gpr_ebx = 1, 63 gpr_ecx = 2, 64 gpr_edx = 3, 65 gpr_edi = 4, 66 gpr_esi = 5, 67 gpr_ebp = 6, 68 gpr_esp = 7, 69 gpr_ss = 8, 70 gpr_eflags = 9, 71 gpr_eip = 10, 72 gpr_cs = 11, 73 gpr_ds = 12, 74 gpr_es = 13, 75 gpr_fs = 14, 76 gpr_gs = 15, 77 gpr_ax, 78 gpr_bx, 
79 gpr_cx, 80 gpr_dx, 81 gpr_di, 82 gpr_si, 83 gpr_bp, 84 gpr_sp, 85 gpr_ah, 86 gpr_bh, 87 gpr_ch, 88 gpr_dh, 89 gpr_al, 90 gpr_bl, 91 gpr_cl, 92 gpr_dl, 93 gpr_dil, 94 gpr_sil, 95 gpr_bpl, 96 gpr_spl, 97 k_num_gpr_regs 98 }; 99 100 enum { 101 fpu_fcw, 102 fpu_fsw, 103 fpu_ftw, 104 fpu_fop, 105 fpu_ip, 106 fpu_cs, 107 fpu_dp, 108 fpu_ds, 109 fpu_mxcsr, 110 fpu_mxcsrmask, 111 fpu_stmm0, 112 fpu_stmm1, 113 fpu_stmm2, 114 fpu_stmm3, 115 fpu_stmm4, 116 fpu_stmm5, 117 fpu_stmm6, 118 fpu_stmm7, 119 fpu_xmm0, 120 fpu_xmm1, 121 fpu_xmm2, 122 fpu_xmm3, 123 fpu_xmm4, 124 fpu_xmm5, 125 fpu_xmm6, 126 fpu_xmm7, 127 fpu_ymm0, 128 fpu_ymm1, 129 fpu_ymm2, 130 fpu_ymm3, 131 fpu_ymm4, 132 fpu_ymm5, 133 fpu_ymm6, 134 fpu_ymm7, 135 k_num_fpu_regs, 136 137 // Aliases 138 fpu_fctrl = fpu_fcw, 139 fpu_fstat = fpu_fsw, 140 fpu_ftag = fpu_ftw, 141 fpu_fiseg = fpu_cs, 142 fpu_fioff = fpu_ip, 143 fpu_foseg = fpu_ds, 144 fpu_fooff = fpu_dp 145 }; 146 147 enum { 148 exc_trapno, 149 exc_err, 150 exc_faultvaddr, 151 k_num_exc_regs, 152 }; 153 154 enum { 155 ehframe_eax = 0, 156 ehframe_ecx, 157 ehframe_edx, 158 ehframe_ebx, 159 160 // On i386 Darwin the eh_frame register numbers for ebp and esp are reversed 161 // from DWARF. 162 // It's due to an ancient compiler bug in the output of the eh_frame. 163 // Specifically, on i386 darwin eh_frame, 4 is ebp, 5 is esp. 164 // On i386 darwin debug_frame (and debug_info), 4 is esp, 5 is ebp. 
165 ehframe_ebp, 166 ehframe_esp, 167 ehframe_esi, 168 ehframe_edi, 169 ehframe_eip, 170 ehframe_eflags 171 }; 172 173 enum { 174 dwarf_eax = 0, 175 dwarf_ecx, 176 dwarf_edx, 177 dwarf_ebx, 178 dwarf_esp, 179 dwarf_ebp, 180 dwarf_esi, 181 dwarf_edi, 182 dwarf_eip, 183 dwarf_eflags, 184 dwarf_stmm0 = 11, 185 dwarf_stmm1, 186 dwarf_stmm2, 187 dwarf_stmm3, 188 dwarf_stmm4, 189 dwarf_stmm5, 190 dwarf_stmm6, 191 dwarf_stmm7, 192 dwarf_xmm0 = 21, 193 dwarf_xmm1, 194 dwarf_xmm2, 195 dwarf_xmm3, 196 dwarf_xmm4, 197 dwarf_xmm5, 198 dwarf_xmm6, 199 dwarf_xmm7, 200 dwarf_ymm0 = dwarf_xmm0, 201 dwarf_ymm1 = dwarf_xmm1, 202 dwarf_ymm2 = dwarf_xmm2, 203 dwarf_ymm3 = dwarf_xmm3, 204 dwarf_ymm4 = dwarf_xmm4, 205 dwarf_ymm5 = dwarf_xmm5, 206 dwarf_ymm6 = dwarf_xmm6, 207 dwarf_ymm7 = dwarf_xmm7, 208 }; 209 210 enum { 211 debugserver_eax = 0, 212 debugserver_ecx = 1, 213 debugserver_edx = 2, 214 debugserver_ebx = 3, 215 debugserver_esp = 4, 216 debugserver_ebp = 5, 217 debugserver_esi = 6, 218 debugserver_edi = 7, 219 debugserver_eip = 8, 220 debugserver_eflags = 9, 221 debugserver_cs = 10, 222 debugserver_ss = 11, 223 debugserver_ds = 12, 224 debugserver_es = 13, 225 debugserver_fs = 14, 226 debugserver_gs = 15, 227 debugserver_stmm0 = 16, 228 debugserver_stmm1 = 17, 229 debugserver_stmm2 = 18, 230 debugserver_stmm3 = 19, 231 debugserver_stmm4 = 20, 232 debugserver_stmm5 = 21, 233 debugserver_stmm6 = 22, 234 debugserver_stmm7 = 23, 235 debugserver_fctrl = 24, 236 debugserver_fcw = debugserver_fctrl, 237 debugserver_fstat = 25, 238 debugserver_fsw = debugserver_fstat, 239 debugserver_ftag = 26, 240 debugserver_ftw = debugserver_ftag, 241 debugserver_fiseg = 27, 242 debugserver_fpu_cs = debugserver_fiseg, 243 debugserver_fioff = 28, 244 debugserver_ip = debugserver_fioff, 245 debugserver_foseg = 29, 246 debugserver_fpu_ds = debugserver_foseg, 247 debugserver_fooff = 30, 248 debugserver_dp = debugserver_fooff, 249 debugserver_fop = 31, 250 debugserver_xmm0 = 32, 251 debugserver_xmm1 = 
33, 252 debugserver_xmm2 = 34, 253 debugserver_xmm3 = 35, 254 debugserver_xmm4 = 36, 255 debugserver_xmm5 = 37, 256 debugserver_xmm6 = 38, 257 debugserver_xmm7 = 39, 258 debugserver_mxcsr = 40, 259 debugserver_mm0 = 41, 260 debugserver_mm1 = 42, 261 debugserver_mm2 = 43, 262 debugserver_mm3 = 44, 263 debugserver_mm4 = 45, 264 debugserver_mm5 = 46, 265 debugserver_mm6 = 47, 266 debugserver_mm7 = 48, 267 debugserver_ymm0 = debugserver_xmm0, 268 debugserver_ymm1 = debugserver_xmm1, 269 debugserver_ymm2 = debugserver_xmm2, 270 debugserver_ymm3 = debugserver_xmm3, 271 debugserver_ymm4 = debugserver_xmm4, 272 debugserver_ymm5 = debugserver_xmm5, 273 debugserver_ymm6 = debugserver_xmm6, 274 debugserver_ymm7 = debugserver_xmm7 275 }; 276 277 uint64_t DNBArchImplI386::GetPC(uint64_t failValue) { 278 // Get program counter 279 if (GetGPRState(false) == KERN_SUCCESS) 280 return m_state.context.gpr.__eip; 281 return failValue; 282 } 283 284 kern_return_t DNBArchImplI386::SetPC(uint64_t value) { 285 // Get program counter 286 kern_return_t err = GetGPRState(false); 287 if (err == KERN_SUCCESS) { 288 m_state.context.gpr.__eip = static_cast<uint32_t>(value); 289 err = SetGPRState(); 290 } 291 return err == KERN_SUCCESS; 292 } 293 294 uint64_t DNBArchImplI386::GetSP(uint64_t failValue) { 295 // Get stack pointer 296 if (GetGPRState(false) == KERN_SUCCESS) 297 return m_state.context.gpr.__esp; 298 return failValue; 299 } 300 301 // Uncomment the value below to verify the values in the debugger. 
302 //#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 303 //#define SET_GPR(reg) m_state.context.gpr.__##reg = gpr_##reg 304 305 kern_return_t DNBArchImplI386::GetGPRState(bool force) { 306 if (force || m_state.GetError(e_regSetGPR, Read)) { 307 #if DEBUG_GPR_VALUES 308 SET_GPR(eax); 309 SET_GPR(ebx); 310 SET_GPR(ecx); 311 SET_GPR(edx); 312 SET_GPR(edi); 313 SET_GPR(esi); 314 SET_GPR(ebp); 315 SET_GPR(esp); 316 SET_GPR(ss); 317 SET_GPR(eflags); 318 SET_GPR(eip); 319 SET_GPR(cs); 320 SET_GPR(ds); 321 SET_GPR(es); 322 SET_GPR(fs); 323 SET_GPR(gs); 324 m_state.SetError(e_regSetGPR, Read, 0); 325 #else 326 mach_msg_type_number_t count = e_regSetWordSizeGPR; 327 m_state.SetError( 328 e_regSetGPR, Read, 329 ::thread_get_state(m_thread->MachPortNumber(), __i386_THREAD_STATE, 330 (thread_state_t)&m_state.context.gpr, &count)); 331 #endif 332 } 333 return m_state.GetError(e_regSetGPR, Read); 334 } 335 336 // Uncomment the value below to verify the values in the debugger. 337 //#define DEBUG_FPU_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 338 339 kern_return_t DNBArchImplI386::GetFPUState(bool force) { 340 if (force || m_state.GetError(e_regSetFPU, Read)) { 341 if (DEBUG_FPU_REGS) { 342 if (CPUHasAVX() || FORCE_AVX_REGS) { 343 m_state.context.fpu.avx.__fpu_reserved[0] = -1; 344 m_state.context.fpu.avx.__fpu_reserved[1] = -1; 345 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fcw) = 0x1234; 346 *(uint16_t *)&(m_state.context.fpu.avx.__fpu_fsw) = 0x5678; 347 m_state.context.fpu.avx.__fpu_ftw = 1; 348 m_state.context.fpu.avx.__fpu_rsrv1 = UINT8_MAX; 349 m_state.context.fpu.avx.__fpu_fop = 2; 350 m_state.context.fpu.avx.__fpu_ip = 3; 351 m_state.context.fpu.avx.__fpu_cs = 4; 352 m_state.context.fpu.avx.__fpu_rsrv2 = 5; 353 m_state.context.fpu.avx.__fpu_dp = 6; 354 m_state.context.fpu.avx.__fpu_ds = 7; 355 m_state.context.fpu.avx.__fpu_rsrv3 = UINT16_MAX; 356 m_state.context.fpu.avx.__fpu_mxcsr = 8; 357 m_state.context.fpu.avx.__fpu_mxcsrmask = 9; 
358 int i; 359 for (i = 0; i < 16; ++i) { 360 if (i < 10) { 361 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 362 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 363 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 364 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 365 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 366 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 367 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 368 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 369 } else { 370 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 371 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 372 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 373 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 374 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 375 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 376 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 377 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 378 } 379 380 m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg[i] = '0'; 381 m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg[i] = '1'; 382 m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg[i] = '2'; 383 m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg[i] = '3'; 384 m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg[i] = '4'; 385 m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg[i] = '5'; 386 m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg[i] = '6'; 387 m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg[i] = '7'; 388 } 389 for (i = 0; i < sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 390 m_state.context.fpu.avx.__fpu_rsrv4[i] = INT8_MIN; 391 m_state.context.fpu.avx.__fpu_reserved1 = -1; 392 for (i = 0; i < sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i) 393 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN; 394 395 for (i = 0; i < 16; ++i) { 396 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0'; 397 
m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1'; 398 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2'; 399 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3'; 400 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4'; 401 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5'; 402 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6'; 403 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7'; 404 } 405 } else { 406 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1; 407 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1; 408 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234; 409 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678; 410 m_state.context.fpu.no_avx.__fpu_ftw = 1; 411 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX; 412 m_state.context.fpu.no_avx.__fpu_fop = 2; 413 m_state.context.fpu.no_avx.__fpu_ip = 3; 414 m_state.context.fpu.no_avx.__fpu_cs = 4; 415 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5; 416 m_state.context.fpu.no_avx.__fpu_dp = 6; 417 m_state.context.fpu.no_avx.__fpu_ds = 7; 418 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX; 419 m_state.context.fpu.no_avx.__fpu_mxcsr = 8; 420 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9; 421 int i; 422 for (i = 0; i < 16; ++i) { 423 if (i < 10) { 424 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 425 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 426 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 427 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 428 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 429 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 430 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 431 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 432 } else { 433 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 434 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 435 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 
INT8_MIN; 436 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 437 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 438 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 439 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 440 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 441 } 442 443 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0'; 444 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1'; 445 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2'; 446 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3'; 447 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4'; 448 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5'; 449 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6'; 450 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7'; 451 } 452 for (i = 0; i < sizeof(m_state.context.fpu.avx.__fpu_rsrv4); ++i) 453 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN; 454 m_state.context.fpu.no_avx.__fpu_reserved1 = -1; 455 } 456 m_state.SetError(e_regSetFPU, Read, 0); 457 } else { 458 if (CPUHasAVX() || FORCE_AVX_REGS) { 459 mach_msg_type_number_t count = e_regSetWordSizeAVX; 460 m_state.SetError(e_regSetFPU, Read, 461 ::thread_get_state( 462 m_thread->MachPortNumber(), __i386_AVX_STATE, 463 (thread_state_t)&m_state.context.fpu.avx, &count)); 464 DNBLogThreadedIf(LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &avx, " 465 "%u (%u passed in)) => 0x%8.8x", 466 m_thread->MachPortNumber(), __i386_AVX_STATE, count, 467 e_regSetWordSizeAVX, 468 m_state.GetError(e_regSetFPU, Read)); 469 } else { 470 mach_msg_type_number_t count = e_regSetWordSizeFPU; 471 m_state.SetError( 472 e_regSetFPU, Read, 473 ::thread_get_state(m_thread->MachPortNumber(), __i386_FLOAT_STATE, 474 (thread_state_t)&m_state.context.fpu.no_avx, 475 &count)); 476 DNBLogThreadedIf(LOG_THREAD, "::thread_get_state (0x%4.4x, %u, &fpu, " 477 "%u (%u passed in) => 0x%8.8x", 478 
m_thread->MachPortNumber(), __i386_FLOAT_STATE, count, 479 e_regSetWordSizeFPU, 480 m_state.GetError(e_regSetFPU, Read)); 481 } 482 } 483 } 484 return m_state.GetError(e_regSetFPU, Read); 485 } 486 487 kern_return_t DNBArchImplI386::GetEXCState(bool force) { 488 if (force || m_state.GetError(e_regSetEXC, Read)) { 489 mach_msg_type_number_t count = e_regSetWordSizeEXC; 490 m_state.SetError( 491 e_regSetEXC, Read, 492 ::thread_get_state(m_thread->MachPortNumber(), __i386_EXCEPTION_STATE, 493 (thread_state_t)&m_state.context.exc, &count)); 494 } 495 return m_state.GetError(e_regSetEXC, Read); 496 } 497 498 kern_return_t DNBArchImplI386::SetGPRState() { 499 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber()); 500 DNBLogThreadedIf( 501 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u " 502 "(SetGPRState() for stop_count = %u)", 503 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount()); 504 505 m_state.SetError(e_regSetGPR, Write, 506 ::thread_set_state(m_thread->MachPortNumber(), 507 __i386_THREAD_STATE, 508 (thread_state_t)&m_state.context.gpr, 509 e_regSetWordSizeGPR)); 510 return m_state.GetError(e_regSetGPR, Write); 511 } 512 513 kern_return_t DNBArchImplI386::SetFPUState() { 514 if (DEBUG_FPU_REGS) { 515 m_state.SetError(e_regSetFPU, Write, 0); 516 return m_state.GetError(e_regSetFPU, Write); 517 } else { 518 if (CPUHasAVX() || FORCE_AVX_REGS) 519 m_state.SetError( 520 e_regSetFPU, Write, 521 ::thread_set_state(m_thread->MachPortNumber(), __i386_AVX_STATE, 522 (thread_state_t)&m_state.context.fpu.avx, 523 e_regSetWordSizeAVX)); 524 else 525 m_state.SetError( 526 e_regSetFPU, Write, 527 ::thread_set_state(m_thread->MachPortNumber(), __i386_FLOAT_STATE, 528 (thread_state_t)&m_state.context.fpu.no_avx, 529 e_regSetWordSizeFPU)); 530 return m_state.GetError(e_regSetFPU, Write); 531 } 532 } 533 534 kern_return_t DNBArchImplI386::SetEXCState() { 535 m_state.SetError(e_regSetEXC, Write, 536 
::thread_set_state(m_thread->MachPortNumber(), 537 __i386_EXCEPTION_STATE, 538 (thread_state_t)&m_state.context.exc, 539 e_regSetWordSizeEXC)); 540 return m_state.GetError(e_regSetEXC, Write); 541 } 542 543 kern_return_t DNBArchImplI386::GetDBGState(bool force) { 544 if (force || m_state.GetError(e_regSetDBG, Read)) { 545 mach_msg_type_number_t count = e_regSetWordSizeDBG; 546 m_state.SetError( 547 e_regSetDBG, Read, 548 ::thread_get_state(m_thread->MachPortNumber(), __i386_DEBUG_STATE, 549 (thread_state_t)&m_state.context.dbg, &count)); 550 } 551 return m_state.GetError(e_regSetDBG, Read); 552 } 553 554 kern_return_t DNBArchImplI386::SetDBGState(bool also_set_on_task) { 555 m_state.SetError(e_regSetDBG, Write, 556 ::thread_set_state(m_thread->MachPortNumber(), 557 __i386_DEBUG_STATE, 558 (thread_state_t)&m_state.context.dbg, 559 e_regSetWordSizeDBG)); 560 if (also_set_on_task) { 561 kern_return_t kret = ::task_set_state( 562 m_thread->Process()->Task().TaskPort(), __i386_DEBUG_STATE, 563 (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG); 564 if (kret != KERN_SUCCESS) 565 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::SetDBGState failed " 566 "to set debug control register state: " 567 "0x%8.8x.", 568 kret); 569 } 570 return m_state.GetError(e_regSetDBG, Write); 571 } 572 573 void DNBArchImplI386::ThreadWillResume() { 574 // Do we need to step this thread? If so, let the mach thread tell us so. 575 if (m_thread->IsStepping()) { 576 // This is the primary thread, let the arch do anything it needs 577 EnableHardwareSingleStep(true); 578 } 579 580 // Reset the debug status register, if necessary, before we resume. 
581 kern_return_t kret = GetDBGState(false); 582 DNBLogThreadedIf( 583 LOG_WATCHPOINTS, 584 "DNBArchImplI386::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret); 585 if (kret != KERN_SUCCESS) 586 return; 587 588 DBG &debug_state = m_state.context.dbg; 589 bool need_reset = false; 590 uint32_t i, num = NumSupportedHardwareWatchpoints(); 591 for (i = 0; i < num; ++i) 592 if (IsWatchpointHit(debug_state, i)) 593 need_reset = true; 594 595 if (need_reset) { 596 ClearWatchpointHits(debug_state); 597 kret = SetDBGState(false); 598 DNBLogThreadedIf( 599 LOG_WATCHPOINTS, 600 "DNBArchImplI386::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret); 601 } 602 } 603 604 bool DNBArchImplI386::ThreadDidStop() { 605 bool success = true; 606 607 m_state.InvalidateAllRegisterStates(); 608 609 // Are we stepping a single instruction? 610 if (GetGPRState(true) == KERN_SUCCESS) { 611 // We are single stepping, was this the primary thread? 612 if (m_thread->IsStepping()) { 613 // This was the primary thread, we need to clear the trace 614 // bit if so. 615 success = EnableHardwareSingleStep(false) == KERN_SUCCESS; 616 } else { 617 // The MachThread will automatically restore the suspend count 618 // in ThreadDidStop(), so we don't need to do anything here if 619 // we weren't the primary thread the last time 620 } 621 } 622 return success; 623 } 624 625 bool DNBArchImplI386::NotifyException(MachException::Data &exc) { 626 switch (exc.exc_type) { 627 case EXC_BAD_ACCESS: 628 break; 629 case EXC_BAD_INSTRUCTION: 630 break; 631 case EXC_ARITHMETIC: 632 break; 633 case EXC_EMULATION: 634 break; 635 case EXC_SOFTWARE: 636 break; 637 case EXC_BREAKPOINT: 638 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) { 639 // exc_code = EXC_I386_BPT 640 // 641 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS); 642 if (pc != INVALID_NUB_ADDRESS && pc > 0) { 643 pc -= 1; 644 // Check for a breakpoint at one byte prior to the current PC value 645 // since the PC will be just past the trap. 
646 647 DNBBreakpoint *bp = 648 m_thread->Process()->Breakpoints().FindByAddress(pc); 649 if (bp) { 650 // Backup the PC for i386 since the trap was taken and the PC 651 // is at the address following the single byte trap instruction. 652 if (m_state.context.gpr.__eip > 0) { 653 m_state.context.gpr.__eip = static_cast<uint32_t>(pc); 654 // Write the new PC back out 655 SetGPRState(); 656 } 657 } 658 return true; 659 } 660 } else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1) { 661 // exc_code = EXC_I386_SGL 662 // 663 // Check whether this corresponds to a watchpoint hit event. 664 // If yes, set the exc_sub_code to the data break address. 665 nub_addr_t addr = 0; 666 uint32_t hw_index = GetHardwareWatchpointHit(addr); 667 if (hw_index != INVALID_NUB_HW_INDEX) { 668 exc.exc_data[1] = addr; 669 // Piggyback the hw_index in the exc.data. 670 exc.exc_data.push_back(hw_index); 671 } 672 673 return true; 674 } 675 break; 676 case EXC_SYSCALL: 677 break; 678 case EXC_MACH_SYSCALL: 679 break; 680 case EXC_RPC_ALERT: 681 break; 682 } 683 return false; 684 } 685 686 uint32_t DNBArchImplI386::NumSupportedHardwareWatchpoints() { 687 // Available debug address registers: dr0, dr1, dr2, dr3. 688 return 4; 689 } 690 691 static uint32_t size_and_rw_bits(nub_size_t size, bool read, bool write) { 692 uint32_t rw; 693 if (read) { 694 rw = 0x3; // READ or READ/WRITE 695 } else if (write) { 696 rw = 0x1; // WRITE 697 } else { 698 assert(0 && "read and write cannot both be false"); 699 } 700 701 switch (size) { 702 case 1: 703 return rw; 704 case 2: 705 return (0x1 << 2) | rw; 706 case 4: 707 return (0x3 << 2) | rw; 708 case 8: 709 return (0x2 << 2) | rw; 710 } 711 assert(0 && "invalid size, must be one of 1, 2, 4, or 8"); 712 return 0; 713 } 714 715 void DNBArchImplI386::SetWatchpoint(DBG &debug_state, uint32_t hw_index, 716 nub_addr_t addr, nub_size_t size, bool read, 717 bool write) { 718 // Set both dr7 (debug control register) and dri (debug address register). 
719 720 // dr7{7-0} encodes the local/gloabl enable bits: 721 // global enable --. .-- local enable 722 // | | 723 // v v 724 // dr0 -> bits{1-0} 725 // dr1 -> bits{3-2} 726 // dr2 -> bits{5-4} 727 // dr3 -> bits{7-6} 728 // 729 // dr7{31-16} encodes the rw/len bits: 730 // b_x+3, b_x+2, b_x+1, b_x 731 // where bits{x+1, x} => rw 732 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io 733 // read-or-write (unused) 734 // and bits{x+3, x+2} => len 735 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte 736 // 737 // dr0 -> bits{19-16} 738 // dr1 -> bits{23-20} 739 // dr2 -> bits{27-24} 740 // dr3 -> bits{31-28} 741 debug_state.__dr7 |= 742 (1 << (2 * hw_index) | 743 size_and_rw_bits(size, read, write) << (16 + 4 * hw_index)); 744 uint32_t addr_32 = addr & 0xffffffff; 745 switch (hw_index) { 746 case 0: 747 debug_state.__dr0 = addr_32; 748 break; 749 case 1: 750 debug_state.__dr1 = addr_32; 751 break; 752 case 2: 753 debug_state.__dr2 = addr_32; 754 break; 755 case 3: 756 debug_state.__dr3 = addr_32; 757 break; 758 default: 759 assert(0 && 760 "invalid hardware register index, must be one of 0, 1, 2, or 3"); 761 } 762 return; 763 } 764 765 void DNBArchImplI386::ClearWatchpoint(DBG &debug_state, uint32_t hw_index) { 766 debug_state.__dr7 &= ~(3 << (2 * hw_index)); 767 switch (hw_index) { 768 case 0: 769 debug_state.__dr0 = 0; 770 break; 771 case 1: 772 debug_state.__dr1 = 0; 773 break; 774 case 2: 775 debug_state.__dr2 = 0; 776 break; 777 case 3: 778 debug_state.__dr3 = 0; 779 break; 780 default: 781 assert(0 && 782 "invalid hardware register index, must be one of 0, 1, 2, or 3"); 783 } 784 return; 785 } 786 787 bool DNBArchImplI386::IsWatchpointVacant(const DBG &debug_state, 788 uint32_t hw_index) { 789 // Check dr7 (debug control register) for local/global enable bits: 790 // global enable --. 
.-- local enable 791 // | | 792 // v v 793 // dr0 -> bits{1-0} 794 // dr1 -> bits{3-2} 795 // dr2 -> bits{5-4} 796 // dr3 -> bits{7-6} 797 return (debug_state.__dr7 & (3 << (2 * hw_index))) == 0; 798 } 799 800 // Resets local copy of debug status register to wait for the next debug 801 // exception. 802 void DNBArchImplI386::ClearWatchpointHits(DBG &debug_state) { 803 // See also IsWatchpointHit(). 804 debug_state.__dr6 = 0; 805 return; 806 } 807 808 bool DNBArchImplI386::IsWatchpointHit(const DBG &debug_state, 809 uint32_t hw_index) { 810 // Check dr6 (debug status register) whether a watchpoint hits: 811 // is watchpoint hit? 812 // | 813 // v 814 // dr0 -> bits{0} 815 // dr1 -> bits{1} 816 // dr2 -> bits{2} 817 // dr3 -> bits{3} 818 return (debug_state.__dr6 & (1 << hw_index)); 819 } 820 821 nub_addr_t DNBArchImplI386::GetWatchAddress(const DBG &debug_state, 822 uint32_t hw_index) { 823 switch (hw_index) { 824 case 0: 825 return debug_state.__dr0; 826 case 1: 827 return debug_state.__dr1; 828 case 2: 829 return debug_state.__dr2; 830 case 3: 831 return debug_state.__dr3; 832 } 833 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 834 return 0; 835 } 836 837 bool DNBArchImplI386::StartTransForHWP() { 838 if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back) 839 DNBLogError("%s inconsistent state detected, expected %d or %d, got: %d", 840 __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state); 841 m_2pc_dbg_checkpoint = m_state.context.dbg; 842 m_2pc_trans_state = Trans_Pending; 843 return true; 844 } 845 bool DNBArchImplI386::RollbackTransForHWP() { 846 m_state.context.dbg = m_2pc_dbg_checkpoint; 847 if (m_2pc_trans_state != Trans_Pending) 848 DNBLogError("%s inconsistent state detected, expected %d, got: %d", 849 __FUNCTION__, Trans_Pending, m_2pc_trans_state); 850 m_2pc_trans_state = Trans_Rolled_Back; 851 kern_return_t kret = SetDBGState(false); 852 DNBLogThreadedIf( 853 LOG_WATCHPOINTS, 854 
"DNBArchImplI386::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret); 855 856 if (kret == KERN_SUCCESS) 857 return true; 858 else 859 return false; 860 } 861 bool DNBArchImplI386::FinishTransForHWP() { 862 m_2pc_trans_state = Trans_Done; 863 return true; 864 } 865 DNBArchImplI386::DBG DNBArchImplI386::GetDBGCheckpoint() { 866 return m_2pc_dbg_checkpoint; 867 } 868 869 uint32_t DNBArchImplI386::EnableHardwareWatchpoint(nub_addr_t addr, 870 nub_size_t size, bool read, 871 bool write, 872 bool also_set_on_task) { 873 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(" 874 "addr = 0x%llx, size = %llu, read = %u, " 875 "write = %u)", 876 (uint64_t)addr, (uint64_t)size, read, write); 877 878 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints(); 879 880 // Can only watch 1, 2, 4, or 8 bytes. 881 if (!(size == 1 || size == 2 || size == 4 || size == 8)) 882 return INVALID_NUB_HW_INDEX; 883 884 // We must watch for either read or write 885 if (read == false && write == false) 886 return INVALID_NUB_HW_INDEX; 887 888 // Read the debug state 889 kern_return_t kret = GetDBGState(false); 890 891 if (kret == KERN_SUCCESS) { 892 // Check to make sure we have the needed hardware support 893 uint32_t i = 0; 894 895 DBG &debug_state = m_state.context.dbg; 896 for (i = 0; i < num_hw_watchpoints; ++i) { 897 if (IsWatchpointVacant(debug_state, i)) 898 break; 899 } 900 901 // See if we found an available hw breakpoint slot above 902 if (i < num_hw_watchpoints) { 903 StartTransForHWP(); 904 905 // Modify our local copy of the debug state, first. 906 SetWatchpoint(debug_state, i, addr, size, read, write); 907 // Now set the watch point in the inferior. 908 kret = SetDBGState(also_set_on_task); 909 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::" 910 "EnableHardwareWatchpoint() " 911 "SetDBGState() => 0x%8.8x.", 912 kret); 913 914 if (kret == KERN_SUCCESS) 915 return i; 916 else // Revert to the previous debug state voluntarily. 
The transaction 917 // coordinator knows that we have failed. 918 m_state.context.dbg = GetDBGCheckpoint(); 919 } else { 920 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::" 921 "EnableHardwareWatchpoint(): All " 922 "hardware resources (%u) are in use.", 923 num_hw_watchpoints); 924 } 925 } 926 return INVALID_NUB_HW_INDEX; 927 } 928 929 bool DNBArchImplI386::DisableHardwareWatchpoint(uint32_t hw_index, 930 bool also_set_on_task) { 931 kern_return_t kret = GetDBGState(false); 932 933 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 934 if (kret == KERN_SUCCESS) { 935 DBG &debug_state = m_state.context.dbg; 936 if (hw_index < num_hw_points && 937 !IsWatchpointVacant(debug_state, hw_index)) { 938 StartTransForHWP(); 939 940 // Modify our local copy of the debug state, first. 941 ClearWatchpoint(debug_state, hw_index); 942 // Now disable the watch point in the inferior. 943 kret = SetDBGState(also_set_on_task); 944 DNBLogThreadedIf(LOG_WATCHPOINTS, 945 "DNBArchImplI386::DisableHardwareWatchpoint( %u )", 946 hw_index); 947 948 if (kret == KERN_SUCCESS) 949 return true; 950 else // Revert to the previous debug state voluntarily. The transaction 951 // coordinator knows that we have failed. 952 m_state.context.dbg = GetDBGCheckpoint(); 953 } 954 } 955 return false; 956 } 957 958 // Iterate through the debug status register; return the index of the first hit. 
// Scan dr6 for the first watchpoint slot reporting a hit; on a hit, return
// its index and store the watched address into `addr`. Forces a fresh read
// of the debug state. Returns INVALID_NUB_HW_INDEX when nothing hit or the
// debug state could not be read.
uint32_t DNBArchImplI386::GetHardwareWatchpointHit(nub_addr_t &addr) {
  // Read the debug state
  kern_return_t kret = GetDBGState(true);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchImplI386::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
      kret);
  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.context.dbg;
    uint32_t i, num = NumSupportedHardwareWatchpoints();
    for (i = 0; i < num; ++i) {
      if (IsWatchpointHit(debug_state, i)) {
        addr = GetWatchAddress(debug_state, i);
        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::"
                                          "GetHardwareWatchpointHit() found => "
                                          "%u (addr = 0x%llx).",
                         i, (uint64_t)addr);
        return i;
      }
    }
  }
  return INVALID_NUB_HW_INDEX;
}

// Set the single step bit in the processor status register.
// Enables or disables the TF (trap) flag, bit 8 of eflags, so the thread
// raises a debug exception after each instruction.
kern_return_t DNBArchImplI386::EnableHardwareSingleStep(bool enable) {
  if (GetGPRState(false) == KERN_SUCCESS) {
    const uint32_t trace_bit = 0x100u;
    if (enable)
      m_state.context.gpr.__eflags |= trace_bit;
    else
      m_state.context.gpr.__eflags &= ~trace_bit;
    return SetGPRState();
  }
  return m_state.GetError(e_regSetGPR, Read);
}

//----------------------------------------------------------------------
// Register information definitions
//----------------------------------------------------------------------

// Pseudo-register table entries for the 16-bit sub-registers (ax, bx, ...):
// 2 bytes at offset 0 within the containing 32-bit register.
#define DEFINE_GPR_PSEUDO_16(reg16, reg32)                                     \
  {                                                                            \
    e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, 0,                   \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32          \
  }
// High-8 sub-registers (ah, bh, ...): 1 byte at offset 1.
#define DEFINE_GPR_PSEUDO_8H(reg8, reg32)                                      \
  {                                                                            \
    e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 1, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        g_contained_##reg32, g_invalidate_##reg32                              \
  }
// Low-8 sub-registers (al, bl, ...): 1 byte at offset 0.
#define DEFINE_GPR_PSEUDO_8L(reg8, reg32)                                      \
  {                                                                            \
    e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 0, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        g_contained_##reg32, g_invalidate_##reg32                              \
  }

// Byte offsets of each register within the Context structure, as transmitted
// to the debugger.
#define GPR_OFFSET(reg) (offsetof(DNBArchImplI386::GPR, __##reg))
#define FPU_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplI386::FPU, __fpu_##reg) +                               \
   offsetof(DNBArchImplI386::Context, fpu.no_avx))
#define AVX_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplI386::AVX, __fpu_##reg) +                               \
   offsetof(DNBArchImplI386::Context, fpu.avx))
#define EXC_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplI386::EXC, __##reg) +                                   \
   offsetof(DNBArchImplI386::Context, exc))

// Sizes of individual registers, computed from the state structures so the
// tables stay in sync with the OS-defined layouts.
#define GPR_SIZE(reg) (sizeof(((DNBArchImplI386::GPR *)NULL)->__##reg))
#define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg))
#define FPU_SIZE_MMST(reg)                                                     \
  (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__mmst_reg))
#define FPU_SIZE_XMM(reg)                                                      \
  (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__xmm_reg))
#define FPU_SIZE_YMM(reg) (32)
#define EXC_SIZE(reg) (sizeof(((DNBArchImplI386::EXC *)NULL)->__##reg))

// This does not accurately identify the location of ymm0...7 in
// Context.fpu.avx. That is because there is a bunch of padding
// in Context.fpu.avx that we don't need. Offset macros lay out
// the register state that Debugserver transmits to the debugger
// -- not to interpret the thread_get_state info.
#define AVX_OFFSET_YMM(n) (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.

// NULL-terminated lists naming the 32-bit register that contains each pseudo
// register, and the set of register names whose cached values must be
// invalidated whenever the corresponding register is written.
const char *g_contained_eax[] = {"eax", NULL};
const char *g_contained_ebx[] = {"ebx", NULL};
const char *g_contained_ecx[] = {"ecx", NULL};
const char *g_contained_edx[] = {"edx", NULL};
const char *g_contained_edi[] = {"edi", NULL};
const char *g_contained_esi[] = {"esi", NULL};
const char *g_contained_ebp[] = {"ebp", NULL};
const char *g_contained_esp[] = {"esp", NULL};

const char *g_invalidate_eax[] = {"eax", "ax", "ah", "al", NULL};
const char *g_invalidate_ebx[] = {"ebx", "bx", "bh", "bl", NULL};
const char *g_invalidate_ecx[] = {"ecx", "cx", "ch", "cl", NULL};
const char *g_invalidate_edx[] = {"edx", "dx", "dh", "dl", NULL};
const char *g_invalidate_edi[] = {"edi", "di", "dil", NULL};
const char *g_invalidate_esi[] = {"esi", "si", "sil", NULL};
const char *g_invalidate_ebp[] = {"ebp", "bp", "bpl", NULL};
const char *g_invalidate_esp[] = {"esp", "sp", "spl", NULL};

// General purpose registers for 32 bit (i386)
const DNBRegisterInfo DNBArchImplI386::g_gpr_registers[] = {
    {e_regSetGPR, gpr_eax, "eax", NULL, Uint, Hex, GPR_SIZE(eax),
     GPR_OFFSET(eax), ehframe_eax, dwarf_eax, INVALID_NUB_REGNUM,
     debugserver_eax, NULL, g_invalidate_eax},
    {e_regSetGPR, gpr_ebx, "ebx", NULL, Uint, Hex, GPR_SIZE(ebx),
     GPR_OFFSET(ebx), ehframe_ebx, dwarf_ebx, INVALID_NUB_REGNUM,
     debugserver_ebx, NULL, g_invalidate_ebx},
    {e_regSetGPR, gpr_ecx, "ecx", NULL, Uint, Hex, GPR_SIZE(ecx),
     GPR_OFFSET(ecx), ehframe_ecx, dwarf_ecx, INVALID_NUB_REGNUM,
     debugserver_ecx, NULL, g_invalidate_ecx},
    {e_regSetGPR, gpr_edx, "edx", NULL, Uint, Hex, GPR_SIZE(edx),
     GPR_OFFSET(edx), ehframe_edx, dwarf_edx, INVALID_NUB_REGNUM,
     debugserver_edx, NULL, g_invalidate_edx},
    {e_regSetGPR, gpr_edi, "edi", NULL, Uint, Hex, GPR_SIZE(edi),
     GPR_OFFSET(edi), ehframe_edi, dwarf_edi, INVALID_NUB_REGNUM,
     debugserver_edi, NULL, g_invalidate_edi},
    {e_regSetGPR, gpr_esi, "esi", NULL, Uint, Hex, GPR_SIZE(esi),
     GPR_OFFSET(esi), ehframe_esi, dwarf_esi, INVALID_NUB_REGNUM,
     debugserver_esi, NULL, g_invalidate_esi},
    {e_regSetGPR, gpr_ebp, "ebp", "fp", Uint, Hex, GPR_SIZE(ebp),
     GPR_OFFSET(ebp), ehframe_ebp, dwarf_ebp, GENERIC_REGNUM_FP,
     debugserver_ebp, NULL, g_invalidate_ebp},
    {e_regSetGPR, gpr_esp, "esp", "sp", Uint, Hex, GPR_SIZE(esp),
     GPR_OFFSET(esp), ehframe_esp, dwarf_esp, GENERIC_REGNUM_SP,
     debugserver_esp, NULL, g_invalidate_esp},
    {e_regSetGPR, gpr_ss, "ss", NULL, Uint, Hex, GPR_SIZE(ss), GPR_OFFSET(ss),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_ss,
     NULL, NULL},
    {e_regSetGPR, gpr_eflags, "eflags", "flags", Uint, Hex, GPR_SIZE(eflags),
     GPR_OFFSET(eflags), ehframe_eflags, dwarf_eflags, GENERIC_REGNUM_FLAGS,
     debugserver_eflags, NULL, NULL},
    {e_regSetGPR, gpr_eip, "eip", "pc", Uint, Hex, GPR_SIZE(eip),
     GPR_OFFSET(eip), ehframe_eip, dwarf_eip, GENERIC_REGNUM_PC,
     debugserver_eip, NULL, NULL},
    {e_regSetGPR, gpr_cs, "cs", NULL, Uint, Hex, GPR_SIZE(cs), GPR_OFFSET(cs),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_cs,
     NULL, NULL},
    {e_regSetGPR, gpr_ds, "ds", NULL, Uint, Hex, GPR_SIZE(ds), GPR_OFFSET(ds),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_ds,
     NULL, NULL},
    {e_regSetGPR, gpr_es, "es", NULL, Uint, Hex, GPR_SIZE(es), GPR_OFFSET(es),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_es,
     NULL, NULL},
    {e_regSetGPR, gpr_fs, "fs", NULL, Uint, Hex, GPR_SIZE(fs), GPR_OFFSET(fs),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_fs,
     NULL, NULL},
    {e_regSetGPR, gpr_gs, "gs", NULL, Uint, Hex, GPR_SIZE(gs), GPR_OFFSET(gs),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_gs,
     NULL, NULL},
    // Pseudo registers that alias sub-ranges of the 32-bit registers above.
    DEFINE_GPR_PSEUDO_16(ax, eax),
    DEFINE_GPR_PSEUDO_16(bx, ebx),
    DEFINE_GPR_PSEUDO_16(cx, ecx),
    DEFINE_GPR_PSEUDO_16(dx, edx),
    DEFINE_GPR_PSEUDO_16(di, edi),
    DEFINE_GPR_PSEUDO_16(si, esi),
    DEFINE_GPR_PSEUDO_16(bp, ebp),
    DEFINE_GPR_PSEUDO_16(sp, esp),
    DEFINE_GPR_PSEUDO_8H(ah, eax),
    DEFINE_GPR_PSEUDO_8H(bh, ebx),
    DEFINE_GPR_PSEUDO_8H(ch, ecx),
    DEFINE_GPR_PSEUDO_8H(dh, edx),
    DEFINE_GPR_PSEUDO_8L(al, eax),
    DEFINE_GPR_PSEUDO_8L(bl, ebx),
    DEFINE_GPR_PSEUDO_8L(cl, ecx),
    DEFINE_GPR_PSEUDO_8L(dl, edx),
    DEFINE_GPR_PSEUDO_8L(dil, edi),
    DEFINE_GPR_PSEUDO_8L(sil, esi),
    DEFINE_GPR_PSEUDO_8L(bpl, ebp),
    DEFINE_GPR_PSEUDO_8L(spl, esp)};

// Floating point registers when the CPU does not provide AVX state; offsets
// point into the non-AVX portion of the context (FPU_OFFSET).
const DNBRegisterInfo DNBArchImplI386::g_fpu_registers_no_avx[] = {
    {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw),
     FPU_OFFSET(fcw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw),
     FPU_OFFSET(fsw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, FPU_SIZE_UINT(ftw),
     FPU_OFFSET(ftw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop),
     FPU_OFFSET(fop), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip),
     FPU_OFFSET(ip), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs),
     FPU_OFFSET(cs), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp),
     FPU_OFFSET(dp), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds),
     FPU_OFFSET(ds), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr),
     FPU_OFFSET(mxcsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex,
     FPU_SIZE_UINT(mxcsrmask), FPU_OFFSET(mxcsrmask), INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    {e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0,
     INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL},
    {e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1,
     INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL},
    {e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2,
     INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL},
    {e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3,
     INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL},
    {e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4,
     INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL},
    {e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5,
     INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL},
    {e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6,
     INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL},
    {e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7,
     INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL},

    {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), INVALID_NUB_REGNUM, dwarf_xmm0,
     INVALID_NUB_REGNUM, debugserver_xmm0, NULL, NULL},
    {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), INVALID_NUB_REGNUM, dwarf_xmm1,
     INVALID_NUB_REGNUM, debugserver_xmm1, NULL, NULL},
    {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), INVALID_NUB_REGNUM, dwarf_xmm2,
     INVALID_NUB_REGNUM, debugserver_xmm2, NULL, NULL},
    {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), INVALID_NUB_REGNUM, dwarf_xmm3,
     INVALID_NUB_REGNUM, debugserver_xmm3, NULL, NULL},
    {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), INVALID_NUB_REGNUM, dwarf_xmm4,
     INVALID_NUB_REGNUM, debugserver_xmm4, NULL, NULL},
    {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), INVALID_NUB_REGNUM, dwarf_xmm5,
     INVALID_NUB_REGNUM, debugserver_xmm5, NULL, NULL},
    {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), INVALID_NUB_REGNUM, dwarf_xmm6,
     INVALID_NUB_REGNUM, debugserver_xmm6, NULL, NULL},
    {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), INVALID_NUB_REGNUM, dwarf_xmm7,
     INVALID_NUB_REGNUM, debugserver_xmm7, NULL, NULL}};

// Containment lists for the AVX table below: each xmmN register is exposed
// as a sub-range of its ymmN register.
static const char *g_contained_ymm0[] = {"ymm0", NULL};
static const char *g_contained_ymm1[] = {"ymm1", NULL};
static const char *g_contained_ymm2[] = {"ymm2", NULL};
static const char *g_contained_ymm3[] = {"ymm3", NULL};
static const char *g_contained_ymm4[] = {"ymm4", NULL};
static const char *g_contained_ymm5[] = {"ymm5", NULL};
static const char *g_contained_ymm6[] = {"ymm6", NULL};
static const char *g_contained_ymm7[] = {"ymm7", NULL};

// Floating point registers when AVX state is available; offsets point into
// the AVX portion of the context (AVX_OFFSET / AVX_OFFSET_YMM), and the xmmN
// entries alias the low 16 bytes (offset 0) of the corresponding ymmN.
const DNBRegisterInfo DNBArchImplI386::g_fpu_registers_avx[] = {
    {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw),
     AVX_OFFSET(fcw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw),
     AVX_OFFSET(fsw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, FPU_SIZE_UINT(ftw),
     AVX_OFFSET(ftw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop),
     AVX_OFFSET(fop), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip),
     AVX_OFFSET(ip), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs),
     AVX_OFFSET(cs), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp),
     AVX_OFFSET(dp), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds),
     AVX_OFFSET(ds), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr),
     AVX_OFFSET(mxcsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex,
     FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    {e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0,
     INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL},
    {e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1,
     INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL},
    {e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2,
     INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL},
    {e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3,
     INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL},
    {e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4,
     INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL},
    {e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5,
     INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL},
    {e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6,
     INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL},
    {e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7,
     INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL},

    {e_regSetFPU, fpu_ymm0, "ymm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), INVALID_NUB_REGNUM, dwarf_ymm0,
     INVALID_NUB_REGNUM, debugserver_ymm0, NULL, NULL},
    {e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), INVALID_NUB_REGNUM, dwarf_ymm1,
     INVALID_NUB_REGNUM, debugserver_ymm1, NULL, NULL},
    {e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), INVALID_NUB_REGNUM, dwarf_ymm2,
     INVALID_NUB_REGNUM, debugserver_ymm2, NULL, NULL},
    {e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), INVALID_NUB_REGNUM, dwarf_ymm3,
     INVALID_NUB_REGNUM, debugserver_ymm3, NULL, NULL},
    {e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), INVALID_NUB_REGNUM, dwarf_ymm4,
     INVALID_NUB_REGNUM, debugserver_ymm4, NULL, NULL},
    {e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), INVALID_NUB_REGNUM, dwarf_ymm5,
     INVALID_NUB_REGNUM, debugserver_ymm5, NULL, NULL},
    {e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), INVALID_NUB_REGNUM, dwarf_ymm6,
     INVALID_NUB_REGNUM, debugserver_ymm6, NULL, NULL},
    {e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), INVALID_NUB_REGNUM, dwarf_ymm7,
     INVALID_NUB_REGNUM, debugserver_ymm7, NULL, NULL},

    {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm0), 0, INVALID_NUB_REGNUM, dwarf_xmm0, INVALID_NUB_REGNUM,
     debugserver_xmm0, g_contained_ymm0, NULL},
    {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm1), 0, INVALID_NUB_REGNUM, dwarf_xmm1, INVALID_NUB_REGNUM,
     debugserver_xmm1, g_contained_ymm1, NULL},
    {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm2), 0, INVALID_NUB_REGNUM, dwarf_xmm2, INVALID_NUB_REGNUM,
     debugserver_xmm2, g_contained_ymm2, NULL},
    {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm3), 0, INVALID_NUB_REGNUM, dwarf_xmm3, INVALID_NUB_REGNUM,
     debugserver_xmm3, g_contained_ymm3, NULL},
    {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm4), 0, INVALID_NUB_REGNUM, dwarf_xmm4, INVALID_NUB_REGNUM,
     debugserver_xmm4, g_contained_ymm4, NULL},
    {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm5), 0, INVALID_NUB_REGNUM, dwarf_xmm5, INVALID_NUB_REGNUM,
     debugserver_xmm5, g_contained_ymm5, NULL},
    {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm6), 0, INVALID_NUB_REGNUM, dwarf_xmm6, INVALID_NUB_REGNUM,
     debugserver_xmm6, g_contained_ymm6, NULL},
    {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm7), 0, INVALID_NUB_REGNUM, dwarf_xmm7, INVALID_NUB_REGNUM,
     debugserver_xmm7, g_contained_ymm7, NULL},

};

// Exception state registers (trap number, error code, faulting address).
const DNBRegisterInfo DNBArchImplI386::g_exc_registers[] = {
    {e_regSetEXC, exc_trapno, "trapno", NULL, Uint, Hex, EXC_SIZE(trapno),
     EXC_OFFSET(trapno), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_err, "err", NULL, Uint, Hex, EXC_SIZE(err),
     EXC_OFFSET(err), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex,
     EXC_SIZE(faultvaddr), EXC_OFFSET(faultvaddr), INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};

// Number of registers in each register set
const size_t DNBArchImplI386::k_num_gpr_registers =
    sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_fpu_registers_no_avx =
    sizeof(g_fpu_registers_no_avx) / sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_fpu_registers_avx =
    sizeof(g_fpu_registers_avx) / sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_exc_registers =
    sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchImplI386::k_num_all_registers_no_avx =
    k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
const size_t DNBArchImplI386::k_num_all_registers_avx =
    k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;

//----------------------------------------------------------------------
// Register set definitions. The first definitions at register set index
// of zero is for all registers, followed by other registers sets. The
// register information for the all register set need not be filled in.
//----------------------------------------------------------------------
const DNBRegisterSetInfo DNBArchImplI386::g_reg_sets_no_avx[] = {
    {"i386 Registers", NULL, k_num_all_registers_no_avx},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_fpu_registers_no_avx,
     k_num_fpu_registers_no_avx},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};

const DNBRegisterSetInfo DNBArchImplI386::g_reg_sets_avx[] = {
    {"i386 Registers", NULL, k_num_all_registers_avx},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};

// Total number of register sets for this architecture
const size_t DNBArchImplI386::k_num_register_sets =
    sizeof(g_reg_sets_no_avx) / sizeof(DNBRegisterSetInfo);

// Factory function registered with DNBArchProtocol (see Initialize below).
DNBArchProtocol *DNBArchImplI386::Create(MachThread *thread) {
  DNBArchImplI386 *obj = new DNBArchImplI386(thread);
  return obj;
}

// Return the software breakpoint opcode for the requested size, or NULL if
// the size is unsupported. On i386 the only opcode is the 1-byte INT3 (0xCC).
const uint8_t *DNBArchImplI386::SoftwareBreakpointOpcode(nub_size_t byte_size) {
  static const uint8_t g_breakpoint_opcode[] = {0xCC};
  if (byte_size == 1)
    return g_breakpoint_opcode;
  return NULL;
}

// Return the AVX or non-AVX register set table depending on CPU capability
// (or the debug override), and report the number of sets through num_reg_sets.
const DNBRegisterSetInfo *
DNBArchImplI386::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  if (CPUHasAVX() || FORCE_AVX_REGS)
    return g_reg_sets_avx;
  else
    return g_reg_sets_no_avx;
}

// Register this architecture's factory and register-info callbacks with the
// main DNBArchProtocol dispatcher.
void DNBArchImplI386::Initialize() {
  DNBArchPluginInfo arch_plugin_info = {
      CPU_TYPE_I386, DNBArchImplI386::Create,
      DNBArchImplI386::GetRegisterSetInfo,
      DNBArchImplI386::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);
}

// Read register "reg" of register set "set" into "value". Generic register
// numbers (pc/sp/fp/flags) are first translated to their i386 equivalents.
// Returns true on success; false for unknown registers or when the thread
// state cannot be read.
bool DNBArchImplI386::GetRegisterValue(uint32_t set, uint32_t reg,
                                       DNBRegisterValue *value) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_eip;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_esp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_ebp;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_eflags;
      break;

    case GENERIC_REGNUM_RA: // Return Address
    default:
      return false;
    }
  }

  // Make sure the cached copy of this register set is valid before reading.
  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    value->info = *regInfo;
    switch (set) {
    case e_regSetGPR:
      if (reg < k_num_gpr_registers) {
        value->value.uint32 = ((uint32_t *)(&m_state.context.gpr))[reg];
        return true;
      }
      break;

    case e_regSetFPU:
      // The FPU state lives in either the AVX or the non-AVX variant of the
      // context union, so pick the matching field layout.
      if (CPUHasAVX() || FORCE_AVX_REGS) {
        switch (reg) {
        case fpu_fcw:
          value->value.uint16 =
              *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw));
          return true;
        case fpu_fsw:
          value->value.uint16 =
              *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw));
          return true;
        case fpu_ftw:
          value->value.uint8 = m_state.context.fpu.avx.__fpu_ftw;
          return true;
        case fpu_fop:
          value->value.uint16 = m_state.context.fpu.avx.__fpu_fop;
          return true;
        case fpu_ip:
          value->value.uint32 = m_state.context.fpu.avx.__fpu_ip;
          return true;
        case fpu_cs:
          value->value.uint16 = m_state.context.fpu.avx.__fpu_cs;
          return true;
        case fpu_dp:
          value->value.uint32 = m_state.context.fpu.avx.__fpu_dp;
          return true;
        case fpu_ds:
          value->value.uint16 = m_state.context.fpu.avx.__fpu_ds;
          return true;
        case fpu_mxcsr:
          value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsr;
          return true;
        case fpu_mxcsrmask:
          value->value.uint32 = m_state.context.fpu.avx.__fpu_mxcsrmask;
          return true;

        case fpu_stmm0:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, 10);
          return true;
        case fpu_stmm1:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, 10);
          return true;
        case fpu_stmm2:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, 10);
          return true;
        case fpu_stmm3:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, 10);
          return true;
        case fpu_stmm4:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, 10);
          return true;
        case fpu_stmm5:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, 10);
          return true;
        case fpu_stmm6:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, 10);
          return true;
        case fpu_stmm7:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, 10);
          return true;

        case fpu_xmm0:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, 16);
          return true;
        case fpu_xmm1:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, 16);
          return true;
        case fpu_xmm2:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, 16);
          return true;
        case fpu_xmm3:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, 16);
          return true;
        case fpu_xmm4:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, 16);
          return true;
        case fpu_xmm5:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, 16);
          return true;
        case fpu_xmm6:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, 16);
          return true;
        case fpu_xmm7:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, 16);
          return true;

// Assemble a 32-byte ymm value from its two 16-byte halves: xmmN holds the
// low bytes and ymmhN holds the high bytes.
#define MEMCPY_YMM(n)                                                          \
  memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg,  \
         16);                                                                  \
  memcpy((&value->value.uint8) + 16,                                           \
         m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, 16);
        case fpu_ymm0:
          MEMCPY_YMM(0);
          return true;
        case fpu_ymm1:
          MEMCPY_YMM(1);
          return true;
        case fpu_ymm2:
          MEMCPY_YMM(2);
          return true;
        case fpu_ymm3:
          MEMCPY_YMM(3);
          return true;
        case fpu_ymm4:
          MEMCPY_YMM(4);
          return true;
        case fpu_ymm5:
          MEMCPY_YMM(5);
          return true;
        case fpu_ymm6:
          MEMCPY_YMM(6);
          return true;
        case fpu_ymm7:
          MEMCPY_YMM(7);
          return true;
#undef MEMCPY_YMM
        }
      } else {
        switch (reg) {
        case fpu_fcw:
          value->value.uint16 =
              *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));
          return true;
        case fpu_fsw:
          value->value.uint16 =
              *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));
          return true;
        case fpu_ftw:
          value->value.uint8 = m_state.context.fpu.no_avx.__fpu_ftw;
          return true;
        case fpu_fop:
          value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;
          return true;
        case fpu_ip:
          value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;
          return true;
        case fpu_cs:
          value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;
          return true;
        case fpu_dp:
          value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;
          return true;
        case fpu_ds:
          value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;
          return true;
        case fpu_mxcsr:
          value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;
          return true;
        case fpu_mxcsrmask:
          value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;
          return true;

        case fpu_stmm0:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 10);
          return true;
        case fpu_stmm1:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 10);
          return true;
        case fpu_stmm2:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 10);
          return true;
        case fpu_stmm3:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 10);
          return true;
        case fpu_stmm4:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 10);
          return true;
        case fpu_stmm5:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 10);
          return true;
        case fpu_stmm6:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 10);
          return true;
        case fpu_stmm7:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 10);
          return true;

        case fpu_xmm0:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 16);
          return true;
        case fpu_xmm1:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 16);
          return true;
        case fpu_xmm2:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 16);
          return true;
        case fpu_xmm3:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 16);
          return true;
        case fpu_xmm4:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 16);
          return true;
        case fpu_xmm5:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 16);
          return true;
        case fpu_xmm6:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 16);
          return true;
        case fpu_xmm7:
          memcpy(&value->value.uint8,
                 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 16);
          return true;
        }
      }
      break;

    case e_regSetEXC:
      if (reg < k_num_exc_registers) {
        value->value.uint32 = (&m_state.context.exc.__trapno)[reg];
        return true;
      }
      break;
    }
  }
  return false;
}

// Write "value" into register "reg" of register set "set"; generic register
// numbers are translated to their i386 equivalents first.
bool DNBArchImplI386::SetRegisterValue(uint32_t set, uint32_t reg,
                                       const DNBRegisterValue *value) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_eip;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_esp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_ebp;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_eflags;
      break;

    case GENERIC_REGNUM_RA: // Return Address
    default:
      return false;
    }
  }

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  bool success = false;
  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    switch (set) {
    case e_regSetGPR:
      if (reg < k_num_gpr_registers) {
        ((uint32_t *)(&m_state.context.gpr))[reg] = value->value.uint32;
        success = true;
      }
      break;

    case e_regSetFPU:
      if (CPUHasAVX() || FORCE_AVX_REGS) {
        switch (reg) {
        case fpu_fcw:
          *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fcw)) =
              value->value.uint16;
          success = true;
          break;
        case fpu_fsw:
          *((uint16_t *)(&m_state.context.fpu.avx.__fpu_fsw)) =
value->value.uint16; 1765 success = true; 1766 break; 1767 case fpu_ftw: 1768 m_state.context.fpu.avx.__fpu_ftw = value->value.uint8; 1769 success = true; 1770 break; 1771 case fpu_fop: 1772 m_state.context.fpu.avx.__fpu_fop = value->value.uint16; 1773 success = true; 1774 break; 1775 case fpu_ip: 1776 m_state.context.fpu.avx.__fpu_ip = value->value.uint32; 1777 success = true; 1778 break; 1779 case fpu_cs: 1780 m_state.context.fpu.avx.__fpu_cs = value->value.uint16; 1781 success = true; 1782 break; 1783 case fpu_dp: 1784 m_state.context.fpu.avx.__fpu_dp = value->value.uint32; 1785 success = true; 1786 break; 1787 case fpu_ds: 1788 m_state.context.fpu.avx.__fpu_ds = value->value.uint16; 1789 success = true; 1790 break; 1791 case fpu_mxcsr: 1792 m_state.context.fpu.avx.__fpu_mxcsr = value->value.uint32; 1793 success = true; 1794 break; 1795 case fpu_mxcsrmask: 1796 m_state.context.fpu.avx.__fpu_mxcsrmask = value->value.uint32; 1797 success = true; 1798 break; 1799 1800 case fpu_stmm0: 1801 memcpy(m_state.context.fpu.avx.__fpu_stmm0.__mmst_reg, 1802 &value->value.uint8, 10); 1803 success = true; 1804 break; 1805 case fpu_stmm1: 1806 memcpy(m_state.context.fpu.avx.__fpu_stmm1.__mmst_reg, 1807 &value->value.uint8, 10); 1808 success = true; 1809 break; 1810 case fpu_stmm2: 1811 memcpy(m_state.context.fpu.avx.__fpu_stmm2.__mmst_reg, 1812 &value->value.uint8, 10); 1813 success = true; 1814 break; 1815 case fpu_stmm3: 1816 memcpy(m_state.context.fpu.avx.__fpu_stmm3.__mmst_reg, 1817 &value->value.uint8, 10); 1818 success = true; 1819 break; 1820 case fpu_stmm4: 1821 memcpy(m_state.context.fpu.avx.__fpu_stmm4.__mmst_reg, 1822 &value->value.uint8, 10); 1823 success = true; 1824 break; 1825 case fpu_stmm5: 1826 memcpy(m_state.context.fpu.avx.__fpu_stmm5.__mmst_reg, 1827 &value->value.uint8, 10); 1828 success = true; 1829 break; 1830 case fpu_stmm6: 1831 memcpy(m_state.context.fpu.avx.__fpu_stmm6.__mmst_reg, 1832 &value->value.uint8, 10); 1833 success = true; 1834 break; 1835 
case fpu_stmm7: 1836 memcpy(m_state.context.fpu.avx.__fpu_stmm7.__mmst_reg, 1837 &value->value.uint8, 10); 1838 success = true; 1839 break; 1840 1841 case fpu_xmm0: 1842 memcpy(m_state.context.fpu.avx.__fpu_xmm0.__xmm_reg, 1843 &value->value.uint8, 16); 1844 success = true; 1845 break; 1846 case fpu_xmm1: 1847 memcpy(m_state.context.fpu.avx.__fpu_xmm1.__xmm_reg, 1848 &value->value.uint8, 16); 1849 success = true; 1850 break; 1851 case fpu_xmm2: 1852 memcpy(m_state.context.fpu.avx.__fpu_xmm2.__xmm_reg, 1853 &value->value.uint8, 16); 1854 success = true; 1855 break; 1856 case fpu_xmm3: 1857 memcpy(m_state.context.fpu.avx.__fpu_xmm3.__xmm_reg, 1858 &value->value.uint8, 16); 1859 success = true; 1860 break; 1861 case fpu_xmm4: 1862 memcpy(m_state.context.fpu.avx.__fpu_xmm4.__xmm_reg, 1863 &value->value.uint8, 16); 1864 success = true; 1865 break; 1866 case fpu_xmm5: 1867 memcpy(m_state.context.fpu.avx.__fpu_xmm5.__xmm_reg, 1868 &value->value.uint8, 16); 1869 success = true; 1870 break; 1871 case fpu_xmm6: 1872 memcpy(m_state.context.fpu.avx.__fpu_xmm6.__xmm_reg, 1873 &value->value.uint8, 16); 1874 success = true; 1875 break; 1876 case fpu_xmm7: 1877 memcpy(m_state.context.fpu.avx.__fpu_xmm7.__xmm_reg, 1878 &value->value.uint8, 16); 1879 success = true; 1880 break; 1881 1882 #define MEMCPY_YMM(n) \ 1883 memcpy(m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, &value->value.uint8, \ 1884 16); \ 1885 memcpy(m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, \ 1886 (&value->value.uint8) + 16, 16); 1887 case fpu_ymm0: 1888 MEMCPY_YMM(0); 1889 return true; 1890 case fpu_ymm1: 1891 MEMCPY_YMM(1); 1892 return true; 1893 case fpu_ymm2: 1894 MEMCPY_YMM(2); 1895 return true; 1896 case fpu_ymm3: 1897 MEMCPY_YMM(3); 1898 return true; 1899 case fpu_ymm4: 1900 MEMCPY_YMM(4); 1901 return true; 1902 case fpu_ymm5: 1903 MEMCPY_YMM(5); 1904 return true; 1905 case fpu_ymm6: 1906 MEMCPY_YMM(6); 1907 return true; 1908 case fpu_ymm7: 1909 MEMCPY_YMM(7); 1910 return true; 1911 #undef MEMCPY_YMM 
        }
      } else {
        // Non-AVX CPUs: same layout, but through the plain FPU state.
        switch (reg) {
        case fpu_fcw:
          *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) =
              value->value.uint16;
          success = true;
          break;
        case fpu_fsw:
          *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) =
              value->value.uint16;
          success = true;
          break;
        case fpu_ftw:
          m_state.context.fpu.no_avx.__fpu_ftw = value->value.uint8;
          success = true;
          break;
        case fpu_fop:
          m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;
          success = true;
          break;
        case fpu_ip:
          m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;
          success = true;
          break;
        case fpu_cs:
          m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;
          success = true;
          break;
        case fpu_dp:
          m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;
          success = true;
          break;
        case fpu_ds:
          m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;
          success = true;
          break;
        case fpu_mxcsr:
          m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;
          success = true;
          break;
        case fpu_mxcsrmask:
          m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;
          success = true;
          break;

        // Each stmm slot carries 10 significant bytes (80-bit x87 value).
        case fpu_stmm0:
          memcpy(m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg,
                 &value->value.uint8, 10);
          success = true;
          break;
        case fpu_stmm1:
          memcpy(m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg,
                 &value->value.uint8, 10);
          success = true;
          break;
        case fpu_stmm2:
          memcpy(m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg,
                 &value->value.uint8, 10);
          success = true;
          break;
        case fpu_stmm3:
          memcpy(m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg,
                 &value->value.uint8, 10);
          success = true;
          break;
        case fpu_stmm4:
          memcpy(m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg,
                 &value->value.uint8, 10);
          success = true;
          break;
        case fpu_stmm5:
          memcpy(m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg,
                 &value->value.uint8, 10);
          success = true;
          break;
        case fpu_stmm6:
          memcpy(m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg,
                 &value->value.uint8, 10);
          success = true;
          break;
        case fpu_stmm7:
          memcpy(m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg,
                 &value->value.uint8, 10);
          success = true;
          break;

        case fpu_xmm0:
          memcpy(m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg,
                 &value->value.uint8, 16);
          success = true;
          break;
        case fpu_xmm1:
          memcpy(m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg,
                 &value->value.uint8, 16);
          success = true;
          break;
        case fpu_xmm2:
          memcpy(m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg,
                 &value->value.uint8, 16);
          success = true;
          break;
        case fpu_xmm3:
          memcpy(m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg,
                 &value->value.uint8, 16);
          success = true;
          break;
        case fpu_xmm4:
          memcpy(m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg,
                 &value->value.uint8, 16);
          success = true;
          break;
        case fpu_xmm5:
          memcpy(m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg,
                 &value->value.uint8, 16);
          success = true;
          break;
        case fpu_xmm6:
          memcpy(m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg,
                 &value->value.uint8, 16);
          success = true;
          break;
        case fpu_xmm7:
          memcpy(m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg,
                 &value->value.uint8, 16);
          success = true;
          break;
        }
      }
      break;

    case e_regSetEXC:
      // Exception-state fields are contiguous uint32_t members; index off
      // __trapno.
      if (reg < k_num_exc_registers) {
        (&m_state.context.exc.__trapno)[reg] = value->value.uint32;
        success = true;
      }
      break;
    }
  }

  // Only report success if the modified set actually made it back to the
  // thread.
  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}

// Compute (once, then cache) the size in bytes of the packed register
// context produced by GetRegisterContext(): GPR + packed FPU (AVX or not,
// counting only primary registers, not aliases) + EXC.
uint32_t DNBArchImplI386::GetRegisterContextSize() {
static uint32_t g_cached_size = 0; 2059 if (g_cached_size == 0) { 2060 if (CPUHasAVX() || FORCE_AVX_REGS) { 2061 for (size_t i = 0; i < k_num_fpu_registers_avx; ++i) { 2062 if (g_fpu_registers_avx[i].value_regs == NULL) 2063 g_cached_size += g_fpu_registers_avx[i].size; 2064 } 2065 } else { 2066 for (size_t i = 0; i < k_num_fpu_registers_no_avx; ++i) { 2067 if (g_fpu_registers_no_avx[i].value_regs == NULL) 2068 g_cached_size += g_fpu_registers_no_avx[i].size; 2069 } 2070 } 2071 DNBLogThreaded("DNBArchImplX86_64::GetRegisterContextSize() - GPR = %zu, " 2072 "FPU = %u, EXC = %zu", 2073 sizeof(GPR), g_cached_size, sizeof(EXC)); 2074 g_cached_size += sizeof(GPR); 2075 g_cached_size += sizeof(EXC); 2076 DNBLogThreaded( 2077 "DNBArchImplX86_64::GetRegisterContextSize() - GPR + FPU + EXC = %u", 2078 g_cached_size); 2079 } 2080 return g_cached_size; 2081 } 2082 2083 nub_size_t DNBArchImplI386::GetRegisterContext(void *buf, nub_size_t buf_len) { 2084 uint32_t size = GetRegisterContextSize(); 2085 2086 if (buf && buf_len) { 2087 if (size > buf_len) 2088 size = static_cast<uint32_t>(buf_len); 2089 2090 bool force = false; 2091 kern_return_t kret; 2092 if ((kret = GetGPRState(force)) != KERN_SUCCESS) { 2093 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = " 2094 "%p, len = %llu) error: GPR regs failed to " 2095 "read: %u ", 2096 buf, (uint64_t)buf_len, kret); 2097 size = 0; 2098 } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) { 2099 DNBLogThreadedIf( 2100 LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = " 2101 "%llu) error: %s regs failed to read: %u", 2102 buf, (uint64_t)buf_len, CPUHasAVX() ? 
"AVX" : "FPU", kret); 2103 size = 0; 2104 } else if ((kret = GetEXCState(force)) != KERN_SUCCESS) { 2105 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = " 2106 "%p, len = %llu) error: EXC regs failed to " 2107 "read: %u", 2108 buf, (uint64_t)buf_len, kret); 2109 size = 0; 2110 } else { 2111 uint8_t *p = (uint8_t *)buf; 2112 // Copy the GPR registers 2113 memcpy(p, &m_state.context.gpr, sizeof(GPR)); 2114 p += sizeof(GPR); 2115 2116 if (CPUHasAVX() || FORCE_AVX_REGS) { 2117 // Walk around the gaps in the FPU regs 2118 memcpy(p, &m_state.context.fpu.avx.__fpu_fcw, 5); 2119 p += 5; 2120 memcpy(p, &m_state.context.fpu.avx.__fpu_fop, 8); 2121 p += 8; 2122 memcpy(p, &m_state.context.fpu.avx.__fpu_dp, 6); 2123 p += 6; 2124 memcpy(p, &m_state.context.fpu.avx.__fpu_mxcsr, 8); 2125 p += 8; 2126 2127 // Work around the padding between the stmm registers as they are 16 2128 // byte structs with 10 bytes of the value in each 2129 for (size_t i = 0; i < 8; ++i) { 2130 memcpy(p, &m_state.context.fpu.avx.__fpu_stmm0 + i, 10); 2131 p += 10; 2132 } 2133 2134 // Interleave the XMM and YMMH registers to make the YMM registers 2135 for (size_t i = 0; i < 8; ++i) { 2136 memcpy(p, &m_state.context.fpu.avx.__fpu_xmm0 + i, 16); 2137 p += 16; 2138 memcpy(p, &m_state.context.fpu.avx.__fpu_ymmh0 + i, 16); 2139 p += 16; 2140 } 2141 } else { 2142 // Walk around the gaps in the FPU regs 2143 memcpy(p, &m_state.context.fpu.no_avx.__fpu_fcw, 5); 2144 p += 5; 2145 memcpy(p, &m_state.context.fpu.no_avx.__fpu_fop, 8); 2146 p += 8; 2147 memcpy(p, &m_state.context.fpu.no_avx.__fpu_dp, 6); 2148 p += 6; 2149 memcpy(p, &m_state.context.fpu.no_avx.__fpu_mxcsr, 8); 2150 p += 8; 2151 2152 // Work around the padding between the stmm registers as they are 16 2153 // byte structs with 10 bytes of the value in each 2154 for (size_t i = 0; i < 8; ++i) { 2155 memcpy(p, &m_state.context.fpu.no_avx.__fpu_stmm0 + i, 10); 2156 p += 10; 2157 } 2158 2159 // Copy the XMM registers in a single 
block 2160 memcpy(p, &m_state.context.fpu.no_avx.__fpu_xmm0, 8 * 16); 2161 p += 8 * 16; 2162 } 2163 2164 // Copy the exception registers 2165 memcpy(p, &m_state.context.exc, sizeof(EXC)); 2166 p += sizeof(EXC); 2167 2168 // make sure we end up with exactly what we think we should have 2169 size_t bytes_written = p - (uint8_t *)buf; 2170 UNUSED_IF_ASSERT_DISABLED(bytes_written); 2171 assert(bytes_written == size); 2172 } 2173 } 2174 DNBLogThreadedIf( 2175 LOG_THREAD, 2176 "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) => %llu", buf, 2177 (uint64_t)buf_len, (uint64_t)size); 2178 // Return the size of the register context even if NULL was passed in 2179 return size; 2180 } 2181 2182 nub_size_t DNBArchImplI386::SetRegisterContext(const void *buf, 2183 nub_size_t buf_len) { 2184 nub_size_t size = sizeof(m_state.context); 2185 if (buf == NULL || buf_len == 0) 2186 size = 0; 2187 2188 if (size) { 2189 if (size > buf_len) 2190 size = buf_len; 2191 2192 uint8_t *p = (uint8_t *)buf; 2193 // Copy the GPR registers 2194 memcpy(&m_state.context.gpr, p, sizeof(GPR)); 2195 p += sizeof(GPR); 2196 2197 if (CPUHasAVX() || FORCE_AVX_REGS) { 2198 // Walk around the gaps in the FPU regs 2199 memcpy(&m_state.context.fpu.avx.__fpu_fcw, p, 5); 2200 p += 5; 2201 memcpy(&m_state.context.fpu.avx.__fpu_fop, p, 8); 2202 p += 8; 2203 memcpy(&m_state.context.fpu.avx.__fpu_dp, p, 6); 2204 p += 6; 2205 memcpy(&m_state.context.fpu.avx.__fpu_mxcsr, p, 8); 2206 p += 8; 2207 2208 // Work around the padding between the stmm registers as they are 16 2209 // byte structs with 10 bytes of the value in each 2210 for (size_t i = 0; i < 8; ++i) { 2211 memcpy(&m_state.context.fpu.avx.__fpu_stmm0 + i, p, 10); 2212 p += 10; 2213 } 2214 2215 // Interleave the XMM and YMMH registers to make the YMM registers 2216 for (size_t i = 0; i < 8; ++i) { 2217 memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + i, p, 16); 2218 p += 16; 2219 memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + i, p, 16); 2220 p += 16; 2221 } 
2222 } else { 2223 // Copy fcw through mxcsrmask as there is no padding 2224 memcpy(&m_state.context.fpu.no_avx.__fpu_fcw, p, 5); 2225 p += 5; 2226 memcpy(&m_state.context.fpu.no_avx.__fpu_fop, p, 8); 2227 p += 8; 2228 memcpy(&m_state.context.fpu.no_avx.__fpu_dp, p, 6); 2229 p += 6; 2230 memcpy(&m_state.context.fpu.no_avx.__fpu_mxcsr, p, 8); 2231 p += 8; 2232 2233 // Work around the padding between the stmm registers as they are 16 2234 // byte structs with 10 bytes of the value in each 2235 for (size_t i = 0; i < 8; ++i) { 2236 memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + i, p, 10); 2237 p += 10; 2238 } 2239 2240 // Copy the XMM registers in a single block 2241 memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0, p, 8 * 16); 2242 p += 8 * 16; 2243 } 2244 2245 // Copy the exception registers 2246 memcpy(&m_state.context.exc, p, sizeof(EXC)); 2247 p += sizeof(EXC); 2248 2249 // make sure we end up with exactly what we think we should have 2250 size_t bytes_written = p - (uint8_t *)buf; 2251 UNUSED_IF_ASSERT_DISABLED(bytes_written); 2252 assert(bytes_written == size); 2253 kern_return_t kret; 2254 if ((kret = SetGPRState()) != KERN_SUCCESS) 2255 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = " 2256 "%p, len = %llu) error: GPR regs failed to " 2257 "write: %u", 2258 buf, (uint64_t)buf_len, kret); 2259 if ((kret = SetFPUState()) != KERN_SUCCESS) 2260 DNBLogThreadedIf( 2261 LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = " 2262 "%llu) error: %s regs failed to write: %u", 2263 buf, (uint64_t)buf_len, CPUHasAVX() ? 
"AVX" : "FPU", kret); 2264 if ((kret = SetEXCState()) != KERN_SUCCESS) 2265 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = " 2266 "%p, len = %llu) error: EXP regs failed to " 2267 "write: %u", 2268 buf, (uint64_t)buf_len, kret); 2269 } 2270 DNBLogThreadedIf( 2271 LOG_THREAD, 2272 "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) => %llu", buf, 2273 (uint64_t)buf_len, (uint64_t)size); 2274 return size; 2275 } 2276 2277 uint32_t DNBArchImplI386::SaveRegisterState() { 2278 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber()); 2279 DNBLogThreadedIf( 2280 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u " 2281 "(SetGPRState() for stop_count = %u)", 2282 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount()); 2283 2284 bool force = true; 2285 2286 if ((kret = GetGPRState(force)) != KERN_SUCCESS) { 2287 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SaveRegisterState () error: " 2288 "GPR regs failed to read: %u ", 2289 kret); 2290 } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) { 2291 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SaveRegisterState () error: " 2292 "%s regs failed to read: %u", 2293 CPUHasAVX() ? 
"AVX" : "FPU", kret); 2294 } else { 2295 const uint32_t save_id = GetNextRegisterStateSaveID(); 2296 m_saved_register_states[save_id] = m_state.context; 2297 return save_id; 2298 } 2299 return 0; 2300 } 2301 bool DNBArchImplI386::RestoreRegisterState(uint32_t save_id) { 2302 SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id); 2303 if (pos != m_saved_register_states.end()) { 2304 m_state.context.gpr = pos->second.gpr; 2305 m_state.context.fpu = pos->second.fpu; 2306 m_state.context.exc = pos->second.exc; 2307 m_state.SetError(e_regSetGPR, Read, 0); 2308 m_state.SetError(e_regSetFPU, Read, 0); 2309 m_state.SetError(e_regSetEXC, Read, 0); 2310 kern_return_t kret; 2311 bool success = true; 2312 if ((kret = SetGPRState()) != KERN_SUCCESS) { 2313 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::RestoreRegisterState " 2314 "(save_id = %u) error: GPR regs failed to " 2315 "write: %u", 2316 save_id, kret); 2317 success = false; 2318 } else if ((kret = SetFPUState()) != KERN_SUCCESS) { 2319 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::RestoreRegisterState " 2320 "(save_id = %u) error: %s regs failed to " 2321 "write: %u", 2322 save_id, CPUHasAVX() ? "AVX" : "FPU", kret); 2323 success = false; 2324 } 2325 m_saved_register_states.erase(pos); 2326 return success; 2327 } 2328 return false; 2329 } 2330 2331 kern_return_t DNBArchImplI386::GetRegisterState(int set, bool force) { 2332 switch (set) { 2333 case e_regSetALL: 2334 return GetGPRState(force) | GetFPUState(force) | GetEXCState(force); 2335 case e_regSetGPR: 2336 return GetGPRState(force); 2337 case e_regSetFPU: 2338 return GetFPUState(force); 2339 case e_regSetEXC: 2340 return GetEXCState(force); 2341 default: 2342 break; 2343 } 2344 return KERN_INVALID_ARGUMENT; 2345 } 2346 2347 kern_return_t DNBArchImplI386::SetRegisterState(int set) { 2348 // Make sure we have a valid context to set. 
2349 if (RegisterSetStateIsValid(set)) { 2350 switch (set) { 2351 case e_regSetALL: 2352 return SetGPRState() | SetFPUState() | SetEXCState(); 2353 case e_regSetGPR: 2354 return SetGPRState(); 2355 case e_regSetFPU: 2356 return SetFPUState(); 2357 case e_regSetEXC: 2358 return SetEXCState(); 2359 default: 2360 break; 2361 } 2362 } 2363 return KERN_INVALID_ARGUMENT; 2364 } 2365 2366 bool DNBArchImplI386::RegisterSetStateIsValid(int set) const { 2367 return m_state.RegsAreValid(set); 2368 } 2369 2370 #endif // #if defined (__i386__) 2371