//===-- DNBArchImplI386.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined(__i386__) || defined(__x86_64__)

#include <sys/cdefs.h>

#include "DNBLog.h"
#include "MacOSX/i386/DNBArchImplI386.h"
#include "MachProcess.h"
#include "MachThread.h"

extern "C" bool CPUHasAVX();     // Defined over in DNBArchImplX86_64.cpp
extern "C" bool CPUHasAVX512f(); // Defined over in DNBArchImplX86_64.cpp
#if defined(LLDB_DEBUGSERVER_RELEASE) || defined(LLDB_DEBUGSERVER_DEBUG)
enum debugState { debugStateUnknown, debugStateOff, debugStateOn };

static debugState sFPUDebugState = debugStateUnknown;
static debugState sAVXForceState = debugStateUnknown;

static bool DebugFPURegs() {
  if (sFPUDebugState == debugStateUnknown) {
    if (getenv("DNB_DEBUG_FPU_REGS"))
      sFPUDebugState = debugStateOn;
    else
      sFPUDebugState = debugStateOff;
  }

  return (sFPUDebugState == debugStateOn);
}

static bool ForceAVXRegs() {
  if (sAVXForceState == debugStateUnknown) {
    if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
      sAVXForceState = debugStateOn;
    else
      sAVXForceState = debugStateOff;
  }

  return (sAVXForceState == debugStateOn);
}

#define DEBUG_FPU_REGS (DebugFPURegs())
#define FORCE_AVX_REGS (ForceAVXRegs())
#else
#define DEBUG_FPU_REGS (0)
#define FORCE_AVX_REGS (0)
#endif

enum {
  gpr_eax = 0,
  gpr_ebx = 1,
  gpr_ecx = 2,
  gpr_edx = 3,
  gpr_edi = 4,
  gpr_esi = 5,
  gpr_ebp = 6,
  gpr_esp = 7,
  gpr_ss = 8,
  gpr_eflags = 9,
  gpr_eip = 10,
  gpr_cs = 11,
  gpr_ds = 12,
  gpr_es = 13,
  gpr_fs = 14,
  gpr_gs = 15,
  gpr_ax,
  gpr_bx,
  gpr_cx,
  gpr_dx,
  gpr_di,
  gpr_si,
  gpr_bp,
  gpr_sp,
  gpr_ah,
  gpr_bh,
  gpr_ch,
  gpr_dh,
  gpr_al,
  gpr_bl,
  gpr_cl,
  gpr_dl,
  gpr_dil,
  gpr_sil,
  gpr_bpl,
  gpr_spl,
  k_num_gpr_regs
};

enum {
  fpu_fcw,
  fpu_fsw,
  fpu_ftw,
  fpu_fop,
  fpu_ip,
  fpu_cs,
  fpu_dp,
  fpu_ds,
  fpu_mxcsr,
  fpu_mxcsrmask,
  fpu_stmm0,
  fpu_stmm1,
  fpu_stmm2,
  fpu_stmm3,
  fpu_stmm4,
  fpu_stmm5,
  fpu_stmm6,
  fpu_stmm7,
  fpu_xmm0,
  fpu_xmm1,
  fpu_xmm2,
  fpu_xmm3,
  fpu_xmm4,
  fpu_xmm5,
  fpu_xmm6,
  fpu_xmm7,
  fpu_ymm0,
  fpu_ymm1,
  fpu_ymm2,
  fpu_ymm3,
  fpu_ymm4,
  fpu_ymm5,
  fpu_ymm6,
  fpu_ymm7,
  fpu_k0,
  fpu_k1,
  fpu_k2,
  fpu_k3,
  fpu_k4,
  fpu_k5,
  fpu_k6,
  fpu_k7,
  fpu_zmm0,
  fpu_zmm1,
  fpu_zmm2,
  fpu_zmm3,
  fpu_zmm4,
  fpu_zmm5,
  fpu_zmm6,
  fpu_zmm7,
  k_num_fpu_regs,

  // Aliases
  fpu_fctrl = fpu_fcw,
  fpu_fstat = fpu_fsw,
  fpu_ftag = fpu_ftw,
  fpu_fiseg = fpu_cs,
  fpu_fioff = fpu_ip,
  fpu_foseg = fpu_ds,
  fpu_fooff = fpu_dp
};

enum {
  exc_trapno,
  exc_err,
  exc_faultvaddr,
  k_num_exc_regs,
};

enum {
  ehframe_eax = 0,
  ehframe_ecx,
  ehframe_edx,
  ehframe_ebx,

  // On i386 Darwin the eh_frame register numbers for ebp and esp are reversed
  // from DWARF.
177 // It's due to an ancient compiler bug in the output of the eh_frame. 178 // Specifically, on i386 darwin eh_frame, 4 is ebp, 5 is esp. 179 // On i386 darwin debug_frame (and debug_info), 4 is esp, 5 is ebp. 180 ehframe_ebp, 181 ehframe_esp, 182 ehframe_esi, 183 ehframe_edi, 184 ehframe_eip, 185 ehframe_eflags 186 }; 187 188 enum { 189 dwarf_eax = 0, 190 dwarf_ecx, 191 dwarf_edx, 192 dwarf_ebx, 193 dwarf_esp, 194 dwarf_ebp, 195 dwarf_esi, 196 dwarf_edi, 197 dwarf_eip, 198 dwarf_eflags, 199 dwarf_stmm0 = 11, 200 dwarf_stmm1, 201 dwarf_stmm2, 202 dwarf_stmm3, 203 dwarf_stmm4, 204 dwarf_stmm5, 205 dwarf_stmm6, 206 dwarf_stmm7, 207 dwarf_xmm0 = 21, 208 dwarf_xmm1, 209 dwarf_xmm2, 210 dwarf_xmm3, 211 dwarf_xmm4, 212 dwarf_xmm5, 213 dwarf_xmm6, 214 dwarf_xmm7, 215 dwarf_ymm0 = dwarf_xmm0, 216 dwarf_ymm1 = dwarf_xmm1, 217 dwarf_ymm2 = dwarf_xmm2, 218 dwarf_ymm3 = dwarf_xmm3, 219 dwarf_ymm4 = dwarf_xmm4, 220 dwarf_ymm5 = dwarf_xmm5, 221 dwarf_ymm6 = dwarf_xmm6, 222 dwarf_ymm7 = dwarf_xmm7, 223 dwarf_zmm0 = dwarf_xmm0, 224 dwarf_zmm1 = dwarf_xmm1, 225 dwarf_zmm2 = dwarf_xmm2, 226 dwarf_zmm3 = dwarf_xmm3, 227 dwarf_zmm4 = dwarf_xmm4, 228 dwarf_zmm5 = dwarf_xmm5, 229 dwarf_zmm6 = dwarf_xmm6, 230 dwarf_zmm7 = dwarf_xmm7, 231 dwarf_k0 = 118, 232 dwarf_k1, 233 dwarf_k2, 234 dwarf_k3, 235 dwarf_k4, 236 dwarf_k5, 237 dwarf_k6, 238 dwarf_k7, 239 }; 240 241 enum { 242 debugserver_eax = 0, 243 debugserver_ecx = 1, 244 debugserver_edx = 2, 245 debugserver_ebx = 3, 246 debugserver_esp = 4, 247 debugserver_ebp = 5, 248 debugserver_esi = 6, 249 debugserver_edi = 7, 250 debugserver_eip = 8, 251 debugserver_eflags = 9, 252 debugserver_cs = 10, 253 debugserver_ss = 11, 254 debugserver_ds = 12, 255 debugserver_es = 13, 256 debugserver_fs = 14, 257 debugserver_gs = 15, 258 debugserver_stmm0 = 16, 259 debugserver_stmm1 = 17, 260 debugserver_stmm2 = 18, 261 debugserver_stmm3 = 19, 262 debugserver_stmm4 = 20, 263 debugserver_stmm5 = 21, 264 debugserver_stmm6 = 22, 265 debugserver_stmm7 = 23, 266 debugserver_fctrl = 24, 267 debugserver_fcw = debugserver_fctrl, 268 debugserver_fstat = 25, 269 debugserver_fsw = debugserver_fstat, 270 debugserver_ftag = 26, 271 debugserver_ftw = debugserver_ftag, 272 debugserver_fiseg = 27, 273 debugserver_fpu_cs = debugserver_fiseg, 274 debugserver_fioff = 28, 275 debugserver_ip = debugserver_fioff, 276 debugserver_foseg = 29, 277 debugserver_fpu_ds = debugserver_foseg, 278 debugserver_fooff = 30, 279 debugserver_dp = debugserver_fooff, 280 debugserver_fop = 31, 281 debugserver_xmm0 = 32, 282 debugserver_xmm1 = 33, 283 debugserver_xmm2 = 34, 284 debugserver_xmm3 = 35, 285 debugserver_xmm4 = 36, 286 debugserver_xmm5 = 37, 287 debugserver_xmm6 = 38, 288 debugserver_xmm7 = 39, 289 debugserver_mxcsr = 40, 290 debugserver_mm0 = 41, 291 debugserver_mm1 = 42, 292 debugserver_mm2 = 43, 293 debugserver_mm3 = 44, 294 debugserver_mm4 = 45, 295 debugserver_mm5 = 46, 296 debugserver_mm6 = 47, 297 debugserver_mm7 = 48, 298 debugserver_ymm0 = debugserver_xmm0, 299 debugserver_ymm1 = debugserver_xmm1, 300 debugserver_ymm2 = debugserver_xmm2, 301 debugserver_ymm3 = debugserver_xmm3, 302 debugserver_ymm4 = debugserver_xmm4, 303 debugserver_ymm5 = debugserver_xmm5, 304 debugserver_ymm6 = debugserver_xmm6, 305 debugserver_ymm7 = debugserver_xmm7, 306 debugserver_zmm0 = debugserver_xmm0, 307 debugserver_zmm1 = debugserver_xmm1, 308 debugserver_zmm2 = debugserver_xmm2, 309 debugserver_zmm3 = debugserver_xmm3, 310 debugserver_zmm4 = debugserver_xmm4, 311 debugserver_zmm5 = debugserver_xmm5, 312 
  debugserver_zmm6 = debugserver_xmm6,
  debugserver_zmm7 = debugserver_xmm7,
  debugserver_k0 = 118,
  debugserver_k1 = 119,
  debugserver_k2 = 120,
  debugserver_k3 = 121,
  debugserver_k4 = 122,
  debugserver_k5 = 123,
  debugserver_k6 = 124,
  debugserver_k7 = 125,
};

uint64_t DNBArchImplI386::GetPC(uint64_t failValue) {
  // Get program counter
  if (GetGPRState(false) == KERN_SUCCESS)
    return m_state.context.gpr.__eip;
  return failValue;
}

kern_return_t DNBArchImplI386::SetPC(uint64_t value) {
  // Set program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
    m_state.context.gpr.__eip = static_cast<uint32_t>(value);
    err = SetGPRState();
  }
  return err == KERN_SUCCESS;
}

uint64_t DNBArchImplI386::GetSP(uint64_t failValue) {
  // Get stack pointer
  if (GetGPRState(false) == KERN_SUCCESS)
    return m_state.context.gpr.__esp;
  return failValue;
}

// Uncomment the value below to verify the values in the debugger.
//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
//#define SET_GPR(reg) m_state.context.gpr.__##reg = gpr_##reg

kern_return_t DNBArchImplI386::GetGPRState(bool force) {
  if (force || m_state.GetError(e_regSetGPR, Read)) {
#if DEBUG_GPR_VALUES
    SET_GPR(eax);
    SET_GPR(ebx);
    SET_GPR(ecx);
    SET_GPR(edx);
    SET_GPR(edi);
    SET_GPR(esi);
    SET_GPR(ebp);
    SET_GPR(esp);
    SET_GPR(ss);
    SET_GPR(eflags);
    SET_GPR(eip);
    SET_GPR(cs);
    SET_GPR(ds);
    SET_GPR(es);
    SET_GPR(fs);
    SET_GPR(gs);
    m_state.SetError(e_regSetGPR, Read, 0);
#else
    mach_msg_type_number_t count = e_regSetWordSizeGPR;
    m_state.SetError(
        e_regSetGPR, Read,
        ::thread_get_state(m_thread->MachPortNumber(), __i386_THREAD_STATE,
                           (thread_state_t)&m_state.context.gpr, &count));
#endif
  }
  return m_state.GetError(e_regSetGPR, Read);
}

// Uncomment the value below to verify the values in the debugger.
384 //#define DEBUG_FPU_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED 385 386 kern_return_t DNBArchImplI386::GetFPUState(bool force) { 387 if (force || m_state.GetError(e_regSetFPU, Read)) { 388 if (DEBUG_FPU_REGS) { 389 390 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1; 391 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1; 392 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234; 393 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678; 394 m_state.context.fpu.no_avx.__fpu_ftw = 1; 395 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX; 396 m_state.context.fpu.no_avx.__fpu_fop = 2; 397 m_state.context.fpu.no_avx.__fpu_ip = 3; 398 m_state.context.fpu.no_avx.__fpu_cs = 4; 399 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5; 400 m_state.context.fpu.no_avx.__fpu_dp = 6; 401 m_state.context.fpu.no_avx.__fpu_ds = 7; 402 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX; 403 m_state.context.fpu.no_avx.__fpu_mxcsr = 8; 404 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9; 405 for (int i = 0; i < 16; ++i) { 406 if (i < 10) { 407 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a'; 408 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b'; 409 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c'; 410 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd'; 411 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e'; 412 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f'; 413 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g'; 414 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h'; 415 } else { 416 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN; 417 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN; 418 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN; 419 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN; 420 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN; 421 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN; 422 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN; 423 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN; 424 } 425 426 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0'; 427 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1'; 428 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2'; 429 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3'; 430 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4'; 431 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5'; 432 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6'; 433 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7'; 434 } 435 for (int i = 0; i < sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i) 436 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN; 437 m_state.context.fpu.no_avx.__fpu_reserved1 = -1; 438 439 if (CPUHasAVX() || FORCE_AVX_REGS) { 440 for (int i = 0; i < sizeof(m_state.context.fpu.avx.__avx_reserved1); 441 ++i) 442 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN; 443 444 for (int i = 0; i < 16; ++i) { 445 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0'; 446 m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1'; 447 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2'; 448 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3'; 449 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4'; 450 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5'; 451 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6'; 452 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7'; 453 } 454 } 
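      // Note (assumes the usual layout of the __i386_AVX_STATE /
      // __i386_AVX512F_STATE thread states): __fpu_ymmhN holds only the upper
      // 128 bits of ymmN and __fpu_zmmhN only the upper 256 bits of zmmN; the
      // low halves live in the xmm/ymmh fields above, so these debug fill
      // patterns mark just the high portion of each register.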
455 if (CPUHasAVX512f() || FORCE_AVX_REGS) { 456 for (int i = 0; i < 8; ++i) { 457 m_state.context.fpu.avx512f.__fpu_k0.__opmask_reg[i] = '0'; 458 m_state.context.fpu.avx512f.__fpu_k1.__opmask_reg[i] = '1'; 459 m_state.context.fpu.avx512f.__fpu_k2.__opmask_reg[i] = '2'; 460 m_state.context.fpu.avx512f.__fpu_k3.__opmask_reg[i] = '3'; 461 m_state.context.fpu.avx512f.__fpu_k4.__opmask_reg[i] = '4'; 462 m_state.context.fpu.avx512f.__fpu_k5.__opmask_reg[i] = '5'; 463 m_state.context.fpu.avx512f.__fpu_k6.__opmask_reg[i] = '6'; 464 m_state.context.fpu.avx512f.__fpu_k7.__opmask_reg[i] = '7'; 465 } 466 467 for (int i = 0; i < 32; ++i) { 468 m_state.context.fpu.avx512f.__fpu_zmmh0.__ymm_reg[i] = '0'; 469 m_state.context.fpu.avx512f.__fpu_zmmh1.__ymm_reg[i] = '1'; 470 m_state.context.fpu.avx512f.__fpu_zmmh2.__ymm_reg[i] = '2'; 471 m_state.context.fpu.avx512f.__fpu_zmmh3.__ymm_reg[i] = '3'; 472 m_state.context.fpu.avx512f.__fpu_zmmh4.__ymm_reg[i] = '4'; 473 m_state.context.fpu.avx512f.__fpu_zmmh5.__ymm_reg[i] = '5'; 474 m_state.context.fpu.avx512f.__fpu_zmmh6.__ymm_reg[i] = '6'; 475 m_state.context.fpu.avx512f.__fpu_zmmh7.__ymm_reg[i] = '7'; 476 } 477 } 478 m_state.SetError(e_regSetFPU, Read, 0); 479 } else { 480 mach_msg_type_number_t count = e_regSetWordSizeFPU; 481 int flavor = __i386_FLOAT_STATE; 482 483 // On a machine with the AVX512 register set, a process only gets a 484 // full AVX512 register context after it uses the AVX512 registers; 485 // if the process has not yet triggered this change, trying to fetch 486 // the AVX512 registers will fail. Fall through to fetching the AVX 487 // registers. 488 if (CPUHasAVX512f() || FORCE_AVX_REGS) { 489 count = e_regSetWordSizeAVX512f; 490 flavor = __i386_AVX512F_STATE; 491 m_state.SetError(e_regSetFPU, Read, 492 ::thread_get_state(m_thread->MachPortNumber(), flavor, 493 (thread_state_t)&m_state.context.fpu, 494 &count)); 495 DNBLogThreadedIf(LOG_THREAD, 496 "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x", 497 m_thread->MachPortNumber(), flavor, (uint32_t)count, 498 m_state.GetError(e_regSetFPU, Read)); 499 if (m_state.GetError(e_regSetFPU, Read) == KERN_SUCCESS) 500 return m_state.GetError(e_regSetFPU, Read); 501 } 502 if (CPUHasAVX()) { 503 count = e_regSetWordSizeAVX; 504 flavor = __i386_AVX_STATE; 505 } 506 m_state.SetError(e_regSetFPU, Read, 507 ::thread_get_state(m_thread->MachPortNumber(), flavor, 508 (thread_state_t)&m_state.context.fpu, 509 &count)); 510 DNBLogThreadedIf(LOG_THREAD, 511 "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x", 512 m_thread->MachPortNumber(), flavor, (uint32_t)count, 513 m_state.GetError(e_regSetFPU, Read)); 514 } 515 } 516 return m_state.GetError(e_regSetFPU, Read); 517 } 518 519 kern_return_t DNBArchImplI386::GetEXCState(bool force) { 520 if (force || m_state.GetError(e_regSetEXC, Read)) { 521 mach_msg_type_number_t count = e_regSetWordSizeEXC; 522 m_state.SetError( 523 e_regSetEXC, Read, 524 ::thread_get_state(m_thread->MachPortNumber(), __i386_EXCEPTION_STATE, 525 (thread_state_t)&m_state.context.exc, &count)); 526 } 527 return m_state.GetError(e_regSetEXC, Read); 528 } 529 530 kern_return_t DNBArchImplI386::SetGPRState() { 531 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber()); 532 DNBLogThreadedIf( 533 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u " 534 "(SetGPRState() for stop_count = %u)", 535 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount()); 536 537 m_state.SetError(e_regSetGPR, Write, 538 
::thread_set_state(m_thread->MachPortNumber(), 539 __i386_THREAD_STATE, 540 (thread_state_t)&m_state.context.gpr, 541 e_regSetWordSizeGPR)); 542 return m_state.GetError(e_regSetGPR, Write); 543 } 544 545 kern_return_t DNBArchImplI386::SetFPUState() { 546 if (DEBUG_FPU_REGS) { 547 m_state.SetError(e_regSetFPU, Write, 0); 548 return m_state.GetError(e_regSetFPU, Write); 549 } else { 550 int flavor = __i386_FLOAT_STATE; 551 mach_msg_type_number_t count = e_regSetWordSizeFPU; 552 if (CPUHasAVX512f() || FORCE_AVX_REGS) { 553 flavor = __i386_AVX512F_STATE; 554 count = e_regSetWordSizeAVX512f; 555 } else 556 if (CPUHasAVX()) { 557 flavor = __i386_AVX_STATE; 558 count = e_regSetWordSizeAVX; 559 } 560 561 m_state.SetError(e_regSetFPU, Write, 562 ::thread_set_state(m_thread->MachPortNumber(), flavor, 563 (thread_state_t)&m_state.context.fpu, 564 count)); 565 return m_state.GetError(e_regSetFPU, Write); 566 } 567 } 568 569 kern_return_t DNBArchImplI386::SetEXCState() { 570 m_state.SetError(e_regSetEXC, Write, 571 ::thread_set_state(m_thread->MachPortNumber(), 572 __i386_EXCEPTION_STATE, 573 (thread_state_t)&m_state.context.exc, 574 e_regSetWordSizeEXC)); 575 return m_state.GetError(e_regSetEXC, Write); 576 } 577 578 kern_return_t DNBArchImplI386::GetDBGState(bool force) { 579 if (force || m_state.GetError(e_regSetDBG, Read)) { 580 mach_msg_type_number_t count = e_regSetWordSizeDBG; 581 m_state.SetError( 582 e_regSetDBG, Read, 583 ::thread_get_state(m_thread->MachPortNumber(), __i386_DEBUG_STATE, 584 (thread_state_t)&m_state.context.dbg, &count)); 585 } 586 return m_state.GetError(e_regSetDBG, Read); 587 } 588 589 kern_return_t DNBArchImplI386::SetDBGState(bool also_set_on_task) { 590 m_state.SetError(e_regSetDBG, Write, 591 ::thread_set_state(m_thread->MachPortNumber(), 592 __i386_DEBUG_STATE, 593 (thread_state_t)&m_state.context.dbg, 594 e_regSetWordSizeDBG)); 595 if (also_set_on_task) { 596 kern_return_t kret = ::task_set_state( 597 m_thread->Process()->Task().TaskPort(), __i386_DEBUG_STATE, 598 (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG); 599 if (kret != KERN_SUCCESS) 600 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::SetDBGState failed " 601 "to set debug control register state: " 602 "0x%8.8x.", 603 kret); 604 } 605 return m_state.GetError(e_regSetDBG, Write); 606 } 607 608 void DNBArchImplI386::ThreadWillResume() { 609 // Do we need to step this thread? If so, let the mach thread tell us so. 610 if (m_thread->IsStepping()) { 611 // This is the primary thread, let the arch do anything it needs 612 EnableHardwareSingleStep(true); 613 } 614 615 // Reset the debug status register, if necessary, before we resume. 616 kern_return_t kret = GetDBGState(false); 617 DNBLogThreadedIf( 618 LOG_WATCHPOINTS, 619 "DNBArchImplI386::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret); 620 if (kret != KERN_SUCCESS) 621 return; 622 623 DBG &debug_state = m_state.context.dbg; 624 bool need_reset = false; 625 uint32_t i, num = NumSupportedHardwareWatchpoints(); 626 for (i = 0; i < num; ++i) 627 if (IsWatchpointHit(debug_state, i)) 628 need_reset = true; 629 630 if (need_reset) { 631 ClearWatchpointHits(debug_state); 632 kret = SetDBGState(false); 633 DNBLogThreadedIf( 634 LOG_WATCHPOINTS, 635 "DNBArchImplI386::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret); 636 } 637 } 638 639 bool DNBArchImplI386::ThreadDidStop() { 640 bool success = true; 641 642 m_state.InvalidateAllRegisterStates(); 643 644 // Are we stepping a single instruction? 
  if (GetGPRState(true) == KERN_SUCCESS) {
    // We are single stepping, was this the primary thread?
    if (m_thread->IsStepping()) {
      // This was the primary thread, so we need to clear the
      // trace bit.
      success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
    } else {
      // The MachThread will automatically restore the suspend count
      // in ThreadDidStop(), so we don't need to do anything here if
      // we weren't the primary thread the last time.
    }
  }
  return success;
}

bool DNBArchImplI386::NotifyException(MachException::Data &exc) {
  switch (exc.exc_type) {
  case EXC_BAD_ACCESS:
    break;
  case EXC_BAD_INSTRUCTION:
    break;
  case EXC_ARITHMETIC:
    break;
  case EXC_EMULATION:
    break;
  case EXC_SOFTWARE:
    break;
  case EXC_BREAKPOINT:
    if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) {
      // exc_code = EXC_I386_BPT
      //
      nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
      if (pc != INVALID_NUB_ADDRESS && pc > 0) {
        pc -= 1;
        // Check for a breakpoint at one byte prior to the current PC value
        // since the PC will be just past the trap.

        DNBBreakpoint *bp =
            m_thread->Process()->Breakpoints().FindByAddress(pc);
        if (bp) {
          // Backup the PC for i386 since the trap was taken and the PC
          // is at the address following the single byte trap instruction.
          if (m_state.context.gpr.__eip > 0) {
            m_state.context.gpr.__eip = static_cast<uint32_t>(pc);
            // Write the new PC back out
            SetGPRState();
          }
        }
        return true;
      }
    } else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1) {
      // exc_code = EXC_I386_SGL
      //
      // Check whether this corresponds to a watchpoint hit event.
      // If yes, set the exc_sub_code to the data break address.
      nub_addr_t addr = 0;
      uint32_t hw_index = GetHardwareWatchpointHit(addr);
      if (hw_index != INVALID_NUB_HW_INDEX) {
        exc.exc_data[1] = addr;
        // Piggyback the hw_index in the exc.data.
        exc.exc_data.push_back(hw_index);
      }

      return true;
    }
    break;
  case EXC_SYSCALL:
    break;
  case EXC_MACH_SYSCALL:
    break;
  case EXC_RPC_ALERT:
    break;
  }
  return false;
}

uint32_t DNBArchImplI386::NumSupportedHardwareWatchpoints() {
  // Available debug address registers: dr0, dr1, dr2, dr3.
  return 4;
}

static uint32_t size_and_rw_bits(nub_size_t size, bool read, bool write) {
  uint32_t rw;
  if (read) {
    rw = 0x3; // READ or READ/WRITE
  } else if (write) {
    rw = 0x1; // WRITE
  } else {
    assert(0 && "read and write cannot both be false");
  }

  switch (size) {
  case 1:
    return rw;
  case 2:
    return (0x1 << 2) | rw;
  case 4:
    return (0x3 << 2) | rw;
  case 8:
    return (0x2 << 2) | rw;
  }
  assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
  return 0;
}

void DNBArchImplI386::SetWatchpoint(DBG &debug_state, uint32_t hw_index,
                                    nub_addr_t addr, nub_size_t size, bool read,
                                    bool write) {
  // Set both dr7 (debug control register) and dri (debug address register).

  // dr7{7-0} encodes the local/global enable bits:
  //  global enable --.
.-- local enable 757 // | | 758 // v v 759 // dr0 -> bits{1-0} 760 // dr1 -> bits{3-2} 761 // dr2 -> bits{5-4} 762 // dr3 -> bits{7-6} 763 // 764 // dr7{31-16} encodes the rw/len bits: 765 // b_x+3, b_x+2, b_x+1, b_x 766 // where bits{x+1, x} => rw 767 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io 768 // read-or-write (unused) 769 // and bits{x+3, x+2} => len 770 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte 771 // 772 // dr0 -> bits{19-16} 773 // dr1 -> bits{23-20} 774 // dr2 -> bits{27-24} 775 // dr3 -> bits{31-28} 776 debug_state.__dr7 |= 777 (1 << (2 * hw_index) | 778 size_and_rw_bits(size, read, write) << (16 + 4 * hw_index)); 779 uint32_t addr_32 = addr & 0xffffffff; 780 switch (hw_index) { 781 case 0: 782 debug_state.__dr0 = addr_32; 783 break; 784 case 1: 785 debug_state.__dr1 = addr_32; 786 break; 787 case 2: 788 debug_state.__dr2 = addr_32; 789 break; 790 case 3: 791 debug_state.__dr3 = addr_32; 792 break; 793 default: 794 assert(0 && 795 "invalid hardware register index, must be one of 0, 1, 2, or 3"); 796 } 797 return; 798 } 799 800 void DNBArchImplI386::ClearWatchpoint(DBG &debug_state, uint32_t hw_index) { 801 debug_state.__dr7 &= ~(3 << (2 * hw_index)); 802 switch (hw_index) { 803 case 0: 804 debug_state.__dr0 = 0; 805 break; 806 case 1: 807 debug_state.__dr1 = 0; 808 break; 809 case 2: 810 debug_state.__dr2 = 0; 811 break; 812 case 3: 813 debug_state.__dr3 = 0; 814 break; 815 default: 816 assert(0 && 817 "invalid hardware register index, must be one of 0, 1, 2, or 3"); 818 } 819 return; 820 } 821 822 bool DNBArchImplI386::IsWatchpointVacant(const DBG &debug_state, 823 uint32_t hw_index) { 824 // Check dr7 (debug control register) for local/global enable bits: 825 // global enable --. .-- local enable 826 // | | 827 // v v 828 // dr0 -> bits{1-0} 829 // dr1 -> bits{3-2} 830 // dr2 -> bits{5-4} 831 // dr3 -> bits{7-6} 832 return (debug_state.__dr7 & (3 << (2 * hw_index))) == 0; 833 } 834 835 // Resets local copy of debug status register to wait for the next debug 836 // exception. 837 void DNBArchImplI386::ClearWatchpointHits(DBG &debug_state) { 838 // See also IsWatchpointHit(). 839 debug_state.__dr6 = 0; 840 return; 841 } 842 843 bool DNBArchImplI386::IsWatchpointHit(const DBG &debug_state, 844 uint32_t hw_index) { 845 // Check dr6 (debug status register) whether a watchpoint hits: 846 // is watchpoint hit? 
847 // | 848 // v 849 // dr0 -> bits{0} 850 // dr1 -> bits{1} 851 // dr2 -> bits{2} 852 // dr3 -> bits{3} 853 return (debug_state.__dr6 & (1 << hw_index)); 854 } 855 856 nub_addr_t DNBArchImplI386::GetWatchAddress(const DBG &debug_state, 857 uint32_t hw_index) { 858 switch (hw_index) { 859 case 0: 860 return debug_state.__dr0; 861 case 1: 862 return debug_state.__dr1; 863 case 2: 864 return debug_state.__dr2; 865 case 3: 866 return debug_state.__dr3; 867 } 868 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3"); 869 return 0; 870 } 871 872 bool DNBArchImplI386::StartTransForHWP() { 873 if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back) 874 DNBLogError("%s inconsistent state detected, expected %d or %d, got: %d", 875 __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state); 876 m_2pc_dbg_checkpoint = m_state.context.dbg; 877 m_2pc_trans_state = Trans_Pending; 878 return true; 879 } 880 bool DNBArchImplI386::RollbackTransForHWP() { 881 m_state.context.dbg = m_2pc_dbg_checkpoint; 882 if (m_2pc_trans_state != Trans_Pending) 883 DNBLogError("%s inconsistent state detected, expected %d, got: %d", 884 __FUNCTION__, Trans_Pending, m_2pc_trans_state); 885 m_2pc_trans_state = Trans_Rolled_Back; 886 kern_return_t kret = SetDBGState(false); 887 DNBLogThreadedIf( 888 LOG_WATCHPOINTS, 889 "DNBArchImplI386::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret); 890 891 return kret == KERN_SUCCESS; 892 } 893 bool DNBArchImplI386::FinishTransForHWP() { 894 m_2pc_trans_state = Trans_Done; 895 return true; 896 } 897 DNBArchImplI386::DBG DNBArchImplI386::GetDBGCheckpoint() { 898 return m_2pc_dbg_checkpoint; 899 } 900 901 uint32_t DNBArchImplI386::EnableHardwareWatchpoint(nub_addr_t addr, 902 nub_size_t size, bool read, 903 bool write, 904 bool also_set_on_task) { 905 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint(" 906 "addr = 0x%llx, size = %llu, read = %u, " 907 "write = %u)", 908 (uint64_t)addr, (uint64_t)size, read, write); 909 910 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints(); 911 912 // Can only watch 1, 2, 4, or 8 bytes. 913 if (!(size == 1 || size == 2 || size == 4 || size == 8)) 914 return INVALID_NUB_HW_INDEX; 915 916 // We must watch for either read or write 917 if (!read && !write) 918 return INVALID_NUB_HW_INDEX; 919 920 // Read the debug state 921 kern_return_t kret = GetDBGState(false); 922 923 if (kret == KERN_SUCCESS) { 924 // Check to make sure we have the needed hardware support 925 uint32_t i = 0; 926 927 DBG &debug_state = m_state.context.dbg; 928 for (i = 0; i < num_hw_watchpoints; ++i) { 929 if (IsWatchpointVacant(debug_state, i)) 930 break; 931 } 932 933 // See if we found an available hw breakpoint slot above 934 if (i < num_hw_watchpoints) { 935 StartTransForHWP(); 936 937 // Modify our local copy of the debug state, first. 938 SetWatchpoint(debug_state, i, addr, size, read, write); 939 // Now set the watch point in the inferior. 940 kret = SetDBGState(also_set_on_task); 941 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::" 942 "EnableHardwareWatchpoint() " 943 "SetDBGState() => 0x%8.8x.", 944 kret); 945 946 if (kret == KERN_SUCCESS) 947 return i; 948 else // Revert to the previous debug state voluntarily. The transaction 949 // coordinator knows that we have failed. 
950 m_state.context.dbg = GetDBGCheckpoint(); 951 } else { 952 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::" 953 "EnableHardwareWatchpoint(): All " 954 "hardware resources (%u) are in use.", 955 num_hw_watchpoints); 956 } 957 } 958 return INVALID_NUB_HW_INDEX; 959 } 960 961 bool DNBArchImplI386::DisableHardwareWatchpoint(uint32_t hw_index, 962 bool also_set_on_task) { 963 kern_return_t kret = GetDBGState(false); 964 965 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints(); 966 if (kret == KERN_SUCCESS) { 967 DBG &debug_state = m_state.context.dbg; 968 if (hw_index < num_hw_points && 969 !IsWatchpointVacant(debug_state, hw_index)) { 970 StartTransForHWP(); 971 972 // Modify our local copy of the debug state, first. 973 ClearWatchpoint(debug_state, hw_index); 974 // Now disable the watch point in the inferior. 975 kret = SetDBGState(also_set_on_task); 976 DNBLogThreadedIf(LOG_WATCHPOINTS, 977 "DNBArchImplI386::DisableHardwareWatchpoint( %u )", 978 hw_index); 979 980 if (kret == KERN_SUCCESS) 981 return true; 982 else // Revert to the previous debug state voluntarily. The transaction 983 // coordinator knows that we have failed. 984 m_state.context.dbg = GetDBGCheckpoint(); 985 } 986 } 987 return false; 988 } 989 990 // Iterate through the debug status register; return the index of the first hit. 991 uint32_t DNBArchImplI386::GetHardwareWatchpointHit(nub_addr_t &addr) { 992 // Read the debug state 993 kern_return_t kret = GetDBGState(true); 994 DNBLogThreadedIf( 995 LOG_WATCHPOINTS, 996 "DNBArchImplI386::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.", 997 kret); 998 if (kret == KERN_SUCCESS) { 999 DBG &debug_state = m_state.context.dbg; 1000 uint32_t i, num = NumSupportedHardwareWatchpoints(); 1001 for (i = 0; i < num; ++i) { 1002 if (IsWatchpointHit(debug_state, i)) { 1003 addr = GetWatchAddress(debug_state, i); 1004 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::" 1005 "GetHardwareWatchpointHit() found => " 1006 "%u (addr = 0x%llx).", 1007 i, (uint64_t)addr); 1008 return i; 1009 } 1010 } 1011 } 1012 return INVALID_NUB_HW_INDEX; 1013 } 1014 1015 // Set the single step bit in the processor status register. 
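// The trap flag is bit 8 (mask 0x100) of EFLAGS; while it is set, the CPU
// raises a single-step debug exception after each instruction, which Mach
// reports as EXC_BREAKPOINT with code EXC_I386_SGL (handled above in
// NotifyException()).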
1016 kern_return_t DNBArchImplI386::EnableHardwareSingleStep(bool enable) { 1017 if (GetGPRState(false) == KERN_SUCCESS) { 1018 const uint32_t trace_bit = 0x100u; 1019 if (enable) 1020 m_state.context.gpr.__eflags |= trace_bit; 1021 else 1022 m_state.context.gpr.__eflags &= ~trace_bit; 1023 return SetGPRState(); 1024 } 1025 return m_state.GetError(e_regSetGPR, Read); 1026 } 1027 1028 // Register information definitions 1029 1030 #define DEFINE_GPR_PSEUDO_16(reg16, reg32) \ 1031 { \ 1032 e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, 0, \ 1033 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1034 INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32 \ 1035 } 1036 #define DEFINE_GPR_PSEUDO_8H(reg8, reg32) \ 1037 { \ 1038 e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 1, INVALID_NUB_REGNUM, \ 1039 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1040 g_contained_##reg32, g_invalidate_##reg32 \ 1041 } 1042 #define DEFINE_GPR_PSEUDO_8L(reg8, reg32) \ 1043 { \ 1044 e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 0, INVALID_NUB_REGNUM, \ 1045 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1046 g_contained_##reg32, g_invalidate_##reg32 \ 1047 } 1048 1049 #define GPR_OFFSET(reg) (offsetof(DNBArchImplI386::GPR, __##reg)) 1050 #define FPU_OFFSET(reg) \ 1051 (offsetof(DNBArchImplI386::FPU, __fpu_##reg) + \ 1052 offsetof(DNBArchImplI386::Context, fpu.no_avx)) 1053 #define AVX_OFFSET(reg) \ 1054 (offsetof(DNBArchImplI386::AVX, __fpu_##reg) + \ 1055 offsetof(DNBArchImplI386::Context, fpu.avx)) 1056 #define AVX512F_OFFSET(reg) \ 1057 (offsetof(DNBArchImplI386::AVX512F, __fpu_##reg) + \ 1058 offsetof(DNBArchImplI386::Context, fpu.avx512f)) 1059 #define EXC_OFFSET(reg) \ 1060 (offsetof(DNBArchImplI386::EXC, __##reg) + \ 1061 offsetof(DNBArchImplI386::Context, exc)) 1062 1063 #define GPR_SIZE(reg) (sizeof(((DNBArchImplI386::GPR *)NULL)->__##reg)) 1064 #define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg)) 1065 #define FPU_SIZE_MMST(reg) \ 1066 (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__mmst_reg)) 1067 #define FPU_SIZE_XMM(reg) \ 1068 (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__xmm_reg)) 1069 #define FPU_SIZE_YMM(reg) (32) 1070 #define FPU_SIZE_ZMM(reg) (64) 1071 #define EXC_SIZE(reg) (sizeof(((DNBArchImplI386::EXC *)NULL)->__##reg)) 1072 1073 // This does not accurately identify the location of ymm0...7 in 1074 // Context.fpu.avx. That is because there is a bunch of padding 1075 // in Context.fpu.avx that we don't need. Offset macros lay out 1076 // the register state that Debugserver transmits to the debugger 1077 // -- not to interpret the thread_get_state info. 1078 #define AVX_OFFSET_YMM(n) (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n)) 1079 1080 // TODO: Test this and come back. 1081 #define AVX512F_OFFSET_ZMM(n) (AVX_OFFSET_YMM(7) + FPU_SIZE_XMM(xmm7) + (64 * n)) 1082 1083 // These macros will auto define the register name, alt name, register size, 1084 // register offset, encoding, format and native register. This ensures that 1085 // the register state structures are defined correctly and have the correct 1086 // sizes and offsets. 
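// For illustration, DEFINE_GPR_PSEUDO_16(ax, eax) above expands to roughly:
//
//   { e_regSetGPR, gpr_ax, "ax", NULL, Uint, Hex, 2, 0, INVALID_NUB_REGNUM,
//     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
//     g_contained_eax, g_invalidate_eax }
//
// i.e. a 2-byte pseudo register at offset 0 within eax, with no
// eh_frame/DWARF/generic/debugserver numbers of its own, contained in eax,
// and invalidating eax/ax/ah/al whenever it is written.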
const char *g_contained_eax[] = {"eax", NULL};
const char *g_contained_ebx[] = {"ebx", NULL};
const char *g_contained_ecx[] = {"ecx", NULL};
const char *g_contained_edx[] = {"edx", NULL};
const char *g_contained_edi[] = {"edi", NULL};
const char *g_contained_esi[] = {"esi", NULL};
const char *g_contained_ebp[] = {"ebp", NULL};
const char *g_contained_esp[] = {"esp", NULL};

const char *g_invalidate_eax[] = {"eax", "ax", "ah", "al", NULL};
const char *g_invalidate_ebx[] = {"ebx", "bx", "bh", "bl", NULL};
const char *g_invalidate_ecx[] = {"ecx", "cx", "ch", "cl", NULL};
const char *g_invalidate_edx[] = {"edx", "dx", "dh", "dl", NULL};
const char *g_invalidate_edi[] = {"edi", "di", "dil", NULL};
const char *g_invalidate_esi[] = {"esi", "si", "sil", NULL};
const char *g_invalidate_ebp[] = {"ebp", "bp", "bpl", NULL};
const char *g_invalidate_esp[] = {"esp", "sp", "spl", NULL};

// General purpose registers for 32 bit
const DNBRegisterInfo DNBArchImplI386::g_gpr_registers[] = {
    {e_regSetGPR, gpr_eax, "eax", NULL, Uint, Hex, GPR_SIZE(eax),
     GPR_OFFSET(eax), ehframe_eax, dwarf_eax, INVALID_NUB_REGNUM,
     debugserver_eax, NULL, g_invalidate_eax},
    {e_regSetGPR, gpr_ebx, "ebx", NULL, Uint, Hex, GPR_SIZE(ebx),
     GPR_OFFSET(ebx), ehframe_ebx, dwarf_ebx, INVALID_NUB_REGNUM,
     debugserver_ebx, NULL, g_invalidate_ebx},
    {e_regSetGPR, gpr_ecx, "ecx", NULL, Uint, Hex, GPR_SIZE(ecx),
     GPR_OFFSET(ecx), ehframe_ecx, dwarf_ecx, INVALID_NUB_REGNUM,
     debugserver_ecx, NULL, g_invalidate_ecx},
    {e_regSetGPR, gpr_edx, "edx", NULL, Uint, Hex, GPR_SIZE(edx),
     GPR_OFFSET(edx), ehframe_edx, dwarf_edx, INVALID_NUB_REGNUM,
     debugserver_edx, NULL, g_invalidate_edx},
    {e_regSetGPR, gpr_edi, "edi", NULL, Uint, Hex, GPR_SIZE(edi),
     GPR_OFFSET(edi), ehframe_edi, dwarf_edi, INVALID_NUB_REGNUM,
     debugserver_edi, NULL, g_invalidate_edi},
    {e_regSetGPR, gpr_esi, "esi", NULL, Uint, Hex, GPR_SIZE(esi),
     GPR_OFFSET(esi), ehframe_esi, dwarf_esi, INVALID_NUB_REGNUM,
     debugserver_esi, NULL, g_invalidate_esi},
    {e_regSetGPR, gpr_ebp, "ebp", "fp", Uint, Hex, GPR_SIZE(ebp),
     GPR_OFFSET(ebp), ehframe_ebp, dwarf_ebp, GENERIC_REGNUM_FP,
     debugserver_ebp, NULL, g_invalidate_ebp},
    {e_regSetGPR, gpr_esp, "esp", "sp", Uint, Hex, GPR_SIZE(esp),
     GPR_OFFSET(esp), ehframe_esp, dwarf_esp, GENERIC_REGNUM_SP,
     debugserver_esp, NULL, g_invalidate_esp},
    {e_regSetGPR, gpr_ss, "ss", NULL, Uint, Hex, GPR_SIZE(ss), GPR_OFFSET(ss),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_ss,
     NULL, NULL},
    {e_regSetGPR, gpr_eflags, "eflags", "flags", Uint, Hex, GPR_SIZE(eflags),
     GPR_OFFSET(eflags), ehframe_eflags, dwarf_eflags, GENERIC_REGNUM_FLAGS,
     debugserver_eflags, NULL, NULL},
    {e_regSetGPR, gpr_eip, "eip", "pc", Uint, Hex, GPR_SIZE(eip),
     GPR_OFFSET(eip), ehframe_eip, dwarf_eip, GENERIC_REGNUM_PC,
     debugserver_eip, NULL, NULL},
    {e_regSetGPR, gpr_cs, "cs", NULL, Uint, Hex, GPR_SIZE(cs), GPR_OFFSET(cs),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_cs,
     NULL, NULL},
    {e_regSetGPR, gpr_ds, "ds", NULL, Uint, Hex, GPR_SIZE(ds), GPR_OFFSET(ds),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_ds,
     NULL, NULL},
    {e_regSetGPR, gpr_es, "es", NULL, Uint, Hex, GPR_SIZE(es), GPR_OFFSET(es),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
INVALID_NUB_REGNUM, debugserver_es, 1149 NULL, NULL}, 1150 {e_regSetGPR, gpr_fs, "fs", NULL, Uint, Hex, GPR_SIZE(fs), GPR_OFFSET(fs), 1151 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_fs, 1152 NULL, NULL}, 1153 {e_regSetGPR, gpr_gs, "gs", NULL, Uint, Hex, GPR_SIZE(gs), GPR_OFFSET(gs), 1154 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_gs, 1155 NULL, NULL}, 1156 DEFINE_GPR_PSEUDO_16(ax, eax), 1157 DEFINE_GPR_PSEUDO_16(bx, ebx), 1158 DEFINE_GPR_PSEUDO_16(cx, ecx), 1159 DEFINE_GPR_PSEUDO_16(dx, edx), 1160 DEFINE_GPR_PSEUDO_16(di, edi), 1161 DEFINE_GPR_PSEUDO_16(si, esi), 1162 DEFINE_GPR_PSEUDO_16(bp, ebp), 1163 DEFINE_GPR_PSEUDO_16(sp, esp), 1164 DEFINE_GPR_PSEUDO_8H(ah, eax), 1165 DEFINE_GPR_PSEUDO_8H(bh, ebx), 1166 DEFINE_GPR_PSEUDO_8H(ch, ecx), 1167 DEFINE_GPR_PSEUDO_8H(dh, edx), 1168 DEFINE_GPR_PSEUDO_8L(al, eax), 1169 DEFINE_GPR_PSEUDO_8L(bl, ebx), 1170 DEFINE_GPR_PSEUDO_8L(cl, ecx), 1171 DEFINE_GPR_PSEUDO_8L(dl, edx), 1172 DEFINE_GPR_PSEUDO_8L(dil, edi), 1173 DEFINE_GPR_PSEUDO_8L(sil, esi), 1174 DEFINE_GPR_PSEUDO_8L(bpl, ebp), 1175 DEFINE_GPR_PSEUDO_8L(spl, esp)}; 1176 1177 const DNBRegisterInfo DNBArchImplI386::g_fpu_registers_no_avx[] = { 1178 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw), 1179 FPU_OFFSET(fcw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1180 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1181 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw), 1182 FPU_OFFSET(fsw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1183 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1184 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */, 1185 FPU_OFFSET(ftw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1186 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1187 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop), 1188 FPU_OFFSET(fop), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1189 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1190 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip), 1191 FPU_OFFSET(ip), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1192 INVALID_NUB_REGNUM, NULL, NULL}, 1193 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs), 1194 FPU_OFFSET(cs), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1195 INVALID_NUB_REGNUM, NULL, NULL}, 1196 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp), 1197 FPU_OFFSET(dp), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1198 INVALID_NUB_REGNUM, NULL, NULL}, 1199 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds), 1200 FPU_OFFSET(ds), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1201 INVALID_NUB_REGNUM, NULL, NULL}, 1202 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr), 1203 FPU_OFFSET(mxcsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1204 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1205 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex, 1206 FPU_SIZE_UINT(mxcsrmask), FPU_OFFSET(mxcsrmask), INVALID_NUB_REGNUM, 1207 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1208 1209 {e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, 1210 FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0, 1211 INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL}, 1212 {e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, 1213 FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1, 
1214 INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL}, 1215 {e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, 1216 FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2, 1217 INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL}, 1218 {e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, 1219 FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3, 1220 INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL}, 1221 {e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, 1222 FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4, 1223 INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL}, 1224 {e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, 1225 FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5, 1226 INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL}, 1227 {e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, 1228 FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6, 1229 INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL}, 1230 {e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, 1231 FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7, 1232 INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL}, 1233 1234 {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, 1235 FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), INVALID_NUB_REGNUM, dwarf_xmm0, 1236 INVALID_NUB_REGNUM, debugserver_xmm0, NULL, NULL}, 1237 {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, 1238 FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), INVALID_NUB_REGNUM, dwarf_xmm1, 1239 INVALID_NUB_REGNUM, debugserver_xmm1, NULL, NULL}, 1240 {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, 1241 FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), INVALID_NUB_REGNUM, dwarf_xmm2, 1242 INVALID_NUB_REGNUM, debugserver_xmm2, NULL, NULL}, 1243 {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, 1244 FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), INVALID_NUB_REGNUM, dwarf_xmm3, 1245 INVALID_NUB_REGNUM, debugserver_xmm3, NULL, NULL}, 1246 {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, 1247 FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), INVALID_NUB_REGNUM, dwarf_xmm4, 1248 INVALID_NUB_REGNUM, debugserver_xmm4, NULL, NULL}, 1249 {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, 1250 FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), INVALID_NUB_REGNUM, dwarf_xmm5, 1251 INVALID_NUB_REGNUM, debugserver_xmm5, NULL, NULL}, 1252 {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, 1253 FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), INVALID_NUB_REGNUM, dwarf_xmm6, 1254 INVALID_NUB_REGNUM, debugserver_xmm6, NULL, NULL}, 1255 {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, 1256 FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), INVALID_NUB_REGNUM, dwarf_xmm7, 1257 INVALID_NUB_REGNUM, debugserver_xmm7, NULL, NULL}}; 1258 1259 static const char *g_contained_ymm0[] = {"ymm0", NULL}; 1260 static const char *g_contained_ymm1[] = {"ymm1", NULL}; 1261 static const char *g_contained_ymm2[] = {"ymm2", NULL}; 1262 static const char *g_contained_ymm3[] = {"ymm3", NULL}; 1263 static const char *g_contained_ymm4[] = {"ymm4", NULL}; 1264 static const char *g_contained_ymm5[] = {"ymm5", NULL}; 1265 static const char *g_contained_ymm6[] = {"ymm6", NULL}; 1266 static const char *g_contained_ymm7[] = {"ymm7", NULL}; 1267 1268 const DNBRegisterInfo DNBArchImplI386::g_fpu_registers_avx[] = { 1269 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw), 1270 AVX_OFFSET(fcw), INVALID_NUB_REGNUM, 
INVALID_NUB_REGNUM, 1271 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1272 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw), 1273 AVX_OFFSET(fsw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1274 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1275 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */, 1276 AVX_OFFSET(ftw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1277 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1278 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop), 1279 AVX_OFFSET(fop), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1280 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1281 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip), 1282 AVX_OFFSET(ip), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1283 INVALID_NUB_REGNUM, NULL, NULL}, 1284 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs), 1285 AVX_OFFSET(cs), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1286 INVALID_NUB_REGNUM, NULL, NULL}, 1287 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp), 1288 AVX_OFFSET(dp), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1289 INVALID_NUB_REGNUM, NULL, NULL}, 1290 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds), 1291 AVX_OFFSET(ds), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1292 INVALID_NUB_REGNUM, NULL, NULL}, 1293 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr), 1294 AVX_OFFSET(mxcsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1295 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1296 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex, 1297 FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), INVALID_NUB_REGNUM, 1298 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1299 1300 {e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, 1301 FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0, 1302 INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL}, 1303 {e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, 1304 FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1, 1305 INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL}, 1306 {e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, 1307 FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2, 1308 INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL}, 1309 {e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, 1310 FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3, 1311 INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL}, 1312 {e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, 1313 FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4, 1314 INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL}, 1315 {e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, 1316 FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5, 1317 INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL}, 1318 {e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, 1319 FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6, 1320 INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL}, 1321 {e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, 1322 FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7, 1323 INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL}, 1324 1325 {e_regSetFPU, fpu_ymm0, "ymm0", NULL, 
Vector, VectorOfUInt8, 1326 FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), INVALID_NUB_REGNUM, dwarf_ymm0, 1327 INVALID_NUB_REGNUM, debugserver_ymm0, NULL, NULL}, 1328 {e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8, 1329 FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), INVALID_NUB_REGNUM, dwarf_ymm1, 1330 INVALID_NUB_REGNUM, debugserver_ymm1, NULL, NULL}, 1331 {e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8, 1332 FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), INVALID_NUB_REGNUM, dwarf_ymm2, 1333 INVALID_NUB_REGNUM, debugserver_ymm2, NULL, NULL}, 1334 {e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8, 1335 FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), INVALID_NUB_REGNUM, dwarf_ymm3, 1336 INVALID_NUB_REGNUM, debugserver_ymm3, NULL, NULL}, 1337 {e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8, 1338 FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), INVALID_NUB_REGNUM, dwarf_ymm4, 1339 INVALID_NUB_REGNUM, debugserver_ymm4, NULL, NULL}, 1340 {e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8, 1341 FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), INVALID_NUB_REGNUM, dwarf_ymm5, 1342 INVALID_NUB_REGNUM, debugserver_ymm5, NULL, NULL}, 1343 {e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8, 1344 FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), INVALID_NUB_REGNUM, dwarf_ymm6, 1345 INVALID_NUB_REGNUM, debugserver_ymm6, NULL, NULL}, 1346 {e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8, 1347 FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), INVALID_NUB_REGNUM, dwarf_ymm7, 1348 INVALID_NUB_REGNUM, debugserver_ymm7, NULL, NULL}, 1349 1350 {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8, 1351 FPU_SIZE_XMM(xmm0), 0, INVALID_NUB_REGNUM, dwarf_xmm0, INVALID_NUB_REGNUM, 1352 debugserver_xmm0, g_contained_ymm0, NULL}, 1353 {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8, 1354 FPU_SIZE_XMM(xmm1), 0, INVALID_NUB_REGNUM, dwarf_xmm1, INVALID_NUB_REGNUM, 1355 debugserver_xmm1, g_contained_ymm1, NULL}, 1356 {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8, 1357 FPU_SIZE_XMM(xmm2), 0, INVALID_NUB_REGNUM, dwarf_xmm2, INVALID_NUB_REGNUM, 1358 debugserver_xmm2, g_contained_ymm2, NULL}, 1359 {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8, 1360 FPU_SIZE_XMM(xmm3), 0, INVALID_NUB_REGNUM, dwarf_xmm3, INVALID_NUB_REGNUM, 1361 debugserver_xmm3, g_contained_ymm3, NULL}, 1362 {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8, 1363 FPU_SIZE_XMM(xmm4), 0, INVALID_NUB_REGNUM, dwarf_xmm4, INVALID_NUB_REGNUM, 1364 debugserver_xmm4, g_contained_ymm4, NULL}, 1365 {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8, 1366 FPU_SIZE_XMM(xmm5), 0, INVALID_NUB_REGNUM, dwarf_xmm5, INVALID_NUB_REGNUM, 1367 debugserver_xmm5, g_contained_ymm5, NULL}, 1368 {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8, 1369 FPU_SIZE_XMM(xmm6), 0, INVALID_NUB_REGNUM, dwarf_xmm6, INVALID_NUB_REGNUM, 1370 debugserver_xmm6, g_contained_ymm6, NULL}, 1371 {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8, 1372 FPU_SIZE_XMM(xmm7), 0, INVALID_NUB_REGNUM, dwarf_xmm7, INVALID_NUB_REGNUM, 1373 debugserver_xmm7, g_contained_ymm7, NULL}, 1374 1375 }; 1376 1377 1378 #define STR(s) #s 1379 1380 #define ZMM_REG_DEF(reg) \ 1381 { \ 1382 e_regSetFPU, fpu_zmm##reg, STR(zmm##reg), NULL, Vector, VectorOfUInt8, \ 1383 FPU_SIZE_ZMM(zmm##reg), AVX512F_OFFSET_ZMM(reg), INVALID_NUB_REGNUM, \ 1384 dwarf_zmm##reg, INVALID_NUB_REGNUM, debugserver_zmm##reg, NULL, NULL \ 1385 } 1386 1387 #define YMM_REG_ALIAS(reg) \ 1388 { \ 1389 e_regSetFPU, fpu_ymm##reg, STR(ymm##reg), NULL, Vector, VectorOfUInt8, \ 
1390 FPU_SIZE_YMM(ymm##reg), 0, INVALID_NUB_REGNUM, dwarf_ymm##reg, \ 1391 INVALID_NUB_REGNUM, debugserver_ymm##reg, g_contained_zmm##reg, NULL \ 1392 } 1393 1394 #define XMM_REG_ALIAS(reg) \ 1395 { \ 1396 e_regSetFPU, fpu_xmm##reg, STR(xmm##reg), NULL, Vector, VectorOfUInt8, \ 1397 FPU_SIZE_XMM(xmm##reg), 0, INVALID_NUB_REGNUM, dwarf_xmm##reg, \ 1398 INVALID_NUB_REGNUM, debugserver_xmm##reg, g_contained_zmm##reg, NULL \ 1399 } 1400 1401 #define AVX512_K_REG_DEF(reg) \ 1402 { \ 1403 e_regSetFPU, fpu_k##reg, STR(k##reg), NULL, Vector, VectorOfUInt8, 8, \ 1404 AVX512F_OFFSET(k##reg), dwarf_k##reg, dwarf_k##reg, -1U, \ 1405 debugserver_k##reg, NULL, NULL \ 1406 } 1407 1408 static const char *g_contained_zmm0[] = {"zmm0", NULL}; 1409 static const char *g_contained_zmm1[] = {"zmm1", NULL}; 1410 static const char *g_contained_zmm2[] = {"zmm2", NULL}; 1411 static const char *g_contained_zmm3[] = {"zmm3", NULL}; 1412 static const char *g_contained_zmm4[] = {"zmm4", NULL}; 1413 static const char *g_contained_zmm5[] = {"zmm5", NULL}; 1414 static const char *g_contained_zmm6[] = {"zmm6", NULL}; 1415 static const char *g_contained_zmm7[] = {"zmm7", NULL}; 1416 1417 const DNBRegisterInfo DNBArchImplI386::g_fpu_registers_avx512f[] = { 1418 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw), 1419 AVX_OFFSET(fcw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1420 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1421 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw), 1422 AVX_OFFSET(fsw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1423 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1424 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */, 1425 FPU_OFFSET(ftw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1426 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1427 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop), 1428 AVX_OFFSET(fop), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1429 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1430 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip), 1431 AVX_OFFSET(ip), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1432 INVALID_NUB_REGNUM, NULL, NULL}, 1433 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs), 1434 AVX_OFFSET(cs), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1435 INVALID_NUB_REGNUM, NULL, NULL}, 1436 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp), 1437 AVX_OFFSET(dp), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1438 INVALID_NUB_REGNUM, NULL, NULL}, 1439 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds), 1440 AVX_OFFSET(ds), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1441 INVALID_NUB_REGNUM, NULL, NULL}, 1442 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr), 1443 AVX_OFFSET(mxcsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1444 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1445 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex, 1446 FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), INVALID_NUB_REGNUM, 1447 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1448 1449 {e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8, 1450 FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0, 1451 INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL}, 1452 {e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8, 1453 FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), 
INVALID_NUB_REGNUM, dwarf_stmm1, 1454 INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL}, 1455 {e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, 1456 FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2, 1457 INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL}, 1458 {e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, 1459 FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3, 1460 INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL}, 1461 {e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, 1462 FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4, 1463 INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL}, 1464 {e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, 1465 FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5, 1466 INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL}, 1467 {e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, 1468 FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6, 1469 INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL}, 1470 {e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, 1471 FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7, 1472 INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL}, 1473 1474 AVX512_K_REG_DEF(0), 1475 AVX512_K_REG_DEF(1), 1476 AVX512_K_REG_DEF(2), 1477 AVX512_K_REG_DEF(3), 1478 AVX512_K_REG_DEF(4), 1479 AVX512_K_REG_DEF(5), 1480 AVX512_K_REG_DEF(6), 1481 AVX512_K_REG_DEF(7), 1482 1483 ZMM_REG_DEF(0), 1484 ZMM_REG_DEF(1), 1485 ZMM_REG_DEF(2), 1486 ZMM_REG_DEF(3), 1487 ZMM_REG_DEF(4), 1488 ZMM_REG_DEF(5), 1489 ZMM_REG_DEF(6), 1490 ZMM_REG_DEF(7), 1491 1492 YMM_REG_ALIAS(0), 1493 YMM_REG_ALIAS(1), 1494 YMM_REG_ALIAS(2), 1495 YMM_REG_ALIAS(3), 1496 YMM_REG_ALIAS(4), 1497 YMM_REG_ALIAS(5), 1498 YMM_REG_ALIAS(6), 1499 YMM_REG_ALIAS(7), 1500 1501 XMM_REG_ALIAS(0), 1502 XMM_REG_ALIAS(1), 1503 XMM_REG_ALIAS(2), 1504 XMM_REG_ALIAS(3), 1505 XMM_REG_ALIAS(4), 1506 XMM_REG_ALIAS(5), 1507 XMM_REG_ALIAS(6), 1508 XMM_REG_ALIAS(7) 1509 1510 }; 1511 1512 const DNBRegisterInfo DNBArchImplI386::g_exc_registers[] = { 1513 {e_regSetEXC, exc_trapno, "trapno", NULL, Uint, Hex, EXC_SIZE(trapno), 1514 EXC_OFFSET(trapno), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1515 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1516 {e_regSetEXC, exc_err, "err", NULL, Uint, Hex, EXC_SIZE(err), 1517 EXC_OFFSET(err), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1518 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1519 {e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex, 1520 EXC_SIZE(faultvaddr), EXC_OFFSET(faultvaddr), INVALID_NUB_REGNUM, 1521 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}}; 1522 1523 // Number of registers in each register set 1524 const size_t DNBArchImplI386::k_num_gpr_registers = 1525 sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo); 1526 const size_t DNBArchImplI386::k_num_fpu_registers_no_avx = 1527 sizeof(g_fpu_registers_no_avx) / sizeof(DNBRegisterInfo); 1528 const size_t DNBArchImplI386::k_num_fpu_registers_avx = 1529 sizeof(g_fpu_registers_avx) / sizeof(DNBRegisterInfo); 1530 const size_t DNBArchImplI386::k_num_fpu_registers_avx512f = 1531 sizeof(g_fpu_registers_avx512f) / sizeof(DNBRegisterInfo); 1532 const size_t DNBArchImplI386::k_num_exc_registers = 1533 sizeof(g_exc_registers) / sizeof(DNBRegisterInfo); 1534 const size_t DNBArchImplI386::k_num_all_registers_no_avx = 1535 k_num_gpr_registers + k_num_fpu_registers_no_avx + 
    k_num_exc_registers;
const size_t DNBArchImplI386::k_num_all_registers_avx =
    k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
const size_t DNBArchImplI386::k_num_all_registers_avx512f =
    k_num_gpr_registers + k_num_fpu_registers_avx512f + k_num_exc_registers;

// Register set definitions. The first definition, at register set index zero,
// covers all registers and is followed by the individual register sets. The
// register information for the "all registers" set need not be filled in.
const DNBRegisterSetInfo DNBArchImplI386::g_reg_sets_no_avx[] = {
    {"i386 Registers", NULL, k_num_all_registers_no_avx},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_fpu_registers_no_avx,
     k_num_fpu_registers_no_avx},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};

const DNBRegisterSetInfo DNBArchImplI386::g_reg_sets_avx[] = {
    {"i386 Registers", NULL, k_num_all_registers_avx},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};

const DNBRegisterSetInfo DNBArchImplI386::g_reg_sets_avx512f[] = {
    {"i386 Registers", NULL, k_num_all_registers_avx512f},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_fpu_registers_avx512f,
     k_num_fpu_registers_avx512f},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};

// Total number of register sets for this architecture
const size_t DNBArchImplI386::k_num_register_sets =
    sizeof(g_reg_sets_avx) / sizeof(DNBRegisterSetInfo);

DNBArchProtocol *DNBArchImplI386::Create(MachThread *thread) {
  DNBArchImplI386 *obj = new DNBArchImplI386(thread);
  return obj;
}

const uint8_t *DNBArchImplI386::SoftwareBreakpointOpcode(nub_size_t byte_size) {
  static const uint8_t g_breakpoint_opcode[] = {0xCC};
  if (byte_size == 1)
    return g_breakpoint_opcode;
  return NULL;
}

const DNBRegisterSetInfo *
DNBArchImplI386::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  if (CPUHasAVX512f() || FORCE_AVX_REGS)
    return g_reg_sets_avx512f;
  if (CPUHasAVX())
    return g_reg_sets_avx;
  else
    return g_reg_sets_no_avx;
}

void DNBArchImplI386::Initialize() {
  DNBArchPluginInfo arch_plugin_info = {
      CPU_TYPE_I386, DNBArchImplI386::Create,
      DNBArchImplI386::GetRegisterSetInfo,
      DNBArchImplI386::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);
}

bool DNBArchImplI386::GetRegisterValue(uint32_t set, uint32_t reg,
                                       DNBRegisterValue *value) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_eip;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_esp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_ebp;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_eflags;
      break;

    case GENERIC_REGNUM_RA: // Return
Address 1626 default: 1627 return false; 1628 } 1629 } 1630 1631 if (GetRegisterState(set, false) != KERN_SUCCESS) 1632 return false; 1633 1634 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1635 if (regInfo) { 1636 value->info = *regInfo; 1637 switch (set) { 1638 case e_regSetGPR: 1639 if (reg < k_num_gpr_registers) { 1640 value->value.uint32 = ((uint32_t *)(&m_state.context.gpr))[reg]; 1641 return true; 1642 } 1643 break; 1644 1645 case e_regSetFPU: 1646 if (reg > fpu_xmm7 && !(CPUHasAVX() || FORCE_AVX_REGS)) 1647 return false; 1648 if (reg > fpu_ymm7 && !(CPUHasAVX512f() || FORCE_AVX_REGS)) 1649 return false; 1650 switch (reg) { 1651 case fpu_fcw: 1652 value->value.uint16 = 1653 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); 1654 return true; 1655 case fpu_fsw: 1656 value->value.uint16 = 1657 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); 1658 return true; 1659 case fpu_ftw: 1660 memcpy (&value->value.uint16, &m_state.context.fpu.no_avx.__fpu_ftw, 2); 1661 return true; 1662 case fpu_fop: 1663 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; 1664 return true; 1665 case fpu_ip: 1666 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; 1667 return true; 1668 case fpu_cs: 1669 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; 1670 return true; 1671 case fpu_dp: 1672 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; 1673 return true; 1674 case fpu_ds: 1675 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; 1676 return true; 1677 case fpu_mxcsr: 1678 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; 1679 return true; 1680 case fpu_mxcsrmask: 1681 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; 1682 return true; 1683 1684 case fpu_stmm0: 1685 memcpy(&value->value.uint8, 1686 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 10); 1687 return true; 1688 case fpu_stmm1: 1689 memcpy(&value->value.uint8, 1690 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 10); 1691 return true; 1692 case fpu_stmm2: 1693 memcpy(&value->value.uint8, 1694 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 10); 1695 return true; 1696 case fpu_stmm3: 1697 memcpy(&value->value.uint8, 1698 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 10); 1699 return true; 1700 case fpu_stmm4: 1701 memcpy(&value->value.uint8, 1702 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 10); 1703 return true; 1704 case fpu_stmm5: 1705 memcpy(&value->value.uint8, 1706 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 10); 1707 return true; 1708 case fpu_stmm6: 1709 memcpy(&value->value.uint8, 1710 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 10); 1711 return true; 1712 case fpu_stmm7: 1713 memcpy(&value->value.uint8, 1714 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 10); 1715 return true; 1716 1717 case fpu_xmm0: 1718 memcpy(&value->value.uint8, 1719 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 16); 1720 return true; 1721 case fpu_xmm1: 1722 memcpy(&value->value.uint8, 1723 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 16); 1724 return true; 1725 case fpu_xmm2: 1726 memcpy(&value->value.uint8, 1727 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 16); 1728 return true; 1729 case fpu_xmm3: 1730 memcpy(&value->value.uint8, 1731 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 16); 1732 return true; 1733 case fpu_xmm4: 1734 memcpy(&value->value.uint8, 1735 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 16); 1736 return true; 1737 case fpu_xmm5: 1738 memcpy(&value->value.uint8, 1739 
m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 16); 1740 return true; 1741 case fpu_xmm6: 1742 memcpy(&value->value.uint8, 1743 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 16); 1744 return true; 1745 case fpu_xmm7: 1746 memcpy(&value->value.uint8, 1747 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 16); 1748 return true; 1749 1750 #define MEMCPY_YMM(n) \ 1751 memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, \ 1752 16); \ 1753 memcpy((&value->value.uint8) + 16, \ 1754 m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, 16); 1755 case fpu_ymm0: 1756 MEMCPY_YMM(0); 1757 return true; 1758 case fpu_ymm1: 1759 MEMCPY_YMM(1); 1760 return true; 1761 case fpu_ymm2: 1762 MEMCPY_YMM(2); 1763 return true; 1764 case fpu_ymm3: 1765 MEMCPY_YMM(3); 1766 return true; 1767 case fpu_ymm4: 1768 MEMCPY_YMM(4); 1769 return true; 1770 case fpu_ymm5: 1771 MEMCPY_YMM(5); 1772 return true; 1773 case fpu_ymm6: 1774 MEMCPY_YMM(6); 1775 return true; 1776 case fpu_ymm7: 1777 MEMCPY_YMM(7); 1778 return true; 1779 #undef MEMCPY_YMM 1780 1781 case fpu_k0: 1782 case fpu_k1: 1783 case fpu_k2: 1784 case fpu_k3: 1785 case fpu_k4: 1786 case fpu_k5: 1787 case fpu_k6: 1788 case fpu_k7: 1789 memcpy((&value->value.uint8), 1790 &m_state.context.fpu.avx512f.__fpu_k0 + (reg - fpu_k0), 8); 1791 return true; 1792 case fpu_zmm0: 1793 case fpu_zmm1: 1794 case fpu_zmm2: 1795 case fpu_zmm3: 1796 case fpu_zmm4: 1797 case fpu_zmm5: 1798 case fpu_zmm6: 1799 case fpu_zmm7: 1800 memcpy(&value->value.uint8, 1801 &m_state.context.fpu.avx512f.__fpu_xmm0 + (reg - fpu_zmm0), 16); 1802 memcpy(&value->value.uint8 + 16, 1803 &m_state.context.fpu.avx512f.__fpu_ymmh0 + (reg - fpu_zmm0), 16); 1804 memcpy(&value->value.uint8 + 32, 1805 &m_state.context.fpu.avx512f.__fpu_zmmh0 + (reg - fpu_zmm0), 32); 1806 return true; 1807 } 1808 break; 1809 1810 case e_regSetEXC: 1811 if (reg < k_num_exc_registers) { 1812 value->value.uint32 = (&m_state.context.exc.__trapno)[reg]; 1813 return true; 1814 } 1815 break; 1816 } 1817 } 1818 return false; 1819 } 1820 1821 bool DNBArchImplI386::SetRegisterValue(uint32_t set, uint32_t reg, 1822 const DNBRegisterValue *value) { 1823 if (set == REGISTER_SET_GENERIC) { 1824 switch (reg) { 1825 case GENERIC_REGNUM_PC: // Program Counter 1826 set = e_regSetGPR; 1827 reg = gpr_eip; 1828 break; 1829 1830 case GENERIC_REGNUM_SP: // Stack Pointer 1831 set = e_regSetGPR; 1832 reg = gpr_esp; 1833 break; 1834 1835 case GENERIC_REGNUM_FP: // Frame Pointer 1836 set = e_regSetGPR; 1837 reg = gpr_ebp; 1838 break; 1839 1840 case GENERIC_REGNUM_FLAGS: // Processor flags register 1841 set = e_regSetGPR; 1842 reg = gpr_eflags; 1843 break; 1844 1845 case GENERIC_REGNUM_RA: // Return Address 1846 default: 1847 return false; 1848 } 1849 } 1850 1851 if (GetRegisterState(set, false) != KERN_SUCCESS) 1852 return false; 1853 1854 bool success = false; 1855 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1856 if (regInfo) { 1857 switch (set) { 1858 case e_regSetGPR: 1859 if (reg < k_num_gpr_registers) { 1860 ((uint32_t *)(&m_state.context.gpr))[reg] = value->value.uint32; 1861 success = true; 1862 } 1863 break; 1864 1865 case e_regSetFPU: 1866 if (reg > fpu_xmm7 && !(CPUHasAVX() || FORCE_AVX_REGS)) 1867 return false; 1868 if (reg > fpu_ymm7 && !(CPUHasAVX512f() || FORCE_AVX_REGS)) 1869 return false; 1870 switch (reg) { 1871 case fpu_fcw: 1872 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = 1873 value->value.uint16; 1874 success = true; 1875 break; 1876 case fpu_fsw: 1877 *((uint16_t 
*)(&m_state.context.fpu.no_avx.__fpu_fsw)) = 1878 value->value.uint16; 1879 success = true; 1880 break; 1881 case fpu_ftw: 1882 memcpy (&m_state.context.fpu.no_avx.__fpu_ftw, &value->value.uint16, 2); 1883 success = true; 1884 break; 1885 case fpu_fop: 1886 m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; 1887 success = true; 1888 break; 1889 case fpu_ip: 1890 m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; 1891 success = true; 1892 break; 1893 case fpu_cs: 1894 m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; 1895 success = true; 1896 break; 1897 case fpu_dp: 1898 m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; 1899 success = true; 1900 break; 1901 case fpu_ds: 1902 m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; 1903 success = true; 1904 break; 1905 case fpu_mxcsr: 1906 m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; 1907 success = true; 1908 break; 1909 case fpu_mxcsrmask: 1910 m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; 1911 success = true; 1912 break; 1913 1914 case fpu_stmm0: 1915 memcpy(m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 1916 &value->value.uint8, 10); 1917 success = true; 1918 break; 1919 case fpu_stmm1: 1920 memcpy(m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 1921 &value->value.uint8, 10); 1922 success = true; 1923 break; 1924 case fpu_stmm2: 1925 memcpy(m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 1926 &value->value.uint8, 10); 1927 success = true; 1928 break; 1929 case fpu_stmm3: 1930 memcpy(m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 1931 &value->value.uint8, 10); 1932 success = true; 1933 break; 1934 case fpu_stmm4: 1935 memcpy(m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 1936 &value->value.uint8, 10); 1937 success = true; 1938 break; 1939 case fpu_stmm5: 1940 memcpy(m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 1941 &value->value.uint8, 10); 1942 success = true; 1943 break; 1944 case fpu_stmm6: 1945 memcpy(m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 1946 &value->value.uint8, 10); 1947 success = true; 1948 break; 1949 case fpu_stmm7: 1950 memcpy(m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 1951 &value->value.uint8, 10); 1952 success = true; 1953 break; 1954 1955 case fpu_xmm0: 1956 memcpy(m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 1957 &value->value.uint8, 16); 1958 success = true; 1959 break; 1960 case fpu_xmm1: 1961 memcpy(m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 1962 &value->value.uint8, 16); 1963 success = true; 1964 break; 1965 case fpu_xmm2: 1966 memcpy(m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 1967 &value->value.uint8, 16); 1968 success = true; 1969 break; 1970 case fpu_xmm3: 1971 memcpy(m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 1972 &value->value.uint8, 16); 1973 success = true; 1974 break; 1975 case fpu_xmm4: 1976 memcpy(m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 1977 &value->value.uint8, 16); 1978 success = true; 1979 break; 1980 case fpu_xmm5: 1981 memcpy(m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 1982 &value->value.uint8, 16); 1983 success = true; 1984 break; 1985 case fpu_xmm6: 1986 memcpy(m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 1987 &value->value.uint8, 16); 1988 success = true; 1989 break; 1990 case fpu_xmm7: 1991 memcpy(m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 1992 &value->value.uint8, 16); 1993 success = true; 1994 break; 1995 1996 #define MEMCPY_YMM(n) \ 1997 memcpy(m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, &value->value.uint8, \ 1998 16); \ 1999 
  memcpy(m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg,                      \
         (&value->value.uint8) + 16, 16);
      case fpu_ymm0:
        MEMCPY_YMM(0);
        return true;
      case fpu_ymm1:
        MEMCPY_YMM(1);
        return true;
      case fpu_ymm2:
        MEMCPY_YMM(2);
        return true;
      case fpu_ymm3:
        MEMCPY_YMM(3);
        return true;
      case fpu_ymm4:
        MEMCPY_YMM(4);
        return true;
      case fpu_ymm5:
        MEMCPY_YMM(5);
        return true;
      case fpu_ymm6:
        MEMCPY_YMM(6);
        return true;
      case fpu_ymm7:
        MEMCPY_YMM(7);
        return true;
#undef MEMCPY_YMM

      case fpu_k0:
      case fpu_k1:
      case fpu_k2:
      case fpu_k3:
      case fpu_k4:
      case fpu_k5:
      case fpu_k6:
      case fpu_k7:
        memcpy(&m_state.context.fpu.avx512f.__fpu_k0 + (reg - fpu_k0),
               &value->value.uint8, 8);
        return true;
      case fpu_zmm0:
      case fpu_zmm1:
      case fpu_zmm2:
      case fpu_zmm3:
      case fpu_zmm4:
      case fpu_zmm5:
      case fpu_zmm6:
      case fpu_zmm7:
        memcpy(&m_state.context.fpu.avx512f.__fpu_xmm0 + (reg - fpu_zmm0),
               &value->value.uint8, 16);
        memcpy(&m_state.context.fpu.avx512f.__fpu_ymmh0 + (reg - fpu_zmm0),
               &value->value.uint8 + 16, 16);
        memcpy(&m_state.context.fpu.avx512f.__fpu_zmmh0 + (reg - fpu_zmm0),
               &value->value.uint8 + 32, 32);
        return true;
      }
      break;

    case e_regSetEXC:
      if (reg < k_num_exc_registers) {
        (&m_state.context.exc.__trapno)[reg] = value->value.uint32;
        success = true;
      }
      break;
    }
  }

  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}

uint32_t DNBArchImplI386::GetRegisterContextSize() {
  static uint32_t g_cached_size = 0;
  if (g_cached_size == 0) {
    if (CPUHasAVX512f() || FORCE_AVX_REGS) {
      for (size_t i = 0; i < k_num_fpu_registers_avx512f; ++i) {
        if (g_fpu_registers_avx512f[i].value_regs == NULL)
          g_cached_size += g_fpu_registers_avx512f[i].size;
      }
    } else if (CPUHasAVX()) {
      for (size_t i = 0; i < k_num_fpu_registers_avx; ++i) {
        if (g_fpu_registers_avx[i].value_regs == NULL)
          g_cached_size += g_fpu_registers_avx[i].size;
      }
    } else {
      for (size_t i = 0; i < k_num_fpu_registers_no_avx; ++i) {
        if (g_fpu_registers_no_avx[i].value_regs == NULL)
          g_cached_size += g_fpu_registers_no_avx[i].size;
      }
    }
    DNBLogThreaded("DNBArchImplI386::GetRegisterContextSize() - GPR = %zu, "
                   "FPU = %u, EXC = %zu",
                   sizeof(GPR), g_cached_size, sizeof(EXC));
    g_cached_size += sizeof(GPR);
    g_cached_size += sizeof(EXC);
    DNBLogThreaded(
        "DNBArchImplI386::GetRegisterContextSize() - GPR + FPU + EXC = %u",
        g_cached_size);
  }
  return g_cached_size;
}

nub_size_t DNBArchImplI386::GetRegisterContext(void *buf, nub_size_t buf_len) {
  uint32_t size = GetRegisterContextSize();

  if (buf && buf_len) {
    if (size > buf_len)
      size = static_cast<uint32_t>(buf_len);

    bool force = false;
    kern_return_t kret;
    if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = "
                                   "%p, len = %llu) error: GPR regs failed to "
                                   "read: %u ",
                       buf, (uint64_t)buf_len, kret);
      size = 0;
    } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) {
      DNBLogThreadedIf(
          LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = "
                      "%llu) error: %s regs failed to read:
%u", 2121 buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret); 2122 size = 0; 2123 } else if ((kret = GetEXCState(force)) != KERN_SUCCESS) { 2124 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = " 2125 "%p, len = %llu) error: EXC regs failed to " 2126 "read: %u", 2127 buf, (uint64_t)buf_len, kret); 2128 size = 0; 2129 } else { 2130 uint8_t *p = (uint8_t *)buf; 2131 // Copy the GPR registers 2132 memcpy(p, &m_state.context.gpr, sizeof(GPR)); 2133 p += sizeof(GPR); 2134 2135 // Walk around the gaps in the FPU regs 2136 memcpy(p, &m_state.context.fpu.no_avx.__fpu_fcw, 5); 2137 p += 5; 2138 memcpy(p, &m_state.context.fpu.no_avx.__fpu_fop, 8); 2139 p += 8; 2140 memcpy(p, &m_state.context.fpu.no_avx.__fpu_dp, 6); 2141 p += 6; 2142 memcpy(p, &m_state.context.fpu.no_avx.__fpu_mxcsr, 8); 2143 p += 8; 2144 2145 // Work around the padding between the stmm registers as they are 16 2146 // byte structs with 10 bytes of the value in each 2147 for (size_t i = 0; i < 8; ++i) { 2148 memcpy(p, &m_state.context.fpu.no_avx.__fpu_stmm0 + i, 10); 2149 p += 10; 2150 } 2151 2152 if (CPUHasAVX512f() || FORCE_AVX_REGS) { 2153 for (size_t i = 0; i < 8; ++i) { 2154 memcpy(p, &m_state.context.fpu.avx512f.__fpu_k0 + i, 8); 2155 p += 8; 2156 } 2157 } 2158 2159 if (CPUHasAVX() || FORCE_AVX_REGS) { 2160 // Interleave the XMM and YMMH registers to make the YMM registers 2161 for (size_t i = 0; i < 8; ++i) { 2162 memcpy(p, &m_state.context.fpu.avx.__fpu_xmm0 + i, 16); 2163 p += 16; 2164 memcpy(p, &m_state.context.fpu.avx.__fpu_ymmh0 + i, 16); 2165 p += 16; 2166 } 2167 if(CPUHasAVX512f() || FORCE_AVX_REGS) { 2168 for (size_t i = 0; i < 8; ++i) { 2169 memcpy(p, &m_state.context.fpu.avx512f.__fpu_zmmh0 + i, 32); 2170 p += 32; 2171 } 2172 } 2173 } else { 2174 // Copy the XMM registers in a single block 2175 memcpy(p, &m_state.context.fpu.no_avx.__fpu_xmm0, 8 * 16); 2176 p += 8 * 16; 2177 } 2178 2179 // Copy the exception registers 2180 memcpy(p, &m_state.context.exc, sizeof(EXC)); 2181 p += sizeof(EXC); 2182 2183 // make sure we end up with exactly what we think we should have 2184 size_t bytes_written = p - (uint8_t *)buf; 2185 UNUSED_IF_ASSERT_DISABLED(bytes_written); 2186 assert(bytes_written == size); 2187 } 2188 } 2189 DNBLogThreadedIf( 2190 LOG_THREAD, 2191 "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) => %llu", buf, 2192 (uint64_t)buf_len, (uint64_t)size); 2193 // Return the size of the register context even if NULL was passed in 2194 return size; 2195 } 2196 2197 nub_size_t DNBArchImplI386::SetRegisterContext(const void *buf, 2198 nub_size_t buf_len) { 2199 nub_size_t size = sizeof(m_state.context); 2200 if (buf == NULL || buf_len == 0) 2201 size = 0; 2202 2203 if (size) { 2204 if (size > buf_len) 2205 size = buf_len; 2206 2207 const uint8_t *p = (const uint8_t *)buf; 2208 // Copy the GPR registers 2209 memcpy(&m_state.context.gpr, p, sizeof(GPR)); 2210 p += sizeof(GPR); 2211 2212 // Copy fcw through mxcsrmask as there is no padding 2213 memcpy(&m_state.context.fpu.no_avx.__fpu_fcw, p, 5); 2214 p += 5; 2215 memcpy(&m_state.context.fpu.no_avx.__fpu_fop, p, 8); 2216 p += 8; 2217 memcpy(&m_state.context.fpu.no_avx.__fpu_dp, p, 6); 2218 p += 6; 2219 memcpy(&m_state.context.fpu.no_avx.__fpu_mxcsr, p, 8); 2220 p += 8; 2221 2222 // Work around the padding between the stmm registers as they are 16 2223 // byte structs with 10 bytes of the value in each 2224 for (size_t i = 0; i < 8; ++i) { 2225 memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + i, p, 10); 2226 p += 10; 2227 } 2228 2229 
    if (CPUHasAVX512f() || FORCE_AVX_REGS) {
      for (size_t i = 0; i < 8; ++i) {
        memcpy(&m_state.context.fpu.avx512f.__fpu_k0 + i, p, 8);
        p += 8;
      }
    }

    if (CPUHasAVX() || FORCE_AVX_REGS) {
      // Interleave the XMM and YMMH registers to make the YMM registers
      for (size_t i = 0; i < 8; ++i) {
        memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + i, p, 16);
        p += 16;
        memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + i, p, 16);
        p += 16;
      }

      if (CPUHasAVX512f() || FORCE_AVX_REGS) {
        for (size_t i = 0; i < 8; ++i) {
          memcpy(&m_state.context.fpu.avx512f.__fpu_zmmh0 + i, p, 32);
          p += 32;
        }
      }
    } else {
      // Copy the XMM registers in a single block
      memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0, p, 8 * 16);
      p += 8 * 16;
    }

    // Copy the exception registers
    memcpy(&m_state.context.exc, p, sizeof(EXC));
    p += sizeof(EXC);

    // make sure we end up with exactly what we think we should have
    size_t bytes_written = p - (const uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
    kern_return_t kret;
    if ((kret = SetGPRState()) != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = "
                                   "%p, len = %llu) error: GPR regs failed to "
                                   "write: %u",
                       buf, (uint64_t)buf_len, kret);
    if ((kret = SetFPUState()) != KERN_SUCCESS)
      DNBLogThreadedIf(
          LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = "
                      "%llu) error: %s regs failed to write: %u",
          buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
    if ((kret = SetEXCState()) != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = "
                                   "%p, len = %llu) error: EXC regs failed to "
                                   "write: %u",
                       buf, (uint64_t)buf_len, kret);
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) => %llu", buf,
      (uint64_t)buf_len, (uint64_t)size);
  return size;
}

uint32_t DNBArchImplI386::SaveRegisterState() {
  kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
  DNBLogThreadedIf(
      LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
                  "(SetGPRState() for stop_count = %u)",
      m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());

  bool force = true;

  if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SaveRegisterState () error: "
                                 "GPR regs failed to read: %u ",
                     kret);
  } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SaveRegisterState () error: "
                                 "%s regs failed to read: %u",
                     CPUHasAVX() ?
"AVX" : "FPU", kret); 2306 } else { 2307 const uint32_t save_id = GetNextRegisterStateSaveID(); 2308 m_saved_register_states[save_id] = m_state.context; 2309 return save_id; 2310 } 2311 return 0; 2312 } 2313 bool DNBArchImplI386::RestoreRegisterState(uint32_t save_id) { 2314 SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id); 2315 if (pos != m_saved_register_states.end()) { 2316 m_state.context.gpr = pos->second.gpr; 2317 m_state.context.fpu = pos->second.fpu; 2318 m_state.context.exc = pos->second.exc; 2319 m_state.SetError(e_regSetGPR, Read, 0); 2320 m_state.SetError(e_regSetFPU, Read, 0); 2321 m_state.SetError(e_regSetEXC, Read, 0); 2322 kern_return_t kret; 2323 bool success = true; 2324 if ((kret = SetGPRState()) != KERN_SUCCESS) { 2325 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::RestoreRegisterState " 2326 "(save_id = %u) error: GPR regs failed to " 2327 "write: %u", 2328 save_id, kret); 2329 success = false; 2330 } else if ((kret = SetFPUState()) != KERN_SUCCESS) { 2331 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::RestoreRegisterState " 2332 "(save_id = %u) error: %s regs failed to " 2333 "write: %u", 2334 save_id, CPUHasAVX() ? "AVX" : "FPU", kret); 2335 success = false; 2336 } 2337 m_saved_register_states.erase(pos); 2338 return success; 2339 } 2340 return false; 2341 } 2342 2343 kern_return_t DNBArchImplI386::GetRegisterState(int set, bool force) { 2344 switch (set) { 2345 case e_regSetALL: 2346 return GetGPRState(force) | GetFPUState(force) | GetEXCState(force); 2347 case e_regSetGPR: 2348 return GetGPRState(force); 2349 case e_regSetFPU: 2350 return GetFPUState(force); 2351 case e_regSetEXC: 2352 return GetEXCState(force); 2353 default: 2354 break; 2355 } 2356 return KERN_INVALID_ARGUMENT; 2357 } 2358 2359 kern_return_t DNBArchImplI386::SetRegisterState(int set) { 2360 // Make sure we have a valid context to set. 2361 if (RegisterSetStateIsValid(set)) { 2362 switch (set) { 2363 case e_regSetALL: 2364 return SetGPRState() | SetFPUState() | SetEXCState(); 2365 case e_regSetGPR: 2366 return SetGPRState(); 2367 case e_regSetFPU: 2368 return SetFPUState(); 2369 case e_regSetEXC: 2370 return SetEXCState(); 2371 default: 2372 break; 2373 } 2374 } 2375 return KERN_INVALID_ARGUMENT; 2376 } 2377 2378 bool DNBArchImplI386::RegisterSetStateIsValid(int set) const { 2379 return m_state.RegsAreValid(set); 2380 } 2381 2382 #endif // #if defined (__i386__) 2383