//===-- DNBArchImplI386.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined(__i386__) || defined(__x86_64__)

#include <sys/cdefs.h>

#include "DNBLog.h"
#include "MacOSX/i386/DNBArchImplI386.h"
#include "MachProcess.h"
#include "MachThread.h"

extern "C" bool CPUHasAVX();     // Defined over in DNBArchImplX86_64.cpp
extern "C" bool CPUHasAVX512f(); // Defined over in DNBArchImplX86_64.cpp

#if defined(LLDB_DEBUGSERVER_RELEASE) || defined(LLDB_DEBUGSERVER_DEBUG)
enum debugState { debugStateUnknown, debugStateOff, debugStateOn };

static debugState sFPUDebugState = debugStateUnknown;
static debugState sAVXForceState = debugStateUnknown;

static bool DebugFPURegs() {
  if (sFPUDebugState == debugStateUnknown) {
    if (getenv("DNB_DEBUG_FPU_REGS"))
      sFPUDebugState = debugStateOn;
    else
      sFPUDebugState = debugStateOff;
  }

  return (sFPUDebugState == debugStateOn);
}

static bool ForceAVXRegs() {
  if (sAVXForceState == debugStateUnknown) {
    if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
      sAVXForceState = debugStateOn;
    else
      sAVXForceState = debugStateOff;
  }

  return (sAVXForceState == debugStateOn);
}

#define DEBUG_FPU_REGS (DebugFPURegs())
#define FORCE_AVX_REGS (ForceAVXRegs())
#else
#define DEBUG_FPU_REGS (0)
#define FORCE_AVX_REGS (0)
#endif

enum {
  gpr_eax = 0,
  gpr_ebx = 1,
  gpr_ecx = 2,
  gpr_edx = 3,
  gpr_edi = 4,
  gpr_esi = 5,
  gpr_ebp = 6,
  gpr_esp = 7,
  gpr_ss = 8,
  gpr_eflags = 9,
  gpr_eip = 10,
  gpr_cs = 11,
  gpr_ds = 12,
  gpr_es = 13,
  gpr_fs = 14,
  gpr_gs = 15,
  gpr_ax,
  gpr_bx,
  gpr_cx,
  gpr_dx,
  gpr_di,
  gpr_si,
  gpr_bp,
  gpr_sp,
  gpr_ah,
  gpr_bh,
  gpr_ch,
  gpr_dh,
  gpr_al,
  gpr_bl,
  gpr_cl,
  gpr_dl,
  gpr_dil,
  gpr_sil,
  gpr_bpl,
  gpr_spl,
  k_num_gpr_regs
};

enum {
  fpu_fcw,
  fpu_fsw,
  fpu_ftw,
  fpu_fop,
  fpu_ip,
  fpu_cs,
  fpu_dp,
  fpu_ds,
  fpu_mxcsr,
  fpu_mxcsrmask,
  fpu_stmm0,
  fpu_stmm1,
  fpu_stmm2,
  fpu_stmm3,
  fpu_stmm4,
  fpu_stmm5,
  fpu_stmm6,
  fpu_stmm7,
  fpu_xmm0,
  fpu_xmm1,
  fpu_xmm2,
  fpu_xmm3,
  fpu_xmm4,
  fpu_xmm5,
  fpu_xmm6,
  fpu_xmm7,
  fpu_ymm0,
  fpu_ymm1,
  fpu_ymm2,
  fpu_ymm3,
  fpu_ymm4,
  fpu_ymm5,
  fpu_ymm6,
  fpu_ymm7,
  fpu_k0,
  fpu_k1,
  fpu_k2,
  fpu_k3,
  fpu_k4,
  fpu_k5,
  fpu_k6,
  fpu_k7,
  fpu_zmm0,
  fpu_zmm1,
  fpu_zmm2,
  fpu_zmm3,
  fpu_zmm4,
  fpu_zmm5,
  fpu_zmm6,
  fpu_zmm7,
  k_num_fpu_regs,

  // Aliases
  fpu_fctrl = fpu_fcw,
  fpu_fstat = fpu_fsw,
  fpu_ftag = fpu_ftw,
  fpu_fiseg = fpu_cs,
  fpu_fioff = fpu_ip,
  fpu_foseg = fpu_ds,
  fpu_fooff = fpu_dp
};

enum {
  exc_trapno,
  exc_err,
  exc_faultvaddr,
  k_num_exc_regs,
};

enum {
  ehframe_eax = 0,
  ehframe_ecx,
  ehframe_edx,
  ehframe_ebx,

  // On i386 Darwin the eh_frame register numbers for ebp and esp are reversed
  // from DWARF.
  // It's due to an ancient compiler bug in the output of the eh_frame.
  // Specifically, on i386 darwin eh_frame, 4 is ebp, 5 is esp.
  // On i386 darwin debug_frame (and debug_info), 4 is esp, 5 is ebp.
  ehframe_ebp,
  ehframe_esp,
  ehframe_esi,
  ehframe_edi,
  ehframe_eip,
  ehframe_eflags
};

enum {
  dwarf_eax = 0,
  dwarf_ecx,
  dwarf_edx,
  dwarf_ebx,
  dwarf_esp,
  dwarf_ebp,
  dwarf_esi,
  dwarf_edi,
  dwarf_eip,
  dwarf_eflags,
  dwarf_stmm0 = 11,
  dwarf_stmm1,
  dwarf_stmm2,
  dwarf_stmm3,
  dwarf_stmm4,
  dwarf_stmm5,
  dwarf_stmm6,
  dwarf_stmm7,
  dwarf_xmm0 = 21,
  dwarf_xmm1,
  dwarf_xmm2,
  dwarf_xmm3,
  dwarf_xmm4,
  dwarf_xmm5,
  dwarf_xmm6,
  dwarf_xmm7,
  dwarf_ymm0 = dwarf_xmm0,
  dwarf_ymm1 = dwarf_xmm1,
  dwarf_ymm2 = dwarf_xmm2,
  dwarf_ymm3 = dwarf_xmm3,
  dwarf_ymm4 = dwarf_xmm4,
  dwarf_ymm5 = dwarf_xmm5,
  dwarf_ymm6 = dwarf_xmm6,
  dwarf_ymm7 = dwarf_xmm7,
  dwarf_zmm0 = dwarf_xmm0,
  dwarf_zmm1 = dwarf_xmm1,
  dwarf_zmm2 = dwarf_xmm2,
  dwarf_zmm3 = dwarf_xmm3,
  dwarf_zmm4 = dwarf_xmm4,
  dwarf_zmm5 = dwarf_xmm5,
  dwarf_zmm6 = dwarf_xmm6,
  dwarf_zmm7 = dwarf_xmm7,
  dwarf_k0 = 118,
  dwarf_k1,
  dwarf_k2,
  dwarf_k3,
  dwarf_k4,
  dwarf_k5,
  dwarf_k6,
  dwarf_k7,
};

enum {
  debugserver_eax = 0,
  debugserver_ecx = 1,
  debugserver_edx = 2,
  debugserver_ebx = 3,
  debugserver_esp = 4,
  debugserver_ebp = 5,
  debugserver_esi = 6,
  debugserver_edi = 7,
  debugserver_eip = 8,
  debugserver_eflags = 9,
  debugserver_cs = 10,
  debugserver_ss = 11,
  debugserver_ds = 12,
  debugserver_es = 13,
  debugserver_fs = 14,
  debugserver_gs = 15,
  debugserver_stmm0 = 16,
  debugserver_stmm1 = 17,
  debugserver_stmm2 = 18,
  debugserver_stmm3 = 19,
  debugserver_stmm4 = 20,
  debugserver_stmm5 = 21,
  debugserver_stmm6 = 22,
  debugserver_stmm7 = 23,
  debugserver_fctrl = 24,
  debugserver_fcw = debugserver_fctrl,
  debugserver_fstat = 25,
  debugserver_fsw = debugserver_fstat,
  debugserver_ftag = 26,
  debugserver_ftw = debugserver_ftag,
  debugserver_fiseg = 27,
  debugserver_fpu_cs = debugserver_fiseg,
  debugserver_fioff = 28,
  debugserver_ip = debugserver_fioff,
  debugserver_foseg = 29,
  debugserver_fpu_ds = debugserver_foseg,
  debugserver_fooff = 30,
  debugserver_dp = debugserver_fooff,
  debugserver_fop = 31,
  debugserver_xmm0 = 32,
  debugserver_xmm1 = 33,
  debugserver_xmm2 = 34,
  debugserver_xmm3 = 35,
  debugserver_xmm4 = 36,
  debugserver_xmm5 = 37,
  debugserver_xmm6 = 38,
  debugserver_xmm7 = 39,
  debugserver_mxcsr = 40,
  debugserver_mm0 = 41,
  debugserver_mm1 = 42,
  debugserver_mm2 = 43,
  debugserver_mm3 = 44,
  debugserver_mm4 = 45,
  debugserver_mm5 = 46,
  debugserver_mm6 = 47,
  debugserver_mm7 = 48,
  debugserver_ymm0 = debugserver_xmm0,
  debugserver_ymm1 = debugserver_xmm1,
  debugserver_ymm2 = debugserver_xmm2,
  debugserver_ymm3 = debugserver_xmm3,
  debugserver_ymm4 = debugserver_xmm4,
  debugserver_ymm5 = debugserver_xmm5,
  debugserver_ymm6 = debugserver_xmm6,
  debugserver_ymm7 = debugserver_xmm7,
  debugserver_zmm0 = debugserver_xmm0,
  debugserver_zmm1 = debugserver_xmm1,
  debugserver_zmm2 = debugserver_xmm2,
  debugserver_zmm3 = debugserver_xmm3,
  debugserver_zmm4 = debugserver_xmm4,
  debugserver_zmm5 = debugserver_xmm5,
  debugserver_zmm6 = debugserver_xmm6,
  debugserver_zmm7 = debugserver_xmm7,
  debugserver_k0 = 118,
  debugserver_k1 = 119,
  debugserver_k2 = 120,
  debugserver_k3 = 121,
  debugserver_k4 = 122,
  debugserver_k5 = 123,
  debugserver_k6 = 124,
  debugserver_k7 = 125,
};

uint64_t DNBArchImplI386::GetPC(uint64_t failValue) {
  // Get program counter
  if (GetGPRState(false) == KERN_SUCCESS)
    return m_state.context.gpr.__eip;
  return failValue;
}

kern_return_t DNBArchImplI386::SetPC(uint64_t value) {
  // Set the program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
    m_state.context.gpr.__eip = static_cast<uint32_t>(value);
    err = SetGPRState();
  }
  return err == KERN_SUCCESS;
}

uint64_t DNBArchImplI386::GetSP(uint64_t failValue) {
  // Get stack pointer
  if (GetGPRState(false) == KERN_SUCCESS)
    return m_state.context.gpr.__esp;
  return failValue;
}

// Uncomment the value below to verify the values in the debugger.
//#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
//#define SET_GPR(reg) m_state.context.gpr.__##reg = gpr_##reg

kern_return_t DNBArchImplI386::GetGPRState(bool force) {
  if (force || m_state.GetError(e_regSetGPR, Read)) {
#if DEBUG_GPR_VALUES
    SET_GPR(eax);
    SET_GPR(ebx);
    SET_GPR(ecx);
    SET_GPR(edx);
    SET_GPR(edi);
    SET_GPR(esi);
    SET_GPR(ebp);
    SET_GPR(esp);
    SET_GPR(ss);
    SET_GPR(eflags);
    SET_GPR(eip);
    SET_GPR(cs);
    SET_GPR(ds);
    SET_GPR(es);
    SET_GPR(fs);
    SET_GPR(gs);
    m_state.SetError(e_regSetGPR, Read, 0);
#else
    mach_msg_type_number_t count = e_regSetWordSizeGPR;
    m_state.SetError(
        e_regSetGPR, Read,
        ::thread_get_state(m_thread->MachPortNumber(), __i386_THREAD_STATE,
                           (thread_state_t)&m_state.context.gpr, &count));
#endif
  }
  return m_state.GetError(e_regSetGPR, Read);
}

// Uncomment the value below to verify the values in the debugger.
//#define DEBUG_FPU_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED

kern_return_t DNBArchImplI386::GetFPUState(bool force) {
  if (force || m_state.GetError(e_regSetFPU, Read)) {
    if (DEBUG_FPU_REGS) {

      m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
      m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
      *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
      *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
      m_state.context.fpu.no_avx.__fpu_ftw = 1;
      m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
      m_state.context.fpu.no_avx.__fpu_fop = 2;
      m_state.context.fpu.no_avx.__fpu_ip = 3;
      m_state.context.fpu.no_avx.__fpu_cs = 4;
      m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
      m_state.context.fpu.no_avx.__fpu_dp = 6;
      m_state.context.fpu.no_avx.__fpu_ds = 7;
      m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
      m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
      m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
      for (int i = 0; i < 16; ++i) {
        if (i < 10) {
          m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
          m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
          m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
          m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
          m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
          m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
          m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
          m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
        } else {
          m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
          m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
          m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
          m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
          m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
          m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
          m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
          m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
        }

        m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
        m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
        m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
        m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
        m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
        m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
        m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
        m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
      }
      for (int i = 0; i < sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
        m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
      m_state.context.fpu.no_avx.__fpu_reserved1 = -1;

      if (CPUHasAVX() || FORCE_AVX_REGS) {
        for (int i = 0; i < sizeof(m_state.context.fpu.avx.__avx_reserved1);
             ++i)
          m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;

        for (int i = 0; i < 16; ++i) {
          m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0';
          m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1';
          m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2';
          m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3';
          m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4';
          m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5';
          m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6';
          m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7';
        }
      }
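      // When the CPU has AVX512 state (or it is forced via
      // DNB_DEBUG_X86_FORCE_AVX_REGS), also fill the opmask registers (k0-k7)
      // and the upper ZMM halves with per-register byte patterns so they are
      // easy to recognize in the debugger.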
      if (CPUHasAVX512f() || FORCE_AVX_REGS) {
        for (int i = 0; i < 8; ++i) {
          m_state.context.fpu.avx512f.__fpu_k0.__opmask_reg[i] = '0';
          m_state.context.fpu.avx512f.__fpu_k1.__opmask_reg[i] = '1';
          m_state.context.fpu.avx512f.__fpu_k2.__opmask_reg[i] = '2';
          m_state.context.fpu.avx512f.__fpu_k3.__opmask_reg[i] = '3';
          m_state.context.fpu.avx512f.__fpu_k4.__opmask_reg[i] = '4';
          m_state.context.fpu.avx512f.__fpu_k5.__opmask_reg[i] = '5';
          m_state.context.fpu.avx512f.__fpu_k6.__opmask_reg[i] = '6';
          m_state.context.fpu.avx512f.__fpu_k7.__opmask_reg[i] = '7';
        }

        for (int i = 0; i < 32; ++i) {
          m_state.context.fpu.avx512f.__fpu_zmmh0.__ymm_reg[i] = '0';
          m_state.context.fpu.avx512f.__fpu_zmmh1.__ymm_reg[i] = '1';
          m_state.context.fpu.avx512f.__fpu_zmmh2.__ymm_reg[i] = '2';
          m_state.context.fpu.avx512f.__fpu_zmmh3.__ymm_reg[i] = '3';
          m_state.context.fpu.avx512f.__fpu_zmmh4.__ymm_reg[i] = '4';
          m_state.context.fpu.avx512f.__fpu_zmmh5.__ymm_reg[i] = '5';
          m_state.context.fpu.avx512f.__fpu_zmmh6.__ymm_reg[i] = '6';
          m_state.context.fpu.avx512f.__fpu_zmmh7.__ymm_reg[i] = '7';
        }
      }
      m_state.SetError(e_regSetFPU, Read, 0);
    } else {
      mach_msg_type_number_t count = e_regSetWordSizeFPU;
      int flavor = __i386_FLOAT_STATE;

      // On a machine with the AVX512 register set, a process only gets a
      // full AVX512 register context after it uses the AVX512 registers;
      // if the process has not yet triggered this change, trying to fetch
      // the AVX512 registers will fail. Fall through to fetching the AVX
      // registers.
      if (CPUHasAVX512f() || FORCE_AVX_REGS) {
        count = e_regSetWordSizeAVX512f;
        flavor = __i386_AVX512F_STATE;
        m_state.SetError(e_regSetFPU, Read,
                         ::thread_get_state(m_thread->MachPortNumber(), flavor,
                                            (thread_state_t)&m_state.context.fpu,
                                            &count));
        DNBLogThreadedIf(LOG_THREAD,
                         "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x",
                         m_thread->MachPortNumber(), flavor, (uint32_t)count,
                         m_state.GetError(e_regSetFPU, Read));
        if (m_state.GetError(e_regSetFPU, Read) == KERN_SUCCESS)
          return m_state.GetError(e_regSetFPU, Read);
      }
      if (CPUHasAVX()) {
        count = e_regSetWordSizeAVX;
        flavor = __i386_AVX_STATE;
      }
      m_state.SetError(e_regSetFPU, Read,
                       ::thread_get_state(m_thread->MachPortNumber(), flavor,
                                          (thread_state_t)&m_state.context.fpu,
                                          &count));
      DNBLogThreadedIf(LOG_THREAD,
                       "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x",
                       m_thread->MachPortNumber(), flavor, (uint32_t)count,
                       m_state.GetError(e_regSetFPU, Read));
    }
  }
  return m_state.GetError(e_regSetFPU, Read);
}

kern_return_t DNBArchImplI386::GetEXCState(bool force) {
  if (force || m_state.GetError(e_regSetEXC, Read)) {
    mach_msg_type_number_t count = e_regSetWordSizeEXC;
    m_state.SetError(
        e_regSetEXC, Read,
        ::thread_get_state(m_thread->MachPortNumber(), __i386_EXCEPTION_STATE,
                           (thread_state_t)&m_state.context.exc, &count));
  }
  return m_state.GetError(e_regSetEXC, Read);
}

kern_return_t DNBArchImplI386::SetGPRState() {
  kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
  DNBLogThreadedIf(
      LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
                  "(SetGPRState() for stop_count = %u)",
      m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());

  m_state.SetError(e_regSetGPR, Write,
                   ::thread_set_state(m_thread->MachPortNumber(),
                                      __i386_THREAD_STATE,
                                      (thread_state_t)&m_state.context.gpr,
                                      e_regSetWordSizeGPR));
  return m_state.GetError(e_regSetGPR, Write);
}

kern_return_t DNBArchImplI386::SetFPUState() {
  if (DEBUG_FPU_REGS) {
    m_state.SetError(e_regSetFPU, Write, 0);
    return m_state.GetError(e_regSetFPU, Write);
  } else {
    int flavor = __i386_FLOAT_STATE;
    mach_msg_type_number_t count = e_regSetWordSizeFPU;
    if (CPUHasAVX512f() || FORCE_AVX_REGS) {
      flavor = __i386_AVX512F_STATE;
      count = e_regSetWordSizeAVX512f;
    } else if (CPUHasAVX()) {
      flavor = __i386_AVX_STATE;
      count = e_regSetWordSizeAVX;
    }

    m_state.SetError(e_regSetFPU, Write,
                     ::thread_set_state(m_thread->MachPortNumber(), flavor,
                                        (thread_state_t)&m_state.context.fpu,
                                        count));
    return m_state.GetError(e_regSetFPU, Write);
  }
}

kern_return_t DNBArchImplI386::SetEXCState() {
  m_state.SetError(e_regSetEXC, Write,
                   ::thread_set_state(m_thread->MachPortNumber(),
                                      __i386_EXCEPTION_STATE,
                                      (thread_state_t)&m_state.context.exc,
                                      e_regSetWordSizeEXC));
  return m_state.GetError(e_regSetEXC, Write);
}

kern_return_t DNBArchImplI386::GetDBGState(bool force) {
  if (force || m_state.GetError(e_regSetDBG, Read)) {
    mach_msg_type_number_t count = e_regSetWordSizeDBG;
    m_state.SetError(
        e_regSetDBG, Read,
        ::thread_get_state(m_thread->MachPortNumber(), __i386_DEBUG_STATE,
                           (thread_state_t)&m_state.context.dbg, &count));
  }
  return m_state.GetError(e_regSetDBG, Read);
}

kern_return_t DNBArchImplI386::SetDBGState(bool also_set_on_task) {
  m_state.SetError(e_regSetDBG, Write,
                   ::thread_set_state(m_thread->MachPortNumber(),
                                      __i386_DEBUG_STATE,
                                      (thread_state_t)&m_state.context.dbg,
                                      e_regSetWordSizeDBG));
  if (also_set_on_task) {
    kern_return_t kret = ::task_set_state(
        m_thread->Process()->Task().TaskPort(), __i386_DEBUG_STATE,
        (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG);
    if (kret != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::SetDBGState failed "
                                        "to set debug control register state: "
                                        "0x%8.8x.",
                       kret);
  }
  return m_state.GetError(e_regSetDBG, Write);
}

void DNBArchImplI386::ThreadWillResume() {
  // Do we need to step this thread? If so, let the mach thread tell us so.
  if (m_thread->IsStepping()) {
    // This is the primary thread, let the arch do anything it needs
    EnableHardwareSingleStep(true);
  }

  // Reset the debug status register, if necessary, before we resume.
  kern_return_t kret = GetDBGState(false);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchImplI386::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
  if (kret != KERN_SUCCESS)
    return;

  DBG &debug_state = m_state.context.dbg;
  bool need_reset = false;
  uint32_t i, num = NumSupportedHardwareWatchpoints();
  for (i = 0; i < num; ++i)
    if (IsWatchpointHit(debug_state, i))
      need_reset = true;

  if (need_reset) {
    ClearWatchpointHits(debug_state);
    kret = SetDBGState(false);
    DNBLogThreadedIf(
        LOG_WATCHPOINTS,
        "DNBArchImplI386::ThreadWillResume() SetDBGState() => 0x%8.8x.", kret);
  }
}

bool DNBArchImplI386::ThreadDidStop() {
  bool success = true;

  m_state.InvalidateAllRegisterStates();

  // Are we stepping a single instruction?
  if (GetGPRState(true) == KERN_SUCCESS) {
    // We are single stepping, was this the primary thread?
    if (m_thread->IsStepping()) {
      // This was the primary thread, we need to clear the trace
      // bit if so.
      success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
    } else {
      // The MachThread will automatically restore the suspend count
      // in ThreadDidStop(), so we don't need to do anything here if
      // we weren't the primary thread the last time
    }
  }
  return success;
}

bool DNBArchImplI386::NotifyException(MachException::Data &exc) {
  switch (exc.exc_type) {
  case EXC_BAD_ACCESS:
    break;
  case EXC_BAD_INSTRUCTION:
    break;
  case EXC_ARITHMETIC:
    break;
  case EXC_EMULATION:
    break;
  case EXC_SOFTWARE:
    break;
  case EXC_BREAKPOINT:
    if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) {
      // exc_code = EXC_I386_BPT
      //
      nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
      if (pc != INVALID_NUB_ADDRESS && pc > 0) {
        pc -= 1;
        // Check for a breakpoint at one byte prior to the current PC value
        // since the PC will be just past the trap.

        DNBBreakpoint *bp =
            m_thread->Process()->Breakpoints().FindByAddress(pc);
        if (bp) {
          // Backup the PC for i386 since the trap was taken and the PC
          // is at the address following the single byte trap instruction.
          if (m_state.context.gpr.__eip > 0) {
            m_state.context.gpr.__eip = static_cast<uint32_t>(pc);
            // Write the new PC back out
            SetGPRState();
          }
        }
        return true;
      }
    } else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1) {
      // exc_code = EXC_I386_SGL
      //
      // Check whether this corresponds to a watchpoint hit event.
      // If yes, set the exc_sub_code to the data break address.
      nub_addr_t addr = 0;
      uint32_t hw_index = GetHardwareWatchpointHit(addr);
      if (hw_index != INVALID_NUB_HW_INDEX) {
        exc.exc_data[1] = addr;
        // Piggyback the hw_index in the exc.data.
        exc.exc_data.push_back(hw_index);
      }

      return true;
    }
    break;
  case EXC_SYSCALL:
    break;
  case EXC_MACH_SYSCALL:
    break;
  case EXC_RPC_ALERT:
    break;
  }
  return false;
}

uint32_t DNBArchImplI386::NumSupportedHardwareBreakpoints() {
  // Available debug address registers: dr0, dr1, dr2, dr3.
  return 4;
}

uint32_t DNBArchImplI386::NumSupportedHardwareWatchpoints() {
  // Available debug address registers: dr0, dr1, dr2, dr3.
  return 4;
}

static uint32_t size_and_rw_bits(nub_size_t size, bool read, bool write) {
  uint32_t rw;
  if (read) {
    rw = 0x3; // READ or READ/WRITE
  } else if (write) {
    rw = 0x1; // WRITE
  } else {
    assert(0 && "read and write cannot both be false");
  }

  switch (size) {
  case 1:
    return rw;
  case 2:
    return (0x1 << 2) | rw;
  case 4:
    return (0x3 << 2) | rw;
  case 8:
    return (0x2 << 2) | rw;
  }
  assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
  return 0;
}

void DNBArchImplI386::SetWatchpoint(DBG &debug_state, uint32_t hw_index,
                                    nub_addr_t addr, nub_size_t size, bool read,
                                    bool write) {
  // Set both dr7 (debug control register) and dri (debug address register).

  // dr7{7-0} encodes the local/global enable bits:
  //  global enable --.       .-- local enable
  //                  |       |
  //                  v       v
  //      dr0 -> bits{1-0}
  //      dr1 -> bits{3-2}
  //      dr2 -> bits{5-4}
  //      dr3 -> bits{7-6}
  //
  // dr7{31-16} encodes the rw/len bits:
  //  b_x+3, b_x+2, b_x+1, b_x
  //   where bits{x+1, x} => rw
  //          0b00: execute, 0b01: write, 0b11: read-or-write,
  //          0b10: io read-or-write (unused)
  //         and bits{x+3, x+2} => len
  //          0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
  //
  //      dr0 -> bits{19-16}
  //      dr1 -> bits{23-20}
  //      dr2 -> bits{27-24}
  //      dr3 -> bits{31-28}
  debug_state.__dr7 |=
      (1 << (2 * hw_index) |
       size_and_rw_bits(size, read, write) << (16 + 4 * hw_index));
  uint32_t addr_32 = addr & 0xffffffff;
  switch (hw_index) {
  case 0:
    debug_state.__dr0 = addr_32;
    break;
  case 1:
    debug_state.__dr1 = addr_32;
    break;
  case 2:
    debug_state.__dr2 = addr_32;
    break;
  case 3:
    debug_state.__dr3 = addr_32;
    break;
  default:
    assert(0 &&
           "invalid hardware register index, must be one of 0, 1, 2, or 3");
  }
  return;
}

void DNBArchImplI386::SetHardwareBreakpoint(DBG &debug_state, uint32_t hw_index,
                                            nub_addr_t addr, nub_size_t size) {
  // Set both dr7 (debug control register) and dri (debug address register).

  // dr7{7-0} encodes the local/global enable bits:
  //  global enable --.       .-- local enable
  //                  |       |
  //                  v       v
  //      dr0 -> bits{1-0}
  //      dr1 -> bits{3-2}
  //      dr2 -> bits{5-4}
  //      dr3 -> bits{7-6}
  //
  // dr7{31-16} encodes the rw/len bits:
  //  b_x+3, b_x+2, b_x+1, b_x
  //   where bits{x+1, x} => rw
  //          0b00: execute, 0b01: write, 0b11: read-or-write,
  //          0b10: io read-or-write (unused)
  //         and bits{x+3, x+2} => len
  //          0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
  //
  //      dr0 -> bits{19-16}
  //      dr1 -> bits{23-20}
  //      dr2 -> bits{27-24}
  //      dr3 -> bits{31-28}
  debug_state.__dr7 |= (1 << (2 * hw_index) | 0 << (16 + 4 * hw_index));
  uint32_t addr_32 = addr & 0xffffffff;
  switch (hw_index) {
  case 0:
    debug_state.__dr0 = addr_32;
    break;
  case 1:
    debug_state.__dr1 = addr_32;
    break;
  case 2:
    debug_state.__dr2 = addr_32;
    break;
  case 3:
    debug_state.__dr3 = addr_32;
    break;
  default:
    assert(0 &&
           "invalid hardware register index, must be one of 0, 1, 2, or 3");
  }
  return;
}

uint32_t DNBArchImplI386::EnableHardwareBreakpoint(nub_addr_t addr,
                                                   nub_size_t size,
                                                   bool also_set_on_task) {
  DNBLogThreadedIf(LOG_BREAKPOINTS,
                   "DNBArchImplI386::EnableHardwareBreakpoint( addr = "
                   "0x%8.8llx, size = %llu )",
                   (uint64_t)addr, (uint64_t)size);

  const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();
  // Read the debug state
  kern_return_t kret = GetDBGState(false);

  if (kret != KERN_SUCCESS) {
    return INVALID_NUB_HW_INDEX;
  }

  // Check to make sure we have the needed hardware support
  uint32_t i = 0;

  DBG &debug_state = m_state.context.dbg;
  for (i = 0; i < num_hw_breakpoints; ++i) {
    if (IsWatchpointVacant(debug_state, i)) {
      break;
    }
  }

  // See if we found an available hw breakpoint slot above
  if (i < num_hw_breakpoints) {
    DNBLogThreadedIf(
        LOG_BREAKPOINTS,
        "DNBArchImplI386::EnableHardwareBreakpoint( free slot = %u )", i);

    StartTransForHWP();

    // Modify our local copy of the debug state, first.
    SetHardwareBreakpoint(debug_state, i, addr, size);
    // Now set the watch point in the inferior.
    kret = SetDBGState(also_set_on_task);

    DNBLogThreadedIf(LOG_BREAKPOINTS,
                     "DNBArchImplI386::"
                     "EnableHardwareBreakpoint() "
                     "SetDBGState() => 0x%8.8x.",
                     kret);

    if (kret == KERN_SUCCESS) {
      DNBLogThreadedIf(
          LOG_BREAKPOINTS,
          "DNBArchImplI386::EnableHardwareBreakpoint( enabled at slot = %u)",
          i);
      return i;
    }
    // Revert to the previous debug state voluntarily. The transaction
    // coordinator knows that we have failed.
    else {
      m_state.context.dbg = GetDBGCheckpoint();
    }
  } else {
    DNBLogThreadedIf(LOG_BREAKPOINTS,
                     "DNBArchImplI386::EnableHardwareBreakpoint(addr = "
                     "0x%8.8llx, size = %llu) => all hardware breakpoint "
                     "resources are being used.",
                     (uint64_t)addr, (uint64_t)size);
  }

  return INVALID_NUB_HW_INDEX;
}

bool DNBArchImplI386::DisableHardwareBreakpoint(uint32_t hw_index,
                                                bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);

  const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.context.dbg;
    if (hw_index < num_hw_points &&
        !IsWatchpointVacant(debug_state, hw_index)) {

      StartTransForHWP();

      // Modify our local copy of the debug state, first.
      ClearWatchpoint(debug_state, hw_index);
      // Now disable the watch point in the inferior.
      kret = SetDBGState(true);
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchImplI386::DisableHardwareBreakpoint( %u )",
                       hw_index);

      if (kret == KERN_SUCCESS)
        return true;
      else // Revert to the previous debug state voluntarily. The transaction
           // coordinator knows that we have failed.
        m_state.context.dbg = GetDBGCheckpoint();
    }
  }
  return false;
}

void DNBArchImplI386::ClearWatchpoint(DBG &debug_state, uint32_t hw_index) {
  debug_state.__dr7 &= ~(3 << (2 * hw_index));
  switch (hw_index) {
  case 0:
    debug_state.__dr0 = 0;
    break;
  case 1:
    debug_state.__dr1 = 0;
    break;
  case 2:
    debug_state.__dr2 = 0;
    break;
  case 3:
    debug_state.__dr3 = 0;
    break;
  default:
    assert(0 &&
           "invalid hardware register index, must be one of 0, 1, 2, or 3");
  }
  return;
}

bool DNBArchImplI386::IsWatchpointVacant(const DBG &debug_state,
                                         uint32_t hw_index) {
  // Check dr7 (debug control register) for local/global enable bits:
  //  global enable --.       .-- local enable
  //                  |       |
  //                  v       v
  //      dr0 -> bits{1-0}
  //      dr1 -> bits{3-2}
  //      dr2 -> bits{5-4}
  //      dr3 -> bits{7-6}
  return (debug_state.__dr7 & (3 << (2 * hw_index))) == 0;
}

// Resets local copy of debug status register to wait for the next debug
// exception.
void DNBArchImplI386::ClearWatchpointHits(DBG &debug_state) {
  // See also IsWatchpointHit().
  debug_state.__dr6 = 0;
  return;
}

bool DNBArchImplI386::IsWatchpointHit(const DBG &debug_state,
                                      uint32_t hw_index) {
  // Check dr6 (debug status register) whether a watchpoint hits:
  //          is watchpoint hit?
  //                  |
  //                  v
  //      dr0 -> bits{0}
  //      dr1 -> bits{1}
  //      dr2 -> bits{2}
  //      dr3 -> bits{3}
  return (debug_state.__dr6 & (1 << hw_index));
}

nub_addr_t DNBArchImplI386::GetWatchAddress(const DBG &debug_state,
                                            uint32_t hw_index) {
  switch (hw_index) {
  case 0:
    return debug_state.__dr0;
  case 1:
    return debug_state.__dr1;
  case 2:
    return debug_state.__dr2;
  case 3:
    return debug_state.__dr3;
  }
  assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
  return 0;
}

bool DNBArchImplI386::StartTransForHWP() {
  if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back)
    DNBLogError("%s inconsistent state detected, expected %d or %d, got: %d",
                __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state);
  m_2pc_dbg_checkpoint = m_state.context.dbg;
  m_2pc_trans_state = Trans_Pending;
  return true;
}

bool DNBArchImplI386::RollbackTransForHWP() {
  m_state.context.dbg = m_2pc_dbg_checkpoint;
  if (m_2pc_trans_state != Trans_Pending)
    DNBLogError("%s inconsistent state detected, expected %d, got: %d",
                __FUNCTION__, Trans_Pending, m_2pc_trans_state);
  m_2pc_trans_state = Trans_Rolled_Back;
  kern_return_t kret = SetDBGState(false);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchImplI386::RollbackTransForHWP() SetDBGState() => 0x%8.8x.", kret);

  return kret == KERN_SUCCESS;
}

bool DNBArchImplI386::FinishTransForHWP() {
  m_2pc_trans_state = Trans_Done;
  return true;
}

DNBArchImplI386::DBG DNBArchImplI386::GetDBGCheckpoint() {
  return m_2pc_dbg_checkpoint;
}

uint32_t DNBArchImplI386::EnableHardwareWatchpoint(nub_addr_t addr,
                                                   nub_size_t size, bool read,
                                                   bool write,
                                                   bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::EnableHardwareWatchpoint("
                                    "addr = 0x%llx, size = %llu, read = %u, "
                                    "write = %u)",
                   (uint64_t)addr, (uint64_t)size, read, write);

  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  // Can only watch 1, 2, 4, or 8 bytes.
  if (!(size == 1 || size == 2 || size == 4 || size == 8))
    return INVALID_NUB_HW_INDEX;

  // We must watch for either read or write
  if (!read && !write)
    return INVALID_NUB_HW_INDEX;

  // Read the debug state
  kern_return_t kret = GetDBGState(false);

  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    DBG &debug_state = m_state.context.dbg;
    for (i = 0; i < num_hw_watchpoints; ++i) {
      if (IsWatchpointVacant(debug_state, i))
        break;
    }

    // See if we found an available hw breakpoint slot above
    if (i < num_hw_watchpoints) {
      StartTransForHWP();

      // Modify our local copy of the debug state, first.
      SetWatchpoint(debug_state, i, addr, size, read, write);
      // Now set the watch point in the inferior.
      kret = SetDBGState(also_set_on_task);
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::"
                                        "EnableHardwareWatchpoint() "
                                        "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
      else // Revert to the previous debug state voluntarily. The transaction
           // coordinator knows that we have failed.
        m_state.context.dbg = GetDBGCheckpoint();
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::"
                                        "EnableHardwareWatchpoint(): All "
                                        "hardware resources (%u) are in use.",
                       num_hw_watchpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}

bool DNBArchImplI386::DisableHardwareWatchpoint(uint32_t hw_index,
                                                bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.context.dbg;
    if (hw_index < num_hw_points &&
        !IsWatchpointVacant(debug_state, hw_index)) {
      StartTransForHWP();

      // Modify our local copy of the debug state, first.
      ClearWatchpoint(debug_state, hw_index);
      // Now disable the watch point in the inferior.
      kret = SetDBGState(also_set_on_task);
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchImplI386::DisableHardwareWatchpoint( %u )",
                       hw_index);

      if (kret == KERN_SUCCESS)
        return true;
      else // Revert to the previous debug state voluntarily. The transaction
           // coordinator knows that we have failed.
        m_state.context.dbg = GetDBGCheckpoint();
    }
  }
  return false;
}

// Iterate through the debug status register; return the index of the first
// hit.
uint32_t DNBArchImplI386::GetHardwareWatchpointHit(nub_addr_t &addr) {
  // Read the debug state
  kern_return_t kret = GetDBGState(true);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchImplI386::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
      kret);
  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.context.dbg;
    uint32_t i, num = NumSupportedHardwareWatchpoints();
    for (i = 0; i < num; ++i) {
      if (IsWatchpointHit(debug_state, i)) {
        addr = GetWatchAddress(debug_state, i);
        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplI386::"
                                          "GetHardwareWatchpointHit() found => "
                                          "%u (addr = 0x%llx).",
                         i, (uint64_t)addr);
        return i;
      }
    }
  }
  return INVALID_NUB_HW_INDEX;
}

// Set the single step bit in the processor status register.
kern_return_t DNBArchImplI386::EnableHardwareSingleStep(bool enable) {
  if (GetGPRState(false) == KERN_SUCCESS) {
    const uint32_t trace_bit = 0x100u;
    if (enable)
      m_state.context.gpr.__eflags |= trace_bit;
    else
      m_state.context.gpr.__eflags &= ~trace_bit;
    return SetGPRState();
  }
  return m_state.GetError(e_regSetGPR, Read);
}

// Register information definitions

#define DEFINE_GPR_PSEUDO_16(reg16, reg32)                                     \
  {                                                                            \
    e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, 0,                   \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_##reg32, g_invalidate_##reg32          \
  }
#define DEFINE_GPR_PSEUDO_8H(reg8, reg32)                                      \
  {                                                                            \
    e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 1, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        g_contained_##reg32, g_invalidate_##reg32                              \
  }
#define DEFINE_GPR_PSEUDO_8L(reg8, reg32)                                      \
  {                                                                            \
    e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 0, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        g_contained_##reg32, g_invalidate_##reg32                              \
  }

#define GPR_OFFSET(reg) (offsetof(DNBArchImplI386::GPR, __##reg))
#define FPU_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplI386::FPU, __fpu_##reg) +                               \
   offsetof(DNBArchImplI386::Context, fpu.no_avx))
#define AVX_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplI386::AVX, __fpu_##reg) +                               \
   offsetof(DNBArchImplI386::Context, fpu.avx))
#define AVX512F_OFFSET(reg)                                                    \
  (offsetof(DNBArchImplI386::AVX512F, __fpu_##reg) +                           \
   offsetof(DNBArchImplI386::Context, fpu.avx512f))
#define EXC_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplI386::EXC, __##reg) +                                   \
   offsetof(DNBArchImplI386::Context, exc))

#define GPR_SIZE(reg) (sizeof(((DNBArchImplI386::GPR *)NULL)->__##reg))
#define FPU_SIZE_UINT(reg) (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg))
#define FPU_SIZE_MMST(reg)                                                     \
  (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__mmst_reg))
#define FPU_SIZE_XMM(reg)                                                      \
  (sizeof(((DNBArchImplI386::FPU *)NULL)->__fpu_##reg.__xmm_reg))
#define FPU_SIZE_YMM(reg) (32)
#define FPU_SIZE_ZMM(reg) (64)
#define EXC_SIZE(reg) (sizeof(((DNBArchImplI386::EXC *)NULL)->__##reg))

// This does not accurately identify the location of ymm0...7 in
// Context.fpu.avx. That is because there is a bunch of padding
// in Context.fpu.avx that we don't need. Offset macros lay out
// the register state that Debugserver transmits to the debugger
// -- not to interpret the thread_get_state info.
#define AVX_OFFSET_YMM(n) (AVX_OFFSET(xmm7) + FPU_SIZE_XMM(xmm7) + (32 * n))

// TODO: Test this and come back.
#define AVX512F_OFFSET_ZMM(n)                                                  \
  (AVX_OFFSET_YMM(7) + FPU_SIZE_XMM(xmm7) + (64 * n))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.
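//
// For example, DEFINE_GPR_PSEUDO_16(ax, eax) expands to (approximately):
//
//   { e_regSetGPR, gpr_ax, "ax", NULL, Uint, Hex, 2, 0,
//     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
//     INVALID_NUB_REGNUM, g_contained_eax, g_invalidate_eax }
//
// i.e. a 2-byte pseudo register at offset 0 within the register named by
// g_contained_eax, invalidating the registers listed in g_invalidate_eax
// whenever it is written.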

const char *g_contained_eax[] = {"eax", NULL};
const char *g_contained_ebx[] = {"ebx", NULL};
const char *g_contained_ecx[] = {"ecx", NULL};
const char *g_contained_edx[] = {"edx", NULL};
const char *g_contained_edi[] = {"edi", NULL};
const char *g_contained_esi[] = {"esi", NULL};
const char *g_contained_ebp[] = {"ebp", NULL};
const char *g_contained_esp[] = {"esp", NULL};

const char *g_invalidate_eax[] = {"eax", "ax", "ah", "al", NULL};
const char *g_invalidate_ebx[] = {"ebx", "bx", "bh", "bl", NULL};
const char *g_invalidate_ecx[] = {"ecx", "cx", "ch", "cl", NULL};
const char *g_invalidate_edx[] = {"edx", "dx", "dh", "dl", NULL};
const char *g_invalidate_edi[] = {"edi", "di", "dil", NULL};
const char *g_invalidate_esi[] = {"esi", "si", "sil", NULL};
const char *g_invalidate_ebp[] = {"ebp", "bp", "bpl", NULL};
const char *g_invalidate_esp[] = {"esp", "sp", "spl", NULL};

// General purpose registers for 32 bit
const DNBRegisterInfo DNBArchImplI386::g_gpr_registers[] = {
    {e_regSetGPR, gpr_eax, "eax", NULL, Uint, Hex, GPR_SIZE(eax),
     GPR_OFFSET(eax), ehframe_eax, dwarf_eax, INVALID_NUB_REGNUM,
     debugserver_eax, NULL, g_invalidate_eax},
    {e_regSetGPR, gpr_ebx, "ebx", NULL, Uint, Hex, GPR_SIZE(ebx),
     GPR_OFFSET(ebx), ehframe_ebx, dwarf_ebx, INVALID_NUB_REGNUM,
     debugserver_ebx, NULL, g_invalidate_ebx},
    {e_regSetGPR, gpr_ecx, "ecx", NULL, Uint, Hex, GPR_SIZE(ecx),
     GPR_OFFSET(ecx), ehframe_ecx, dwarf_ecx, INVALID_NUB_REGNUM,
     debugserver_ecx, NULL, g_invalidate_ecx},
    {e_regSetGPR, gpr_edx, "edx", NULL, Uint, Hex, GPR_SIZE(edx),
     GPR_OFFSET(edx), ehframe_edx, dwarf_edx, INVALID_NUB_REGNUM,
     debugserver_edx, NULL, g_invalidate_edx},
    {e_regSetGPR, gpr_edi, "edi", NULL, Uint, Hex, GPR_SIZE(edi),
     GPR_OFFSET(edi), ehframe_edi, dwarf_edi, INVALID_NUB_REGNUM,
     debugserver_edi, NULL, g_invalidate_edi},
    {e_regSetGPR, gpr_esi, "esi", NULL, Uint, Hex, GPR_SIZE(esi),
     GPR_OFFSET(esi), ehframe_esi, dwarf_esi, INVALID_NUB_REGNUM,
     debugserver_esi, NULL, g_invalidate_esi},
    {e_regSetGPR, gpr_ebp, "ebp", "fp", Uint, Hex, GPR_SIZE(ebp),
     GPR_OFFSET(ebp), ehframe_ebp, dwarf_ebp, GENERIC_REGNUM_FP,
     debugserver_ebp, NULL, g_invalidate_ebp},
    {e_regSetGPR, gpr_esp, "esp", "sp", Uint, Hex, GPR_SIZE(esp),
     GPR_OFFSET(esp), ehframe_esp, dwarf_esp, GENERIC_REGNUM_SP,
     debugserver_esp, NULL, g_invalidate_esp},
    {e_regSetGPR, gpr_ss, "ss", NULL, Uint, Hex, GPR_SIZE(ss), GPR_OFFSET(ss),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_ss,
     NULL, NULL},
    {e_regSetGPR, gpr_eflags, "eflags", "flags", Uint, Hex, GPR_SIZE(eflags),
     GPR_OFFSET(eflags), ehframe_eflags, dwarf_eflags, GENERIC_REGNUM_FLAGS,
     debugserver_eflags, NULL, NULL},
    {e_regSetGPR, gpr_eip, "eip", "pc", Uint, Hex, GPR_SIZE(eip),
     GPR_OFFSET(eip), ehframe_eip, dwarf_eip, GENERIC_REGNUM_PC,
     debugserver_eip, NULL, NULL},
    {e_regSetGPR, gpr_cs, "cs", NULL, Uint, Hex, GPR_SIZE(cs), GPR_OFFSET(cs),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_cs,
     NULL, NULL},
    {e_regSetGPR, gpr_ds, "ds", NULL, Uint, Hex, GPR_SIZE(ds), GPR_OFFSET(ds),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_ds,
     NULL, NULL},
    {e_regSetGPR, gpr_es, "es", NULL, Uint, Hex, GPR_SIZE(es), GPR_OFFSET(es),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, debugserver_es, NULL, NULL},
    {e_regSetGPR, gpr_fs, "fs", NULL, Uint, Hex, GPR_SIZE(fs), GPR_OFFSET(fs),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_fs,
     NULL, NULL},
    {e_regSetGPR, gpr_gs, "gs", NULL, Uint, Hex, GPR_SIZE(gs), GPR_OFFSET(gs),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, debugserver_gs,
     NULL, NULL},
    DEFINE_GPR_PSEUDO_16(ax, eax),
    DEFINE_GPR_PSEUDO_16(bx, ebx),
    DEFINE_GPR_PSEUDO_16(cx, ecx),
    DEFINE_GPR_PSEUDO_16(dx, edx),
    DEFINE_GPR_PSEUDO_16(di, edi),
    DEFINE_GPR_PSEUDO_16(si, esi),
    DEFINE_GPR_PSEUDO_16(bp, ebp),
    DEFINE_GPR_PSEUDO_16(sp, esp),
    DEFINE_GPR_PSEUDO_8H(ah, eax),
    DEFINE_GPR_PSEUDO_8H(bh, ebx),
    DEFINE_GPR_PSEUDO_8H(ch, ecx),
    DEFINE_GPR_PSEUDO_8H(dh, edx),
    DEFINE_GPR_PSEUDO_8L(al, eax),
    DEFINE_GPR_PSEUDO_8L(bl, ebx),
    DEFINE_GPR_PSEUDO_8L(cl, ecx),
    DEFINE_GPR_PSEUDO_8L(dl, edx),
    DEFINE_GPR_PSEUDO_8L(dil, edi),
    DEFINE_GPR_PSEUDO_8L(sil, esi),
    DEFINE_GPR_PSEUDO_8L(bpl, ebp),
    DEFINE_GPR_PSEUDO_8L(spl, esp)};

const DNBRegisterInfo DNBArchImplI386::g_fpu_registers_no_avx[] = {
    {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw),
     FPU_OFFSET(fcw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw),
     FPU_OFFSET(fsw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex,
     2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
     FPU_OFFSET(ftw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop),
     FPU_OFFSET(fop), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip),
     FPU_OFFSET(ip), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs),
     FPU_OFFSET(cs), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp),
     FPU_OFFSET(dp), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds),
     FPU_OFFSET(ds), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr),
     FPU_OFFSET(mxcsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex,
     FPU_SIZE_UINT(mxcsrmask), FPU_OFFSET(mxcsrmask), INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    {e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0,
     INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL},
    {e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1,
     INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL},
    {e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2,
     INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL},
    {e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3,
     INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL},
    {e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4,
     INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL},
    {e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5,
     INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL},
    {e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6,
     INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL},
    {e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7,
     INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL},

    {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), INVALID_NUB_REGNUM, dwarf_xmm0,
     INVALID_NUB_REGNUM, debugserver_xmm0, NULL, NULL},
    {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), INVALID_NUB_REGNUM, dwarf_xmm1,
     INVALID_NUB_REGNUM, debugserver_xmm1, NULL, NULL},
    {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), INVALID_NUB_REGNUM, dwarf_xmm2,
     INVALID_NUB_REGNUM, debugserver_xmm2, NULL, NULL},
    {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), INVALID_NUB_REGNUM, dwarf_xmm3,
     INVALID_NUB_REGNUM, debugserver_xmm3, NULL, NULL},
    {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), INVALID_NUB_REGNUM, dwarf_xmm4,
     INVALID_NUB_REGNUM, debugserver_xmm4, NULL, NULL},
    {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), INVALID_NUB_REGNUM, dwarf_xmm5,
     INVALID_NUB_REGNUM, debugserver_xmm5, NULL, NULL},
    {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), INVALID_NUB_REGNUM, dwarf_xmm6,
     INVALID_NUB_REGNUM, debugserver_xmm6, NULL, NULL},
    {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), INVALID_NUB_REGNUM, dwarf_xmm7,
     INVALID_NUB_REGNUM, debugserver_xmm7, NULL, NULL}};

static const char *g_contained_ymm0[] = {"ymm0", NULL};
static const char *g_contained_ymm1[] = {"ymm1", NULL};
static const char *g_contained_ymm2[] = {"ymm2", NULL};
static const char *g_contained_ymm3[] = {"ymm3", NULL};
static const char *g_contained_ymm4[] = {"ymm4", NULL};
static const char *g_contained_ymm5[] = {"ymm5", NULL};
static const char *g_contained_ymm6[] = {"ymm6", NULL};
static const char *g_contained_ymm7[] = {"ymm7", NULL};

const DNBRegisterInfo DNBArchImplI386::g_fpu_registers_avx[] = {
    {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw),
     AVX_OFFSET(fcw), INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw),
     AVX_OFFSET(fsw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex,
     2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
     AVX_OFFSET(ftw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop),
     AVX_OFFSET(fop), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip),
     AVX_OFFSET(ip), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs),
     AVX_OFFSET(cs), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp),
     AVX_OFFSET(dp), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds),
     AVX_OFFSET(ds), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr),
     AVX_OFFSET(mxcsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex,
     FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    {e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0,
     INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL},
    {e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), INVALID_NUB_REGNUM, dwarf_stmm1,
     INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL},
    {e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2,
     INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL},
    {e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3,
     INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL},
    {e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4,
     INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL},
    {e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5,
     INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL},
    {e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6,
     INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL},
    {e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7,
     INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL},

    {e_regSetFPU, fpu_ymm0, "ymm0", NULL,
     Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), INVALID_NUB_REGNUM, dwarf_ymm0,
     INVALID_NUB_REGNUM, debugserver_ymm0, NULL, NULL},
    {e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), INVALID_NUB_REGNUM, dwarf_ymm1,
     INVALID_NUB_REGNUM, debugserver_ymm1, NULL, NULL},
    {e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), INVALID_NUB_REGNUM, dwarf_ymm2,
     INVALID_NUB_REGNUM, debugserver_ymm2, NULL, NULL},
    {e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), INVALID_NUB_REGNUM, dwarf_ymm3,
     INVALID_NUB_REGNUM, debugserver_ymm3, NULL, NULL},
    {e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), INVALID_NUB_REGNUM, dwarf_ymm4,
     INVALID_NUB_REGNUM, debugserver_ymm4, NULL, NULL},
    {e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), INVALID_NUB_REGNUM, dwarf_ymm5,
     INVALID_NUB_REGNUM, debugserver_ymm5, NULL, NULL},
    {e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), INVALID_NUB_REGNUM, dwarf_ymm6,
     INVALID_NUB_REGNUM, debugserver_ymm6, NULL, NULL},
    {e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), INVALID_NUB_REGNUM, dwarf_ymm7,
     INVALID_NUB_REGNUM, debugserver_ymm7, NULL, NULL},

    {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm0), 0, INVALID_NUB_REGNUM, dwarf_xmm0, INVALID_NUB_REGNUM,
     debugserver_xmm0, g_contained_ymm0, NULL},
    {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm1), 0, INVALID_NUB_REGNUM, dwarf_xmm1, INVALID_NUB_REGNUM,
     debugserver_xmm1, g_contained_ymm1, NULL},
    {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm2), 0, INVALID_NUB_REGNUM, dwarf_xmm2, INVALID_NUB_REGNUM,
     debugserver_xmm2, g_contained_ymm2, NULL},
    {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm3), 0, INVALID_NUB_REGNUM, dwarf_xmm3, INVALID_NUB_REGNUM,
     debugserver_xmm3, g_contained_ymm3, NULL},
    {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm4), 0, INVALID_NUB_REGNUM, dwarf_xmm4, INVALID_NUB_REGNUM,
     debugserver_xmm4, g_contained_ymm4, NULL},
    {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm5), 0, INVALID_NUB_REGNUM, dwarf_xmm5, INVALID_NUB_REGNUM,
     debugserver_xmm5, g_contained_ymm5, NULL},
    {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm6), 0, INVALID_NUB_REGNUM, dwarf_xmm6, INVALID_NUB_REGNUM,
     debugserver_xmm6, g_contained_ymm6, NULL},
    {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_XMM(xmm7), 0, INVALID_NUB_REGNUM, dwarf_xmm7, INVALID_NUB_REGNUM,
     debugserver_xmm7, g_contained_ymm7, NULL},

};

#define STR(s) #s

#define ZMM_REG_DEF(reg)                                                       \
  {                                                                            \
    e_regSetFPU, fpu_zmm##reg, STR(zmm##reg), NULL, Vector, VectorOfUInt8,     \
        FPU_SIZE_ZMM(zmm##reg), AVX512F_OFFSET_ZMM(reg), INVALID_NUB_REGNUM,   \
        dwarf_zmm##reg, INVALID_NUB_REGNUM, debugserver_zmm##reg, NULL, NULL   \
  }

#define YMM_REG_ALIAS(reg)                                                     \
  {                                                                            \
    e_regSetFPU, fpu_ymm##reg, STR(ymm##reg), NULL, Vector, VectorOfUInt8,     \
        FPU_SIZE_YMM(ymm##reg), 0, INVALID_NUB_REGNUM, dwarf_ymm##reg,         \
        INVALID_NUB_REGNUM, debugserver_ymm##reg, g_contained_zmm##reg, NULL   \
  }

#define XMM_REG_ALIAS(reg)                                                     \
  {                                                                            \
    e_regSetFPU, fpu_xmm##reg, STR(xmm##reg), NULL, Vector, VectorOfUInt8,     \
        FPU_SIZE_XMM(xmm##reg), 0, INVALID_NUB_REGNUM, dwarf_xmm##reg,         \
        INVALID_NUB_REGNUM, debugserver_xmm##reg, g_contained_zmm##reg, NULL   \
  }

#define AVX512_K_REG_DEF(reg)                                                  \
  {                                                                            \
    e_regSetFPU, fpu_k##reg, STR(k##reg), NULL, Vector, VectorOfUInt8, 8,      \
        AVX512F_OFFSET(k##reg), dwarf_k##reg, dwarf_k##reg, -1U,               \
        debugserver_k##reg, NULL, NULL                                         \
  }

static const char *g_contained_zmm0[] = {"zmm0", NULL};
static const char *g_contained_zmm1[] = {"zmm1", NULL};
static const char *g_contained_zmm2[] = {"zmm2", NULL};
static const char *g_contained_zmm3[] = {"zmm3", NULL};
static const char *g_contained_zmm4[] = {"zmm4", NULL};
static const char *g_contained_zmm5[] = {"zmm5", NULL};
static const char *g_contained_zmm6[] = {"zmm6", NULL};
static const char *g_contained_zmm7[] = {"zmm7", NULL};

const DNBRegisterInfo DNBArchImplI386::g_fpu_registers_avx512f[] = {
    {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw),
     AVX_OFFSET(fcw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw),
     AVX_OFFSET(fsw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex,
     2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
     FPU_OFFSET(ftw), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop),
     AVX_OFFSET(fop), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip),
     AVX_OFFSET(ip), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs),
     AVX_OFFSET(cs), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp),
     AVX_OFFSET(dp), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds),
     AVX_OFFSET(ds), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr),
     AVX_OFFSET(mxcsr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex,
     FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    {e_regSetFPU, fpu_stmm0, "stmm0", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), INVALID_NUB_REGNUM, dwarf_stmm0,
     INVALID_NUB_REGNUM, debugserver_stmm0, NULL, NULL},
    {e_regSetFPU, fpu_stmm1, "stmm1", NULL, Vector, VectorOfUInt8,
     FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1),
INVALID_NUB_REGNUM, dwarf_stmm1, 1604 INVALID_NUB_REGNUM, debugserver_stmm1, NULL, NULL}, 1605 {e_regSetFPU, fpu_stmm2, "stmm2", NULL, Vector, VectorOfUInt8, 1606 FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), INVALID_NUB_REGNUM, dwarf_stmm2, 1607 INVALID_NUB_REGNUM, debugserver_stmm2, NULL, NULL}, 1608 {e_regSetFPU, fpu_stmm3, "stmm3", NULL, Vector, VectorOfUInt8, 1609 FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), INVALID_NUB_REGNUM, dwarf_stmm3, 1610 INVALID_NUB_REGNUM, debugserver_stmm3, NULL, NULL}, 1611 {e_regSetFPU, fpu_stmm4, "stmm4", NULL, Vector, VectorOfUInt8, 1612 FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), INVALID_NUB_REGNUM, dwarf_stmm4, 1613 INVALID_NUB_REGNUM, debugserver_stmm4, NULL, NULL}, 1614 {e_regSetFPU, fpu_stmm5, "stmm5", NULL, Vector, VectorOfUInt8, 1615 FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), INVALID_NUB_REGNUM, dwarf_stmm5, 1616 INVALID_NUB_REGNUM, debugserver_stmm5, NULL, NULL}, 1617 {e_regSetFPU, fpu_stmm6, "stmm6", NULL, Vector, VectorOfUInt8, 1618 FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), INVALID_NUB_REGNUM, dwarf_stmm6, 1619 INVALID_NUB_REGNUM, debugserver_stmm6, NULL, NULL}, 1620 {e_regSetFPU, fpu_stmm7, "stmm7", NULL, Vector, VectorOfUInt8, 1621 FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), INVALID_NUB_REGNUM, dwarf_stmm7, 1622 INVALID_NUB_REGNUM, debugserver_stmm7, NULL, NULL}, 1623 1624 AVX512_K_REG_DEF(0), 1625 AVX512_K_REG_DEF(1), 1626 AVX512_K_REG_DEF(2), 1627 AVX512_K_REG_DEF(3), 1628 AVX512_K_REG_DEF(4), 1629 AVX512_K_REG_DEF(5), 1630 AVX512_K_REG_DEF(6), 1631 AVX512_K_REG_DEF(7), 1632 1633 ZMM_REG_DEF(0), 1634 ZMM_REG_DEF(1), 1635 ZMM_REG_DEF(2), 1636 ZMM_REG_DEF(3), 1637 ZMM_REG_DEF(4), 1638 ZMM_REG_DEF(5), 1639 ZMM_REG_DEF(6), 1640 ZMM_REG_DEF(7), 1641 1642 YMM_REG_ALIAS(0), 1643 YMM_REG_ALIAS(1), 1644 YMM_REG_ALIAS(2), 1645 YMM_REG_ALIAS(3), 1646 YMM_REG_ALIAS(4), 1647 YMM_REG_ALIAS(5), 1648 YMM_REG_ALIAS(6), 1649 YMM_REG_ALIAS(7), 1650 1651 XMM_REG_ALIAS(0), 1652 XMM_REG_ALIAS(1), 1653 XMM_REG_ALIAS(2), 1654 XMM_REG_ALIAS(3), 1655 XMM_REG_ALIAS(4), 1656 XMM_REG_ALIAS(5), 1657 XMM_REG_ALIAS(6), 1658 XMM_REG_ALIAS(7) 1659 1660 }; 1661 1662 const DNBRegisterInfo DNBArchImplI386::g_exc_registers[] = { 1663 {e_regSetEXC, exc_trapno, "trapno", NULL, Uint, Hex, EXC_SIZE(trapno), 1664 EXC_OFFSET(trapno), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1665 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1666 {e_regSetEXC, exc_err, "err", NULL, Uint, Hex, EXC_SIZE(err), 1667 EXC_OFFSET(err), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, 1668 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}, 1669 {e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex, 1670 EXC_SIZE(faultvaddr), EXC_OFFSET(faultvaddr), INVALID_NUB_REGNUM, 1671 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}}; 1672 1673 // Number of registers in each register set 1674 const size_t DNBArchImplI386::k_num_gpr_registers = 1675 sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo); 1676 const size_t DNBArchImplI386::k_num_fpu_registers_no_avx = 1677 sizeof(g_fpu_registers_no_avx) / sizeof(DNBRegisterInfo); 1678 const size_t DNBArchImplI386::k_num_fpu_registers_avx = 1679 sizeof(g_fpu_registers_avx) / sizeof(DNBRegisterInfo); 1680 const size_t DNBArchImplI386::k_num_fpu_registers_avx512f = 1681 sizeof(g_fpu_registers_avx512f) / sizeof(DNBRegisterInfo); 1682 const size_t DNBArchImplI386::k_num_exc_registers = 1683 sizeof(g_exc_registers) / sizeof(DNBRegisterInfo); 1684 const size_t DNBArchImplI386::k_num_all_registers_no_avx = 1685 k_num_gpr_registers + k_num_fpu_registers_no_avx + 
    k_num_exc_registers;
const size_t DNBArchImplI386::k_num_all_registers_avx =
    k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
const size_t DNBArchImplI386::k_num_all_registers_avx512f =
    k_num_gpr_registers + k_num_fpu_registers_avx512f + k_num_exc_registers;

// Register set definitions. The first definition, at register set index
// zero, is for all registers, followed by the other register sets. The
// register information for the all-registers set need not be filled in.
const DNBRegisterSetInfo DNBArchImplI386::g_reg_sets_no_avx[] = {
    {"i386 Registers", NULL, k_num_all_registers_no_avx},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_fpu_registers_no_avx,
     k_num_fpu_registers_no_avx},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};

const DNBRegisterSetInfo DNBArchImplI386::g_reg_sets_avx[] = {
    {"i386 Registers", NULL, k_num_all_registers_avx},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};

const DNBRegisterSetInfo DNBArchImplI386::g_reg_sets_avx512f[] = {
    {"i386 Registers", NULL, k_num_all_registers_avx512f},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_fpu_registers_avx512f,
     k_num_fpu_registers_avx512f},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};

// Total number of register sets for this architecture
const size_t DNBArchImplI386::k_num_register_sets =
    sizeof(g_reg_sets_avx) / sizeof(DNBRegisterSetInfo);

DNBArchProtocol *DNBArchImplI386::Create(MachThread *thread) {
  DNBArchImplI386 *obj = new DNBArchImplI386(thread);
  return obj;
}

const uint8_t *DNBArchImplI386::SoftwareBreakpointOpcode(nub_size_t byte_size) {
  static const uint8_t g_breakpoint_opcode[] = {0xCC};
  if (byte_size == 1)
    return g_breakpoint_opcode;
  return NULL;
}

const DNBRegisterSetInfo *
DNBArchImplI386::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  if (CPUHasAVX512f() || FORCE_AVX_REGS)
    return g_reg_sets_avx512f;
  if (CPUHasAVX())
    return g_reg_sets_avx;
  else
    return g_reg_sets_no_avx;
}

void DNBArchImplI386::Initialize() {
  DNBArchPluginInfo arch_plugin_info = {
      CPU_TYPE_I386, DNBArchImplI386::Create,
      DNBArchImplI386::GetRegisterSetInfo,
      DNBArchImplI386::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);
}

bool DNBArchImplI386::GetRegisterValue(uint32_t set, uint32_t reg,
                                       DNBRegisterValue *value) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_eip;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_esp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_ebp;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_eflags;
      break;

    case GENERIC_REGNUM_RA: // Return
Address 1776 default: 1777 return false; 1778 } 1779 } 1780 1781 if (GetRegisterState(set, false) != KERN_SUCCESS) 1782 return false; 1783 1784 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 1785 if (regInfo) { 1786 value->info = *regInfo; 1787 switch (set) { 1788 case e_regSetGPR: 1789 if (reg < k_num_gpr_registers) { 1790 value->value.uint32 = ((uint32_t *)(&m_state.context.gpr))[reg]; 1791 return true; 1792 } 1793 break; 1794 1795 case e_regSetFPU: 1796 if (reg > fpu_xmm7 && !(CPUHasAVX() || FORCE_AVX_REGS)) 1797 return false; 1798 if (reg > fpu_ymm7 && !(CPUHasAVX512f() || FORCE_AVX_REGS)) 1799 return false; 1800 switch (reg) { 1801 case fpu_fcw: 1802 value->value.uint16 = 1803 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)); 1804 return true; 1805 case fpu_fsw: 1806 value->value.uint16 = 1807 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)); 1808 return true; 1809 case fpu_ftw: 1810 memcpy (&value->value.uint16, &m_state.context.fpu.no_avx.__fpu_ftw, 2); 1811 return true; 1812 case fpu_fop: 1813 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop; 1814 return true; 1815 case fpu_ip: 1816 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip; 1817 return true; 1818 case fpu_cs: 1819 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs; 1820 return true; 1821 case fpu_dp: 1822 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp; 1823 return true; 1824 case fpu_ds: 1825 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds; 1826 return true; 1827 case fpu_mxcsr: 1828 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr; 1829 return true; 1830 case fpu_mxcsrmask: 1831 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask; 1832 return true; 1833 1834 case fpu_stmm0: 1835 memcpy(&value->value.uint8, 1836 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 10); 1837 return true; 1838 case fpu_stmm1: 1839 memcpy(&value->value.uint8, 1840 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 10); 1841 return true; 1842 case fpu_stmm2: 1843 memcpy(&value->value.uint8, 1844 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 10); 1845 return true; 1846 case fpu_stmm3: 1847 memcpy(&value->value.uint8, 1848 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 10); 1849 return true; 1850 case fpu_stmm4: 1851 memcpy(&value->value.uint8, 1852 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 10); 1853 return true; 1854 case fpu_stmm5: 1855 memcpy(&value->value.uint8, 1856 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 10); 1857 return true; 1858 case fpu_stmm6: 1859 memcpy(&value->value.uint8, 1860 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 10); 1861 return true; 1862 case fpu_stmm7: 1863 memcpy(&value->value.uint8, 1864 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 10); 1865 return true; 1866 1867 case fpu_xmm0: 1868 memcpy(&value->value.uint8, 1869 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 16); 1870 return true; 1871 case fpu_xmm1: 1872 memcpy(&value->value.uint8, 1873 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 16); 1874 return true; 1875 case fpu_xmm2: 1876 memcpy(&value->value.uint8, 1877 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 16); 1878 return true; 1879 case fpu_xmm3: 1880 memcpy(&value->value.uint8, 1881 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 16); 1882 return true; 1883 case fpu_xmm4: 1884 memcpy(&value->value.uint8, 1885 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 16); 1886 return true; 1887 case fpu_xmm5: 1888 memcpy(&value->value.uint8, 1889 
m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 16); 1890 return true; 1891 case fpu_xmm6: 1892 memcpy(&value->value.uint8, 1893 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 16); 1894 return true; 1895 case fpu_xmm7: 1896 memcpy(&value->value.uint8, 1897 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 16); 1898 return true; 1899 1900 #define MEMCPY_YMM(n) \ 1901 memcpy(&value->value.uint8, m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, \ 1902 16); \ 1903 memcpy((&value->value.uint8) + 16, \ 1904 m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg, 16); 1905 case fpu_ymm0: 1906 MEMCPY_YMM(0); 1907 return true; 1908 case fpu_ymm1: 1909 MEMCPY_YMM(1); 1910 return true; 1911 case fpu_ymm2: 1912 MEMCPY_YMM(2); 1913 return true; 1914 case fpu_ymm3: 1915 MEMCPY_YMM(3); 1916 return true; 1917 case fpu_ymm4: 1918 MEMCPY_YMM(4); 1919 return true; 1920 case fpu_ymm5: 1921 MEMCPY_YMM(5); 1922 return true; 1923 case fpu_ymm6: 1924 MEMCPY_YMM(6); 1925 return true; 1926 case fpu_ymm7: 1927 MEMCPY_YMM(7); 1928 return true; 1929 #undef MEMCPY_YMM 1930 1931 case fpu_k0: 1932 case fpu_k1: 1933 case fpu_k2: 1934 case fpu_k3: 1935 case fpu_k4: 1936 case fpu_k5: 1937 case fpu_k6: 1938 case fpu_k7: 1939 memcpy((&value->value.uint8), 1940 &m_state.context.fpu.avx512f.__fpu_k0 + (reg - fpu_k0), 8); 1941 return true; 1942 case fpu_zmm0: 1943 case fpu_zmm1: 1944 case fpu_zmm2: 1945 case fpu_zmm3: 1946 case fpu_zmm4: 1947 case fpu_zmm5: 1948 case fpu_zmm6: 1949 case fpu_zmm7: 1950 memcpy(&value->value.uint8, 1951 &m_state.context.fpu.avx512f.__fpu_xmm0 + (reg - fpu_zmm0), 16); 1952 memcpy(&value->value.uint8 + 16, 1953 &m_state.context.fpu.avx512f.__fpu_ymmh0 + (reg - fpu_zmm0), 16); 1954 memcpy(&value->value.uint8 + 32, 1955 &m_state.context.fpu.avx512f.__fpu_zmmh0 + (reg - fpu_zmm0), 32); 1956 return true; 1957 } 1958 break; 1959 1960 case e_regSetEXC: 1961 if (reg < k_num_exc_registers) { 1962 value->value.uint32 = (&m_state.context.exc.__trapno)[reg]; 1963 return true; 1964 } 1965 break; 1966 } 1967 } 1968 return false; 1969 } 1970 1971 bool DNBArchImplI386::SetRegisterValue(uint32_t set, uint32_t reg, 1972 const DNBRegisterValue *value) { 1973 if (set == REGISTER_SET_GENERIC) { 1974 switch (reg) { 1975 case GENERIC_REGNUM_PC: // Program Counter 1976 set = e_regSetGPR; 1977 reg = gpr_eip; 1978 break; 1979 1980 case GENERIC_REGNUM_SP: // Stack Pointer 1981 set = e_regSetGPR; 1982 reg = gpr_esp; 1983 break; 1984 1985 case GENERIC_REGNUM_FP: // Frame Pointer 1986 set = e_regSetGPR; 1987 reg = gpr_ebp; 1988 break; 1989 1990 case GENERIC_REGNUM_FLAGS: // Processor flags register 1991 set = e_regSetGPR; 1992 reg = gpr_eflags; 1993 break; 1994 1995 case GENERIC_REGNUM_RA: // Return Address 1996 default: 1997 return false; 1998 } 1999 } 2000 2001 if (GetRegisterState(set, false) != KERN_SUCCESS) 2002 return false; 2003 2004 bool success = false; 2005 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg); 2006 if (regInfo) { 2007 switch (set) { 2008 case e_regSetGPR: 2009 if (reg < k_num_gpr_registers) { 2010 ((uint32_t *)(&m_state.context.gpr))[reg] = value->value.uint32; 2011 success = true; 2012 } 2013 break; 2014 2015 case e_regSetFPU: 2016 if (reg > fpu_xmm7 && !(CPUHasAVX() || FORCE_AVX_REGS)) 2017 return false; 2018 if (reg > fpu_ymm7 && !(CPUHasAVX512f() || FORCE_AVX_REGS)) 2019 return false; 2020 switch (reg) { 2021 case fpu_fcw: 2022 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) = 2023 value->value.uint16; 2024 success = true; 2025 break; 2026 case fpu_fsw: 2027 *((uint16_t 
*)(&m_state.context.fpu.no_avx.__fpu_fsw)) = 2028 value->value.uint16; 2029 success = true; 2030 break; 2031 case fpu_ftw: 2032 memcpy (&m_state.context.fpu.no_avx.__fpu_ftw, &value->value.uint16, 2); 2033 success = true; 2034 break; 2035 case fpu_fop: 2036 m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16; 2037 success = true; 2038 break; 2039 case fpu_ip: 2040 m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32; 2041 success = true; 2042 break; 2043 case fpu_cs: 2044 m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16; 2045 success = true; 2046 break; 2047 case fpu_dp: 2048 m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32; 2049 success = true; 2050 break; 2051 case fpu_ds: 2052 m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16; 2053 success = true; 2054 break; 2055 case fpu_mxcsr: 2056 m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32; 2057 success = true; 2058 break; 2059 case fpu_mxcsrmask: 2060 m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32; 2061 success = true; 2062 break; 2063 2064 case fpu_stmm0: 2065 memcpy(m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg, 2066 &value->value.uint8, 10); 2067 success = true; 2068 break; 2069 case fpu_stmm1: 2070 memcpy(m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg, 2071 &value->value.uint8, 10); 2072 success = true; 2073 break; 2074 case fpu_stmm2: 2075 memcpy(m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg, 2076 &value->value.uint8, 10); 2077 success = true; 2078 break; 2079 case fpu_stmm3: 2080 memcpy(m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg, 2081 &value->value.uint8, 10); 2082 success = true; 2083 break; 2084 case fpu_stmm4: 2085 memcpy(m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg, 2086 &value->value.uint8, 10); 2087 success = true; 2088 break; 2089 case fpu_stmm5: 2090 memcpy(m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg, 2091 &value->value.uint8, 10); 2092 success = true; 2093 break; 2094 case fpu_stmm6: 2095 memcpy(m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg, 2096 &value->value.uint8, 10); 2097 success = true; 2098 break; 2099 case fpu_stmm7: 2100 memcpy(m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg, 2101 &value->value.uint8, 10); 2102 success = true; 2103 break; 2104 2105 case fpu_xmm0: 2106 memcpy(m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg, 2107 &value->value.uint8, 16); 2108 success = true; 2109 break; 2110 case fpu_xmm1: 2111 memcpy(m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg, 2112 &value->value.uint8, 16); 2113 success = true; 2114 break; 2115 case fpu_xmm2: 2116 memcpy(m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg, 2117 &value->value.uint8, 16); 2118 success = true; 2119 break; 2120 case fpu_xmm3: 2121 memcpy(m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg, 2122 &value->value.uint8, 16); 2123 success = true; 2124 break; 2125 case fpu_xmm4: 2126 memcpy(m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg, 2127 &value->value.uint8, 16); 2128 success = true; 2129 break; 2130 case fpu_xmm5: 2131 memcpy(m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg, 2132 &value->value.uint8, 16); 2133 success = true; 2134 break; 2135 case fpu_xmm6: 2136 memcpy(m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg, 2137 &value->value.uint8, 16); 2138 success = true; 2139 break; 2140 case fpu_xmm7: 2141 memcpy(m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg, 2142 &value->value.uint8, 16); 2143 success = true; 2144 break; 2145 2146 #define MEMCPY_YMM(n) \ 2147 memcpy(m_state.context.fpu.avx.__fpu_xmm##n.__xmm_reg, &value->value.uint8, \ 2148 16); \ 2149 
  memcpy(m_state.context.fpu.avx.__fpu_ymmh##n.__xmm_reg,                     \
         (&value->value.uint8) + 16, 16);
      case fpu_ymm0:
        MEMCPY_YMM(0);
        return true;
      case fpu_ymm1:
        MEMCPY_YMM(1);
        return true;
      case fpu_ymm2:
        MEMCPY_YMM(2);
        return true;
      case fpu_ymm3:
        MEMCPY_YMM(3);
        return true;
      case fpu_ymm4:
        MEMCPY_YMM(4);
        return true;
      case fpu_ymm5:
        MEMCPY_YMM(5);
        return true;
      case fpu_ymm6:
        MEMCPY_YMM(6);
        return true;
      case fpu_ymm7:
        MEMCPY_YMM(7);
        return true;
#undef MEMCPY_YMM

      case fpu_k0:
      case fpu_k1:
      case fpu_k2:
      case fpu_k3:
      case fpu_k4:
      case fpu_k5:
      case fpu_k6:
      case fpu_k7:
        memcpy(&m_state.context.fpu.avx512f.__fpu_k0 + (reg - fpu_k0),
               &value->value.uint8, 8);
        return true;
      case fpu_zmm0:
      case fpu_zmm1:
      case fpu_zmm2:
      case fpu_zmm3:
      case fpu_zmm4:
      case fpu_zmm5:
      case fpu_zmm6:
      case fpu_zmm7:
        memcpy(&m_state.context.fpu.avx512f.__fpu_xmm0 + (reg - fpu_zmm0),
               &value->value.uint8, 16);
        memcpy(&m_state.context.fpu.avx512f.__fpu_ymmh0 + (reg - fpu_zmm0),
               &value->value.uint8 + 16, 16);
        memcpy(&m_state.context.fpu.avx512f.__fpu_zmmh0 + (reg - fpu_zmm0),
               &value->value.uint8 + 32, 32);
        return true;
      }
      break;

    case e_regSetEXC:
      if (reg < k_num_exc_registers) {
        (&m_state.context.exc.__trapno)[reg] = value->value.uint32;
        success = true;
      }
      break;
    }
  }

  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}

uint32_t DNBArchImplI386::GetRegisterContextSize() {
  static uint32_t g_cached_size = 0;
  if (g_cached_size == 0) {
    if (CPUHasAVX512f() || FORCE_AVX_REGS) {
      for (size_t i = 0; i < k_num_fpu_registers_avx512f; ++i) {
        if (g_fpu_registers_avx512f[i].value_regs == NULL)
          g_cached_size += g_fpu_registers_avx512f[i].size;
      }
    } else if (CPUHasAVX()) {
      for (size_t i = 0; i < k_num_fpu_registers_avx; ++i) {
        if (g_fpu_registers_avx[i].value_regs == NULL)
          g_cached_size += g_fpu_registers_avx[i].size;
      }
    } else {
      for (size_t i = 0; i < k_num_fpu_registers_no_avx; ++i) {
        if (g_fpu_registers_no_avx[i].value_regs == NULL)
          g_cached_size += g_fpu_registers_no_avx[i].size;
      }
    }
    DNBLogThreaded("DNBArchImplI386::GetRegisterContextSize() - GPR = %zu, "
                   "FPU = %u, EXC = %zu",
                   sizeof(GPR), g_cached_size, sizeof(EXC));
    g_cached_size += sizeof(GPR);
    g_cached_size += sizeof(EXC);
    DNBLogThreaded(
        "DNBArchImplI386::GetRegisterContextSize() - GPR + FPU + EXC = %u",
        g_cached_size);
  }
  return g_cached_size;
}
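
// Illustrative layout note (not normative; the exact packing depends on the
// CPU features detected at runtime): GetRegisterContext() below emits
//
//   [ GPR                                    sizeof(GPR) bytes ]
//   [ fcw/fsw/ftw                            5 bytes           ]
//   [ fop/ip/cs                              8 bytes           ]
//   [ dp/ds                                  6 bytes           ]
//   [ mxcsr/mxcsrmask                        8 bytes           ]
//   [ stmm0..stmm7                           8 x 10 bytes      ]
//   [ k0..k7 (only with AVX-512)             8 x 8 bytes       ]
//   [ xmm0..7, or xmm/ymmh pairs with AVX    8 x 16 or 8 x 32  ]
//   [ zmmh0..zmmh7 (only with AVX-512)       8 x 32 bytes      ]
//   [ EXC                                    sizeof(EXC) bytes ]
//
// SetRegisterContext() consumes the same layout in the same order.
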
nub_size_t DNBArchImplI386::GetRegisterContext(void *buf, nub_size_t buf_len) {
  uint32_t size = GetRegisterContextSize();

  if (buf && buf_len) {
    if (size > buf_len)
      size = static_cast<uint32_t>(buf_len);

    bool force = false;
    kern_return_t kret;
    if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = "
                                   "%p, len = %llu) error: GPR regs failed to "
                                   "read: %u ",
                       buf, (uint64_t)buf_len, kret);
      size = 0;
    } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) {
      DNBLogThreadedIf(
          LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = %p, len = "
                      "%llu) error: %s regs failed to read: %u",
          buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
      size = 0;
    } else if ((kret = GetEXCState(force)) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::GetRegisterContext (buf = "
                                   "%p, len = %llu) error: EXC regs failed to "
                                   "read: %u",
                       buf, (uint64_t)buf_len, kret);
      size = 0;
    } else {
      uint8_t *p = (uint8_t *)buf;
      // Copy the GPR registers
      memcpy(p, &m_state.context.gpr, sizeof(GPR));
      p += sizeof(GPR);

      // Walk around the gaps in the FPU regs
      memcpy(p, &m_state.context.fpu.no_avx.__fpu_fcw, 5);
      p += 5;
      memcpy(p, &m_state.context.fpu.no_avx.__fpu_fop, 8);
      p += 8;
      memcpy(p, &m_state.context.fpu.no_avx.__fpu_dp, 6);
      p += 6;
      memcpy(p, &m_state.context.fpu.no_avx.__fpu_mxcsr, 8);
      p += 8;

      // Work around the padding between the stmm registers as they are 16
      // byte structs with 10 bytes of the value in each
      for (size_t i = 0; i < 8; ++i) {
        memcpy(p, &m_state.context.fpu.no_avx.__fpu_stmm0 + i, 10);
        p += 10;
      }

      if (CPUHasAVX512f() || FORCE_AVX_REGS) {
        for (size_t i = 0; i < 8; ++i) {
          memcpy(p, &m_state.context.fpu.avx512f.__fpu_k0 + i, 8);
          p += 8;
        }
      }

      if (CPUHasAVX() || FORCE_AVX_REGS) {
        // Interleave the XMM and YMMH registers to make the YMM registers
        for (size_t i = 0; i < 8; ++i) {
          memcpy(p, &m_state.context.fpu.avx.__fpu_xmm0 + i, 16);
          p += 16;
          memcpy(p, &m_state.context.fpu.avx.__fpu_ymmh0 + i, 16);
          p += 16;
        }
        if (CPUHasAVX512f() || FORCE_AVX_REGS) {
          for (size_t i = 0; i < 8; ++i) {
            memcpy(p, &m_state.context.fpu.avx512f.__fpu_zmmh0 + i, 32);
            p += 32;
          }
        }
      } else {
        // Copy the XMM registers in a single block
        memcpy(p, &m_state.context.fpu.no_avx.__fpu_xmm0, 8 * 16);
        p += 8 * 16;
      }

      // Copy the exception registers
      memcpy(p, &m_state.context.exc, sizeof(EXC));
      p += sizeof(EXC);

      // make sure we end up with exactly what we think we should have
      size_t bytes_written = p - (uint8_t *)buf;
      UNUSED_IF_ASSERT_DISABLED(bytes_written);
      assert(bytes_written == size);
    }
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchImplI386::GetRegisterContext (buf = %p, len = %llu) => %llu", buf,
      (uint64_t)buf_len, (uint64_t)size);
  // Return the size of the register context even if NULL was passed in
  return size;
}
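
// A round-trip sketch (illustrative only; `arch` is a hypothetical pointer to
// a DNBArchImplI386 instance, not a name used elsewhere in this file).
// GetRegisterContext() reports the packed size when given a NULL buffer, and
// SetRegisterContext() below expects that same packed layout back:
//
//   nub_size_t ctx_size = arch->GetRegisterContext(NULL, 0); // query size
//   std::vector<uint8_t> ctx(ctx_size);
//   arch->GetRegisterContext(ctx.data(), ctx.size());        // snapshot
//   // ... tweak the packed bytes ...
//   arch->SetRegisterContext(ctx.data(), ctx.size());        // write back
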
nub_size_t DNBArchImplI386::SetRegisterContext(const void *buf,
                                               nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context);
  if (buf == NULL || buf_len == 0)
    size = 0;

  if (size) {
    if (size > buf_len)
      size = buf_len;

    const uint8_t *p = (const uint8_t *)buf;
    // Copy the GPR registers
    memcpy(&m_state.context.gpr, p, sizeof(GPR));
    p += sizeof(GPR);

    // Copy fcw through mxcsrmask as there is no padding
    memcpy(&m_state.context.fpu.no_avx.__fpu_fcw, p, 5);
    p += 5;
    memcpy(&m_state.context.fpu.no_avx.__fpu_fop, p, 8);
    p += 8;
    memcpy(&m_state.context.fpu.no_avx.__fpu_dp, p, 6);
    p += 6;
    memcpy(&m_state.context.fpu.no_avx.__fpu_mxcsr, p, 8);
    p += 8;

    // Work around the padding between the stmm registers as they are 16
    // byte structs with 10 bytes of the value in each
    for (size_t i = 0; i < 8; ++i) {
      memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + i, p, 10);
      p += 10;
    }

    if (CPUHasAVX512f() || FORCE_AVX_REGS) {
      for (size_t i = 0; i < 8; ++i) {
        memcpy(&m_state.context.fpu.avx512f.__fpu_k0 + i, p, 8);
        p += 8;
      }
    }

    if (CPUHasAVX() || FORCE_AVX_REGS) {
      // Interleave the XMM and YMMH registers to make the YMM registers
      for (size_t i = 0; i < 8; ++i) {
        memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + i, p, 16);
        p += 16;
        memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + i, p, 16);
        p += 16;
      }

      if (CPUHasAVX512f() || FORCE_AVX_REGS) {
        for (size_t i = 0; i < 8; ++i) {
          memcpy(&m_state.context.fpu.avx512f.__fpu_zmmh0 + i, p, 32);
          p += 32;
        }
      }
    } else {
      // Copy the XMM registers in a single block
      memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0, p, 8 * 16);
      p += 8 * 16;
    }

    // Copy the exception registers
    memcpy(&m_state.context.exc, p, sizeof(EXC));
    p += sizeof(EXC);

    // make sure we end up with exactly what we think we should have
    size_t bytes_written = p - (const uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
    kern_return_t kret;
    if ((kret = SetGPRState()) != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = "
                                   "%p, len = %llu) error: GPR regs failed to "
                                   "write: %u",
                       buf, (uint64_t)buf_len, kret);
    if ((kret = SetFPUState()) != KERN_SUCCESS)
      DNBLogThreadedIf(
          LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = %p, len = "
                      "%llu) error: %s regs failed to write: %u",
          buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
    if ((kret = SetEXCState()) != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SetRegisterContext (buf = "
                                   "%p, len = %llu) error: EXC regs failed to "
                                   "write: %u",
                       buf, (uint64_t)buf_len, kret);
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchImplI386::SetRegisterContext (buf = %p, len = %llu) => %llu", buf,
      (uint64_t)buf_len, (uint64_t)size);
  return size;
}

uint32_t DNBArchImplI386::SaveRegisterState() {
  kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
  DNBLogThreadedIf(
      LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
                  "(SetGPRState() for stop_count = %u)",
      m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());

  bool force = true;

  if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SaveRegisterState () error: "
                                 "GPR regs failed to read: %u ",
                     kret);
  } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::SaveRegisterState () error: "
                                 "%s regs failed to read: %u",
                     CPUHasAVX() ? "AVX" : "FPU", kret);
  } else {
    const uint32_t save_id = GetNextRegisterStateSaveID();
    m_saved_register_states[save_id] = m_state.context;
    return save_id;
  }
  return 0;
}

bool DNBArchImplI386::RestoreRegisterState(uint32_t save_id) {
  SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
  if (pos != m_saved_register_states.end()) {
    m_state.context.gpr = pos->second.gpr;
    m_state.context.fpu = pos->second.fpu;
    m_state.context.exc = pos->second.exc;
    m_state.SetError(e_regSetGPR, Read, 0);
    m_state.SetError(e_regSetFPU, Read, 0);
    m_state.SetError(e_regSetEXC, Read, 0);
    kern_return_t kret;
    bool success = true;
    if ((kret = SetGPRState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::RestoreRegisterState "
                                   "(save_id = %u) error: GPR regs failed to "
                                   "write: %u",
                       save_id, kret);
      success = false;
    } else if ((kret = SetFPUState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchImplI386::RestoreRegisterState "
                                   "(save_id = %u) error: %s regs failed to "
                                   "write: %u",
                       save_id, CPUHasAVX() ? "AVX" : "FPU", kret);
      success = false;
    }
    m_saved_register_states.erase(pos);
    return success;
  }
  return false;
}

kern_return_t DNBArchImplI386::GetRegisterState(int set, bool force) {
  switch (set) {
  case e_regSetALL:
    return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
  case e_regSetGPR:
    return GetGPRState(force);
  case e_regSetFPU:
    return GetFPUState(force);
  case e_regSetEXC:
    return GetEXCState(force);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

kern_return_t DNBArchImplI386::SetRegisterState(int set) {
  // Make sure we have a valid context to set.
  if (RegisterSetStateIsValid(set)) {
    switch (set) {
    case e_regSetALL:
      return SetGPRState() | SetFPUState() | SetEXCState();
    case e_regSetGPR:
      return SetGPRState();
    case e_regSetFPU:
      return SetFPUState();
    case e_regSetEXC:
      return SetEXCState();
    default:
      break;
    }
  }
  return KERN_INVALID_ARGUMENT;
}

bool DNBArchImplI386::RegisterSetStateIsValid(int set) const {
  return m_state.RegsAreValid(set);
}

#endif // #if defined(__i386__) || defined(__x86_64__)