//===-- DNBArchImplARM64.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)

#include "MacOSX/arm64/DNBArchImplARM64.h"

#if defined(ARM_THREAD_STATE64_COUNT)

#include "DNB.h"
#include "DNBBreakpoint.h"
#include "DNBLog.h"
#include "DNBRegisterInfo.h"
#include "MacOSX/MachProcess.h"
#include "MacOSX/MachThread.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif

// Break only in privileged or user mode
// (PAC bits in the DBGWCRn_EL1 watchpoint control register)
#define S_USER ((uint32_t)(2u << 1))

#define BCR_ENABLE ((uint32_t)(1u))
#define WCR_ENABLE ((uint32_t)(1u))

// Watchpoint load/store
// (LSC bits in the DBGWCRn_EL1 watchpoint control register)
#define WCR_LOAD ((uint32_t)(1u << 3))
#define WCR_STORE ((uint32_t)(1u << 4))

// Enable breakpoint, watchpoint, and vector catch debug exceptions.
// (MDE bit in the MDSCR_EL1 register. Equivalent to the MDBGen bit in
// DBGDSCRext in AArch32.)
#define MDE_ENABLE ((uint32_t)(1u << 15))

// Single instruction step
// (SS bit in the MDSCR_EL1 register)
#define SS_ENABLE ((uint32_t)(1u))

static const uint8_t g_arm64_breakpoint_opcode[] = {
    0x00, 0x00, 0x20, 0xD4}; // "brk #0", 0xd4200000 in little-endian byte order

// If we need to set one logical watchpoint by using
// two hardware watchpoint registers, the watchpoint
// will be split into a "high" and "low" watchpoint.
// Record both of them in the LoHi array.

// It's safe to initialize to all 0's since
// hi > lo and therefore LoHi[i] cannot be 0.
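// For example, if a logical watchpoint ends up split across hardware slots
// 1 (low half) and 3 (high half), EnableHardwareWatchpoint() below records
// LoHi[1] = 3 so the pair can be disabled and re-enabled together.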
static uint32_t LoHi[16] = {0};

void DNBArchMachARM64::Initialize() {
  DNBArchPluginInfo arch_plugin_info = {
      CPU_TYPE_ARM64, DNBArchMachARM64::Create,
      DNBArchMachARM64::GetRegisterSetInfo,
      DNBArchMachARM64::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);

  DNBArchPluginInfo arch_plugin_info_32 = {
      CPU_TYPE_ARM64_32, DNBArchMachARM64::Create,
      DNBArchMachARM64::GetRegisterSetInfo,
      DNBArchMachARM64::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info_32);
}

DNBArchProtocol *DNBArchMachARM64::Create(MachThread *thread) {
  DNBArchMachARM64 *obj = new DNBArchMachARM64(thread);

  return obj;
}

const uint8_t *
DNBArchMachARM64::SoftwareBreakpointOpcode(nub_size_t byte_size) {
  return g_arm64_breakpoint_opcode;
}

uint32_t DNBArchMachARM64::GetCPUType() { return CPU_TYPE_ARM64; }

uint64_t DNBArchMachARM64::GetPC(uint64_t failValue) {
  // Get program counter
  if (GetGPRState(false) == KERN_SUCCESS)
#if defined(__LP64__)
    return arm_thread_state64_get_pc(m_state.context.gpr);
#else
    return m_state.context.gpr.__pc;
#endif
  return failValue;
}

kern_return_t DNBArchMachARM64::SetPC(uint64_t value) {
  // Get program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
#if defined(__LP64__)
#if __has_feature(ptrauth_calls)
    // The incoming value could be garbage. Strip it to avoid
    // trapping when it gets resigned in the thread state.
    value = (uint64_t)ptrauth_strip((void *)value, ptrauth_key_function_pointer);
    value = (uint64_t)ptrauth_sign_unauthenticated(
        (void *)value, ptrauth_key_function_pointer, 0);
#endif
    arm_thread_state64_set_pc_fptr(m_state.context.gpr, (void *)value);
#else
    m_state.context.gpr.__pc = value;
#endif
    err = SetGPRState();
  }
  return err == KERN_SUCCESS;
}

uint64_t DNBArchMachARM64::GetSP(uint64_t failValue) {
  // Get stack pointer
  if (GetGPRState(false) == KERN_SUCCESS)
#if defined(__LP64__)
    return arm_thread_state64_get_sp(m_state.context.gpr);
#else
    return m_state.context.gpr.__sp;
#endif
  return failValue;
}

kern_return_t DNBArchMachARM64::GetGPRState(bool force) {
  int set = e_regSetGPR;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetGPRCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64,
                         (thread_state_t)&m_state.context.gpr, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
    uint64_t *x = &m_state.context.gpr.__x[0];
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs"
        "\n x0=%16.16llx" "\n x1=%16.16llx" "\n x2=%16.16llx"
        "\n x3=%16.16llx" "\n x4=%16.16llx" "\n x5=%16.16llx"
        "\n x6=%16.16llx" "\n x7=%16.16llx" "\n x8=%16.16llx"
        "\n x9=%16.16llx" "\n x10=%16.16llx" "\n x11=%16.16llx"
        "\n x12=%16.16llx" "\n x13=%16.16llx" "\n x14=%16.16llx"
        "\n x15=%16.16llx" "\n x16=%16.16llx" "\n x17=%16.16llx"
        "\n x18=%16.16llx" "\n x19=%16.16llx" "\n x20=%16.16llx"
        "\n x21=%16.16llx" "\n x22=%16.16llx" "\n x23=%16.16llx"
        "\n x24=%16.16llx" "\n x25=%16.16llx" "\n x26=%16.16llx"
        "\n x27=%16.16llx" "\n x28=%16.16llx"
        "\n fp=%16.16llx" "\n lr=%16.16llx" "\n sp=%16.16llx"
        "\n pc=%16.16llx" "\n cpsr=%8.8x",
        m_thread->MachPortNumber(), e_regSetGPR, e_regSetGPRCount, kret, count,
        x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10],
        x[11], x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20],
        x[21], x[22], x[23], x[24], x[25], x[26], x[27], x[28],
#if defined(__LP64__)
        (uint64_t)arm_thread_state64_get_fp(m_state.context.gpr),
        (uint64_t)arm_thread_state64_get_lr(m_state.context.gpr),
        (uint64_t)arm_thread_state64_get_sp(m_state.context.gpr),
        (uint64_t)arm_thread_state64_get_pc(m_state.context.gpr),
#else
        m_state.context.gpr.__fp, m_state.context.gpr.__lr,
        m_state.context.gpr.__sp, m_state.context.gpr.__pc,
#endif
        m_state.context.gpr.__cpsr);
  }
  m_state.SetError(set, Read, kret);
  return kret;
}

kern_return_t DNBArchMachARM64::GetVFPState(bool force) {
  int set = e_regSetVFP;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetVFPCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64,
                         (thread_state_t)&m_state.context.vfp, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
#if defined(__arm64__) || defined(__aarch64__)
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
        "\n q0 = 0x%16.16llx%16.16llx" "\n q1 = 0x%16.16llx%16.16llx"
        "\n q2 = 0x%16.16llx%16.16llx" "\n q3 = 0x%16.16llx%16.16llx"
        "\n q4 = 0x%16.16llx%16.16llx" "\n q5 = 0x%16.16llx%16.16llx"
        "\n q6 = 0x%16.16llx%16.16llx" "\n q7 = 0x%16.16llx%16.16llx"
        "\n q8 = 0x%16.16llx%16.16llx" "\n q9 = 0x%16.16llx%16.16llx"
        "\n q10 = 0x%16.16llx%16.16llx" "\n q11 = 0x%16.16llx%16.16llx"
        "\n q12 = 0x%16.16llx%16.16llx" "\n q13 = 0x%16.16llx%16.16llx"
        "\n q14 = 0x%16.16llx%16.16llx" "\n q15 = 0x%16.16llx%16.16llx"
        "\n q16 = 0x%16.16llx%16.16llx" "\n q17 = 0x%16.16llx%16.16llx"
        "\n q18 = 0x%16.16llx%16.16llx" "\n q19 = 0x%16.16llx%16.16llx"
        "\n q20 = 0x%16.16llx%16.16llx" "\n q21 = 0x%16.16llx%16.16llx"
        "\n q22 = 0x%16.16llx%16.16llx" "\n q23 = 0x%16.16llx%16.16llx"
        "\n q24 = 0x%16.16llx%16.16llx" "\n q25 = 0x%16.16llx%16.16llx"
        "\n q26 = 0x%16.16llx%16.16llx" "\n q27 = 0x%16.16llx%16.16llx"
        "\n q28 = 0x%16.16llx%16.16llx" "\n q29 = 0x%16.16llx%16.16llx"
        "\n q30 = 0x%16.16llx%16.16llx" "\n q31 = 0x%16.16llx%16.16llx"
        "\n fpsr = 0x%8.8x"
        "\n fpcr = 0x%8.8x\n\n",
        m_thread->MachPortNumber(), e_regSetVFP, e_regSetVFPCount, kret, count,
        ((uint64_t *)&m_state.context.vfp.__v[0])[0], ((uint64_t *)&m_state.context.vfp.__v[0])[1],
        ((uint64_t *)&m_state.context.vfp.__v[1])[0], ((uint64_t *)&m_state.context.vfp.__v[1])[1],
        ((uint64_t *)&m_state.context.vfp.__v[2])[0], ((uint64_t *)&m_state.context.vfp.__v[2])[1],
        ((uint64_t *)&m_state.context.vfp.__v[3])[0], ((uint64_t *)&m_state.context.vfp.__v[3])[1],
        ((uint64_t *)&m_state.context.vfp.__v[4])[0], ((uint64_t *)&m_state.context.vfp.__v[4])[1],
        ((uint64_t *)&m_state.context.vfp.__v[5])[0], ((uint64_t *)&m_state.context.vfp.__v[5])[1],
        ((uint64_t *)&m_state.context.vfp.__v[6])[0], ((uint64_t *)&m_state.context.vfp.__v[6])[1],
        ((uint64_t *)&m_state.context.vfp.__v[7])[0], ((uint64_t *)&m_state.context.vfp.__v[7])[1],
        ((uint64_t *)&m_state.context.vfp.__v[8])[0], ((uint64_t *)&m_state.context.vfp.__v[8])[1],
        ((uint64_t *)&m_state.context.vfp.__v[9])[0], ((uint64_t *)&m_state.context.vfp.__v[9])[1],
        ((uint64_t *)&m_state.context.vfp.__v[10])[0], ((uint64_t *)&m_state.context.vfp.__v[10])[1],
        ((uint64_t *)&m_state.context.vfp.__v[11])[0], ((uint64_t *)&m_state.context.vfp.__v[11])[1],
        ((uint64_t *)&m_state.context.vfp.__v[12])[0], ((uint64_t *)&m_state.context.vfp.__v[12])[1],
        ((uint64_t *)&m_state.context.vfp.__v[13])[0], ((uint64_t *)&m_state.context.vfp.__v[13])[1],
        ((uint64_t *)&m_state.context.vfp.__v[14])[0], ((uint64_t *)&m_state.context.vfp.__v[14])[1],
        ((uint64_t *)&m_state.context.vfp.__v[15])[0], ((uint64_t *)&m_state.context.vfp.__v[15])[1],
        ((uint64_t *)&m_state.context.vfp.__v[16])[0], ((uint64_t *)&m_state.context.vfp.__v[16])[1],
        ((uint64_t *)&m_state.context.vfp.__v[17])[0], ((uint64_t *)&m_state.context.vfp.__v[17])[1],
        ((uint64_t *)&m_state.context.vfp.__v[18])[0], ((uint64_t *)&m_state.context.vfp.__v[18])[1],
        ((uint64_t *)&m_state.context.vfp.__v[19])[0], ((uint64_t *)&m_state.context.vfp.__v[19])[1],
        ((uint64_t *)&m_state.context.vfp.__v[20])[0], ((uint64_t *)&m_state.context.vfp.__v[20])[1],
        ((uint64_t *)&m_state.context.vfp.__v[21])[0], ((uint64_t *)&m_state.context.vfp.__v[21])[1],
        ((uint64_t *)&m_state.context.vfp.__v[22])[0], ((uint64_t *)&m_state.context.vfp.__v[22])[1],
        ((uint64_t *)&m_state.context.vfp.__v[23])[0], ((uint64_t *)&m_state.context.vfp.__v[23])[1],
        ((uint64_t *)&m_state.context.vfp.__v[24])[0], ((uint64_t *)&m_state.context.vfp.__v[24])[1],
        ((uint64_t *)&m_state.context.vfp.__v[25])[0], ((uint64_t *)&m_state.context.vfp.__v[25])[1],
        ((uint64_t *)&m_state.context.vfp.__v[26])[0], ((uint64_t *)&m_state.context.vfp.__v[26])[1],
        ((uint64_t *)&m_state.context.vfp.__v[27])[0], ((uint64_t *)&m_state.context.vfp.__v[27])[1],
        ((uint64_t *)&m_state.context.vfp.__v[28])[0], ((uint64_t *)&m_state.context.vfp.__v[28])[1],
        ((uint64_t *)&m_state.context.vfp.__v[29])[0], ((uint64_t *)&m_state.context.vfp.__v[29])[1],
        ((uint64_t *)&m_state.context.vfp.__v[30])[0], ((uint64_t *)&m_state.context.vfp.__v[30])[1],
        ((uint64_t *)&m_state.context.vfp.__v[31])[0], ((uint64_t *)&m_state.context.vfp.__v[31])[1],
        m_state.context.vfp.__fpsr, m_state.context.vfp.__fpcr);
#endif
  }
  m_state.SetError(set, Read, kret);
  return kret;
}

kern_return_t DNBArchMachARM64::GetEXCState(bool force) {
  int set = e_regSetEXC;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetEXCCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
                         (thread_state_t)&m_state.context.exc, &count);
  m_state.SetError(set, Read, kret);
  return kret;
}

static void DumpDBGState(const arm_debug_state_t &dbg) {
  uint32_t i = 0;
  for (i = 0; i < 16; i++)
    DNBLogThreadedIf(LOG_STEP,
                     "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } "
                     "WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
                     i, i, dbg.__bvr[i], dbg.__bcr[i], i, i, dbg.__wvr[i],
                     dbg.__wcr[i]);
}

kern_return_t DNBArchMachARM64::GetDBGState(bool force) {
  int set = e_regSetDBG;

  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetDBGCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
                         (thread_state_t)&m_state.dbg, &count);
  m_state.SetError(set, Read, kret);

  return kret;
}

kern_return_t DNBArchMachARM64::SetGPRState() {
  int set = e_regSetGPR;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_THREAD_STATE64,
      (thread_state_t)&m_state.context.gpr, e_regSetGPRCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetVFPState() {
  int set = e_regSetVFP;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_NEON_STATE64,
      (thread_state_t)&m_state.context.vfp, e_regSetVFPCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetEXCState() {
  int set = e_regSetEXC;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
      (thread_state_t)&m_state.context.exc, e_regSetEXCCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetDBGState(bool also_set_on_task) {
  int set = e_regSetDBG;
  kern_return_t kret =
      ::thread_set_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
                         (thread_state_t)&m_state.dbg, e_regSetDBGCount);
  if (also_set_on_task) {
    kern_return_t task_kret = task_set_state(
        m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64,
        (thread_state_t)&m_state.dbg, e_regSetDBGCount);
    if (task_kret != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed "
                                        "to set debug control register state: "
                                        "0x%8.8x.",
                       task_kret);
  }
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently

  return kret; // Return the error code
}

void DNBArchMachARM64::ThreadWillResume() {
  // Do we need to step this thread? If so, let the mach thread tell us so.
  if (m_thread->IsStepping()) {
    EnableHardwareSingleStep(true);
  }

  // Disable the triggered watchpoint temporarily before we resume.
  // Plus, we try to enable hardware single step to execute past the
  // instruction which triggered our watchpoint.
  if (m_watchpoint_did_occur) {
    if (m_watchpoint_hw_index >= 0) {
      kern_return_t kret = GetDBGState(false);
      if (kret == KERN_SUCCESS &&
          !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
        // The watchpoint might have been disabled by the user. We don't need
        // to do anything at all to enable hardware single stepping.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        return;
      }

      DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() "
                                        "DisableHardwareWatchpoint(%d) called",
                       m_watchpoint_hw_index);

      // Enable hardware single step to move past the watchpoint-triggering
      // instruction.
      m_watchpoint_resume_single_step_enabled =
          (EnableHardwareSingleStep(true) == KERN_SUCCESS);

      // If we are not able to enable single step to move past the
      // watchpoint-triggering instruction, at least we should reset the two
      // watchpoint member variables so that the next time around this
      // callback function is invoked, the enclosing logical branch is skipped.
      if (!m_watchpoint_resume_single_step_enabled) {
        // Reset the two watchpoint member variables.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        DNBLogThreadedIf(
            LOG_WATCHPOINTS,
            "DNBArchMachARM::ThreadWillResume() failed to enable single step");
      } else
        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() "
                                          "succeeded to enable single step");
    }
  }
}

bool DNBArchMachARM64::NotifyException(MachException::Data &exc) {

  switch (exc.exc_type) {
  default:
    break;
  case EXC_BREAKPOINT:
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) {
      // The data break address is passed as exc_data[1].
      nub_addr_t addr = exc.exc_data[1];
      // Find the hardware index with the side effect of possibly massaging the
      // addr to return the starting address as seen from the debugger side.
      uint32_t hw_index = GetHardwareWatchpointHit(addr);

      // One logical watchpoint was split into two watchpoint locations because
      // it was too big. If the watchpoint exception is indicating the 2nd half
      // of the two-parter, find the address of the 1st half and report that --
      // that's what lldb is going to expect to see.
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException "
                                        "watchpoint %d was hit on address "
                                        "0x%llx",
                       hw_index, (uint64_t)addr);
      const int num_watchpoints = NumSupportedHardwareWatchpoints();
      for (int i = 0; i < num_watchpoints; i++) {
        if (LoHi[i] != 0 && LoHi[i] == hw_index && LoHi[i] != i &&
            GetWatchpointAddressByIndex(i) != INVALID_NUB_ADDRESS) {
          addr = GetWatchpointAddressByIndex(i);
          DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException "
                                            "It is a linked watchpoint; "
                                            "rewritten to index %d addr 0x%llx",
                           LoHi[i], (uint64_t)addr);
        }
      }

      if (hw_index != INVALID_NUB_HW_INDEX) {
        m_watchpoint_did_occur = true;
        m_watchpoint_hw_index = hw_index;
        exc.exc_data[1] = addr;
        // Piggyback the hw_index in the exc.data.
        exc.exc_data.push_back(hw_index);
      }

      return true;
    }
    break;
  }
  return false;
}

bool DNBArchMachARM64::ThreadDidStop() {
  bool success = true;

  m_state.InvalidateAllRegisterStates();

  if (m_watchpoint_resume_single_step_enabled) {
    // Great! We now disable the hardware single step as well as re-enable the
    // hardware watchpoint. See also ThreadWillResume().
    if (EnableHardwareSingleStep(false) == KERN_SUCCESS) {
      if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) {
        ReenableHardwareWatchpoint(m_watchpoint_hw_index);
        m_watchpoint_resume_single_step_enabled = false;
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
      } else {
        DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
                    "is true but (m_watchpoint_did_occur && "
                    "m_watchpoint_hw_index >= 0) does not hold!");
      }
    } else {
      DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
                  "is true but unable to disable single step!");
    }
  }

  // Are we stepping a single instruction?
  if (GetGPRState(true) == KERN_SUCCESS) {
    // We are single stepping, was this the primary thread?
    if (m_thread->IsStepping()) {
      // This was the primary thread, we need to clear the trace
      // bit if so.
      success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
    } else {
      // The MachThread will automatically restore the suspend count
      // in ThreadDidStop(), so we don't need to do anything here if
      // we weren't the primary thread the last time.
    }
  }
  return success;
}

// Set the single step bit in the MDSCR_EL1 debug register.
kern_return_t DNBArchMachARM64::EnableHardwareSingleStep(bool enable) {
  DNBError err;
  DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);

  err = GetGPRState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
    return err.Status();
  }

  err = GetDBGState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
    return err.Status();
  }

#if defined(__LP64__)
  uint64_t pc = arm_thread_state64_get_pc(m_state.context.gpr);
#else
  uint64_t pc = m_state.context.gpr.__pc;
#endif

  if (enable) {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 |= SS_ENABLE;
  } else {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
  }

  return SetDBGState(false);
}

// return 1 if bit "BIT" is set in "value"
static inline uint32_t bit(uint32_t value, uint32_t bit) {
  return (value >> bit) & 1u;
}

// return the bitfield "value[msbit:lsbit]".
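// (For illustration: bits(0x00F0, 7, 4) == 0xF, and bits(0x00F0, 3, 0) == 0.)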
static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit) {
  assert(msbit >= lsbit);
  uint64_t shift_left = sizeof(value) * 8 - 1 - msbit;
  value <<=
      shift_left; // shift anything above the msbit off of the unsigned edge
  value >>= shift_left + lsbit; // shift it back again down to the lsbit
                                // (including undoing any shift from above)
  return value;                 // return our result
}

uint32_t DNBArchMachARM64::NumSupportedHardwareWatchpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many watchpoints are supported dynamically...
  static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
  if (g_num_supported_hw_watchpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW breakpoints
    g_num_supported_hw_watchpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_watchpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
    } else {
      // For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver
      // runs in EL0 so it can't access that reg. The kernel should have
      // filled in the sysctls based on it though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is zero
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_watchpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw watchpoints via asm(): %d",
                       g_num_supported_hw_watchpoints);
#endif
    }
  }
  return g_num_supported_hw_watchpoints;
}

uint32_t DNBArchMachARM64::NumSupportedHardwareBreakpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many breakpoints are supported dynamically...
  static uint32_t g_num_supported_hw_breakpoints = UINT_MAX;
  if (g_num_supported_hw_breakpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW breakpoints
    g_num_supported_hw_breakpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_breakpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n);
    } else {
      // For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver
      // runs in EL0 so it can't access that reg. The kernel should have
      // filled in the sysctls based on it though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is zero
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_breakpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw breakpoint via asm(): %d",
                       g_num_supported_hw_breakpoints);
#endif
    }
  }
  return g_num_supported_hw_breakpoints;
}

uint32_t DNBArchMachARM64::EnableHardwareBreakpoint(nub_addr_t addr,
                                                    nub_size_t size,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareBreakpoint(addr = "
                   "0x%8.8llx, size = %zu)",
                   (uint64_t)addr, size);

  const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();

  nub_addr_t aligned_bp_address = addr;
  uint32_t control_value = 0;

  switch (size) {
  case 2:
    control_value = (0x3 << 5) | 7;
    aligned_bp_address &= ~1;
    break;
  case 4:
    control_value = (0xfu << 5) | 7;
    aligned_bp_address &= ~3;
    break;
  }

  // Read the debug state
  kern_return_t kret = GetDBGState(false);
  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    for (i = 0; i < num_hw_breakpoints; ++i) {
      if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0)
        break; // We found an available hw breakpoint slot (in i)
    }

    // See if we found an available hw breakpoint slot above
    if (i < num_hw_breakpoints) {
      m_state.dbg.__bvr[i] = aligned_bp_address;
      m_state.dbg.__bcr[i] = control_value;

      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::EnableHardwareBreakpoint() "
                       "adding breakpoint on address 0x%llx with control "
                       "register value 0x%x",
                       (uint64_t)m_state.dbg.__bvr[i],
                       (uint32_t)m_state.dbg.__bcr[i]);

      // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
      // automatically, don't need to do it here.
      kret = SetDBGState(also_set_on_task);

      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareBreakpoint() "
                                        "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareBreakpoint(): All "
                                        "hardware resources (%u) are in use.",
                       num_hw_breakpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}

uint32_t DNBArchMachARM64::EnableHardwareWatchpoint(nub_addr_t addr,
                                                    nub_size_t size, bool read,
                                                    bool write,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint(addr = "
                   "0x%8.8llx, size = %zu, read = %u, write = %u)",
                   (uint64_t)addr, size, read, write);

  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  // Can't watch zero bytes
  if (size == 0)
    return INVALID_NUB_HW_INDEX;

  // We must watch for either read or write
  if (read == false && write == false)
    return INVALID_NUB_HW_INDEX;

  // Otherwise, can't watch more than 8 bytes per WVR/WCR pair
  if (size > 8)
    return INVALID_NUB_HW_INDEX;

  // Aarch64 watchpoints are in one of two forms: (1) 1-8 bytes, aligned to
  // an 8 byte address, or (2) a power-of-two size region of memory; minimum
  // 8 bytes, maximum 2GB; the starting address must be aligned to that power
  // of two.
  //
  // For (1), 1-8 byte watchpoints, using the Byte Address Selector field in
  // DBGWCR<n>.BAS.  Any of the bytes may be watched, but if multiple bytes
  // are watched, the bytes selected must be contiguous.  The start address
  // watched must be doubleword (8-byte) aligned; if the start address is
  // word (4-byte) aligned, only 4 bytes can be watched.
  //
  // For (2), the MASK field in DBGWCR<n>.MASK is used.
  //
  // See the ARM ARM, section "Watchpoint exceptions", and more specifically,
  // "Watchpoint data address comparisons".
  //
  // debugserver today only supports (1) - the Byte Address Selector 1-8 byte
  // watchpoints that are 8-byte aligned.  To support larger watchpoints,
  // debugserver would need to interpret the mach exception when the watched
  // region was hit, see if the address accessed lies within the subset
  // of the power-of-two region that lldb asked us to watch (v. ARM ARM,
  // "Determining the memory location that caused a Watchpoint exception"),
  // and silently resume the inferior (disable watchpoint, stepi, re-enable
  // watchpoint) if the address lies outside the region that lldb asked us
  // to watch.
  //
  // Alternatively, lldb would need to be prepared for a larger region
  // being watched than it requested, and silently resume the inferior if
  // the accessed address is outside the region lldb wants to watch.
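  // For illustration, with the BAS-style form (1) implemented below: a 4-byte
  // watchpoint at address 0x100c is programmed as WVR = 0x1008 (the
  // doubleword-aligned address) with byte address select bits 0b11110000,
  // i.e. bytes 4-7 of that doubleword.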
  nub_addr_t aligned_wp_address = addr & ~0x7;
  uint32_t addr_dword_offset = addr & 0x7;

  // Do we need to split up this logical watchpoint into two hardware
  // watchpoint registers? e.g. a watchpoint of length 4 on address 6. We need
  // to do this with
  //    one watchpoint on address 0 with bytes 6 & 7 being monitored
  //    one watchpoint on address 8 with bytes 0 & 1 being monitored

  if (addr_dword_offset + size > 8) {
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                      "EnableHardwareWatchpoint(addr = "
                                      "0x%8.8llx, size = %zu) needs two "
                                      "hardware watchpoints slots to monitor",
                     (uint64_t)addr, size);
    int low_watchpoint_size = 8 - addr_dword_offset;
    int high_watchpoint_size = addr_dword_offset + size - 8;

    uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read,
                                           write, also_set_on_task);
    if (lo == INVALID_NUB_HW_INDEX)
      return INVALID_NUB_HW_INDEX;
    uint32_t hi =
        EnableHardwareWatchpoint(aligned_wp_address + 8, high_watchpoint_size,
                                 read, write, also_set_on_task);
    if (hi == INVALID_NUB_HW_INDEX) {
      DisableHardwareWatchpoint(lo, also_set_on_task);
      return INVALID_NUB_HW_INDEX;
    }
    // Tag this lo->hi mapping in our database.
    LoHi[lo] = hi;
    return lo;
  }

  // At this point
  //  1 aligned_wp_address is the requested address rounded down to 8-byte
  //    alignment
  //  2 addr_dword_offset is the offset into that double word (8-byte) region
  //    that we are watching
  //  3 size is the number of bytes within that 8-byte region that we are
  //    watching

  // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the
  // above. The bit shift and subtraction will give us 0b11 for 2, 0b1111 for
  // 4, etc, up to 0b11111111 for 8. Then we shift those bits left by the
  // offset into this dword that we are interested in.
  // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of
  // 0b11110000.
  uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset;

  // Read the debug state
  kern_return_t kret = GetDBGState(false);

  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    for (i = 0; i < num_hw_watchpoints; ++i) {
      if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
        break; // We found an available hw watchpoint slot (in i)
    }

    // See if we found an available hw watchpoint slot above
    if (i < num_hw_watchpoints) {
      // DumpDBGState(m_state.dbg);

      // Clear any previous LoHi joined-watchpoint that may have been in use
      LoHi[i] = 0;

      // shift our Byte Address Select bits up to the correct bit range for the
      // DBGWCRn_EL1
      byte_address_select = byte_address_select << 5;

      // Make sure bits 1:0 are clear in our address
      m_state.dbg.__wvr[i] = aligned_wp_address;   // DVA (Data Virtual Address)
      m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow
                                                   // the DVA that we will watch
                             S_USER |              // Stop only in user mode
                             (read ? WCR_LOAD : 0) |   // Stop on read access?
                             (write ? WCR_STORE : 0) | // Stop on write access?
                             WCR_ENABLE;               // Enable this watchpoint

      DNBLogThreadedIf(
          LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint() "
                           "adding watchpoint on address 0x%llx with control "
                           "register value 0x%x",
          (uint64_t)m_state.dbg.__wvr[i], (uint32_t)m_state.dbg.__wcr[i]);

      // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
      // automatically, don't need to do it here.

      kret = SetDBGState(also_set_on_task);
      // DumpDBGState(m_state.dbg);

      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint() "
                                        "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint(): All "
                                        "hardware resources (%u) are in use.",
                       num_hw_watchpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}

bool DNBArchMachARM64::ReenableHardwareWatchpoint(uint32_t hw_index) {
  // If this logical watchpoint # is actually implemented using
  // two hardware watchpoint registers, re-enable both of them.

  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return ReenableHardwareWatchpoint_helper(hw_index) &&
           ReenableHardwareWatchpoint_helper(LoHi[hw_index]);
  } else {
    return ReenableHardwareWatchpoint_helper(hw_index);
  }
}

bool DNBArchMachARM64::ReenableHardwareWatchpoint_helper(uint32_t hw_index) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
  m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;

  DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                    "ReenableHardwareWatchpoint( %u ) - WVR%u = "
                                    "0x%8.8llx WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
  // automatically, don't need to do it here.

  kret = SetDBGState(false);

  return (kret == KERN_SUCCESS);
}

bool DNBArchMachARM64::DisableHardwareWatchpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task) &&
           DisableHardwareWatchpoint_helper(LoHi[hw_index], also_set_on_task);
  } else {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task);
  }
}

bool DNBArchMachARM64::DisableHardwareWatchpoint_helper(uint32_t hw_index,
                                                        bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
  m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];

  m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
  DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                    "DisableHardwareWatchpoint( %u ) - WVR%u = "
                                    "0x%8.8llx WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  kret = SetDBGState(also_set_on_task);

  return (kret == KERN_SUCCESS);
}

bool DNBArchMachARM64::DisableHardwareBreakpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_disabled_breakpoints[hw_index].addr = m_state.dbg.__bvr[hw_index];
  m_disabled_breakpoints[hw_index].control = m_state.dbg.__bcr[hw_index];

  m_state.dbg.__bcr[hw_index] = 0;
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "DisableHardwareBreakpoint( %u ) - BVR%u = "
                   "0x%8.8llx BCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__bvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__bcr[hw_index]);

  kret = SetDBGState(also_set_on_task);

  return (kret == KERN_SUCCESS);
}

// This is for checking the Byte Address Select bits in the DBGWCRn_EL1 control
// register.
// Returns -1 if the trailing bit patterns are not one of:
// { 0b???????1, 0b??????10, 0b?????100, 0b????1000, 0b???10000, 0b??100000,
//   0b?1000000, 0b10000000 }.
static inline int32_t LowestBitSet(uint32_t val) {
  for (unsigned i = 0; i < 8; ++i) {
    if (bit(val, i))
      return i;
  }
  return -1;
}

// Iterate through the debug registers; return the index of the first
// watchpoint whose address matches.
// As a side effect, the starting address as understood by the debugger is
// returned, which could be different from 'addr' passed as an in/out argument.
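// For example, if WVR0 = 0x1000 with byte address select bits 0b11110000 and
// the faulting access address is 0x1005, slot 0 is returned and 'addr' is
// rewritten to 0x1004 (WVR0 plus the lowest selected byte).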
uint32_t DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr) {
  // Read the debug state
  kern_return_t kret = GetDBGState(true);
  // DumpDBGState(m_state.dbg);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
      kret);
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx",
                   (uint64_t)addr);

  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.dbg;
    uint32_t i, num = NumSupportedHardwareWatchpoints();
    for (i = 0; i < num; ++i) {
      nub_addr_t wp_addr = GetWatchAddress(debug_state, i);
      uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5);

      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "GetHardwareWatchpointHit() slot: %u "
                                        "(addr = 0x%llx; byte_mask = 0x%x)",
                       i, static_cast<uint64_t>(wp_addr), byte_mask);

      if (!IsWatchpointEnabled(debug_state, i))
        continue;

      if (bits(wp_addr, 48, 3) != bits(addr, 48, 3))
        continue;

      // Sanity check the byte_mask
      int32_t lsb = LowestBitSet(byte_mask);
      if (lsb < 0)
        continue;

      uint64_t byte_to_match = bits(addr, 2, 0);

      if (byte_mask & (1 << byte_to_match)) {
        addr = wp_addr + lsb;
        return i;
      }
    }
  }
  return INVALID_NUB_HW_INDEX;
}

nub_addr_t DNBArchMachARM64::GetWatchpointAddressByIndex(uint32_t hw_index) {
  kern_return_t kret = GetDBGState(true);
  if (kret != KERN_SUCCESS)
    return INVALID_NUB_ADDRESS;
  const uint32_t num = NumSupportedHardwareWatchpoints();
  if (hw_index >= num)
    return INVALID_NUB_ADDRESS;
  if (IsWatchpointEnabled(m_state.dbg, hw_index))
    return GetWatchAddress(m_state.dbg, hw_index);
  return INVALID_NUB_ADDRESS;
}

bool DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state,
                                           uint32_t hw_index) {
  // Watchpoint Control Registers, bitfield definitions
  // ...
  //    Bits    Value    Description
  //    [0]     0        Watchpoint disabled
  //            1        Watchpoint enabled.
  return (debug_state.__wcr[hw_index] & 1u);
}

nub_addr_t DNBArchMachARM64::GetWatchAddress(const DBG &debug_state,
                                             uint32_t hw_index) {
  // Watchpoint Value Registers, bitfield definitions
  //    Bits        Description
  //    [63:2]      Watchpoint address
  //    [1:0]       RAZ/SBZP
  return bits(debug_state.__wvr[hw_index], 63, 0);
}

// Register information definitions for 64 bit ARMv8.
enum gpr_regnums {
  gpr_x0 = 0, gpr_x1, gpr_x2, gpr_x3, gpr_x4, gpr_x5, gpr_x6, gpr_x7, gpr_x8,
  gpr_x9, gpr_x10, gpr_x11, gpr_x12, gpr_x13, gpr_x14, gpr_x15, gpr_x16,
  gpr_x17, gpr_x18, gpr_x19, gpr_x20, gpr_x21, gpr_x22, gpr_x23, gpr_x24,
  gpr_x25, gpr_x26, gpr_x27, gpr_x28,
  gpr_fp, gpr_x29 = gpr_fp,
  gpr_lr, gpr_x30 = gpr_lr,
  gpr_sp, gpr_x31 = gpr_sp,
  gpr_pc, gpr_cpsr,
  gpr_w0, gpr_w1, gpr_w2, gpr_w3, gpr_w4, gpr_w5, gpr_w6, gpr_w7, gpr_w8,
  gpr_w9, gpr_w10, gpr_w11, gpr_w12, gpr_w13, gpr_w14, gpr_w15, gpr_w16,
  gpr_w17, gpr_w18, gpr_w19, gpr_w20, gpr_w21, gpr_w22, gpr_w23, gpr_w24,
  gpr_w25, gpr_w26, gpr_w27, gpr_w28
};

enum {
  vfp_v0 = 0, vfp_v1, vfp_v2, vfp_v3, vfp_v4, vfp_v5, vfp_v6, vfp_v7, vfp_v8,
  vfp_v9, vfp_v10, vfp_v11, vfp_v12, vfp_v13, vfp_v14, vfp_v15, vfp_v16,
  vfp_v17, vfp_v18, vfp_v19, vfp_v20, vfp_v21, vfp_v22, vfp_v23, vfp_v24,
  vfp_v25, vfp_v26, vfp_v27, vfp_v28, vfp_v29, vfp_v30, vfp_v31,
  vfp_fpsr, vfp_fpcr,

  // lower 32 bits of the corresponding vfp_v<n> reg.
  vfp_s0, vfp_s1, vfp_s2, vfp_s3, vfp_s4, vfp_s5, vfp_s6, vfp_s7, vfp_s8,
  vfp_s9, vfp_s10, vfp_s11, vfp_s12, vfp_s13, vfp_s14, vfp_s15, vfp_s16,
  vfp_s17, vfp_s18, vfp_s19, vfp_s20, vfp_s21, vfp_s22, vfp_s23, vfp_s24,
  vfp_s25, vfp_s26, vfp_s27, vfp_s28, vfp_s29, vfp_s30, vfp_s31,

  // lower 64 bits of the corresponding vfp_v<n> reg.
  vfp_d0, vfp_d1, vfp_d2, vfp_d3, vfp_d4, vfp_d5, vfp_d6, vfp_d7, vfp_d8,
  vfp_d9, vfp_d10, vfp_d11, vfp_d12, vfp_d13, vfp_d14, vfp_d15, vfp_d16,
  vfp_d17, vfp_d18, vfp_d19, vfp_d20, vfp_d21, vfp_d22, vfp_d23, vfp_d24,
  vfp_d25, vfp_d26, vfp_d27, vfp_d28, vfp_d29, vfp_d30, vfp_d31
};

enum { exc_far = 0, exc_esr, exc_exception };

// These numbers from the "DWARF for the ARM 64-bit Architecture (AArch64)"
// document.

enum {
  dwarf_x0 = 0, dwarf_x1, dwarf_x2, dwarf_x3, dwarf_x4, dwarf_x5, dwarf_x6,
  dwarf_x7, dwarf_x8, dwarf_x9, dwarf_x10, dwarf_x11, dwarf_x12, dwarf_x13,
  dwarf_x14, dwarf_x15, dwarf_x16, dwarf_x17, dwarf_x18, dwarf_x19, dwarf_x20,
  dwarf_x21, dwarf_x22, dwarf_x23, dwarf_x24, dwarf_x25, dwarf_x26, dwarf_x27,
  dwarf_x28, dwarf_x29, dwarf_x30, dwarf_x31,
  dwarf_pc = 32,
  dwarf_elr_mode = 33,
  dwarf_fp = dwarf_x29,
  dwarf_lr = dwarf_x30,
  dwarf_sp = dwarf_x31,
  // 34-63 reserved

  // V0-V31 (128 bit vector registers)
  dwarf_v0 = 64, dwarf_v1, dwarf_v2, dwarf_v3, dwarf_v4, dwarf_v5, dwarf_v6,
  dwarf_v7, dwarf_v8, dwarf_v9, dwarf_v10, dwarf_v11, dwarf_v12, dwarf_v13,
  dwarf_v14, dwarf_v15, dwarf_v16, dwarf_v17, dwarf_v18, dwarf_v19, dwarf_v20,
  dwarf_v21, dwarf_v22, dwarf_v23, dwarf_v24, dwarf_v25, dwarf_v26, dwarf_v27,
  dwarf_v28, dwarf_v29, dwarf_v30, dwarf_v31

  // 96-127 reserved
};

enum {
  debugserver_gpr_x0 = 0, debugserver_gpr_x1, debugserver_gpr_x2,
  debugserver_gpr_x3, debugserver_gpr_x4, debugserver_gpr_x5,
  debugserver_gpr_x6, debugserver_gpr_x7, debugserver_gpr_x8,
  debugserver_gpr_x9, debugserver_gpr_x10, debugserver_gpr_x11,
  debugserver_gpr_x12, debugserver_gpr_x13, debugserver_gpr_x14,
  debugserver_gpr_x15, debugserver_gpr_x16, debugserver_gpr_x17,
  debugserver_gpr_x18, debugserver_gpr_x19, debugserver_gpr_x20,
  debugserver_gpr_x21, debugserver_gpr_x22, debugserver_gpr_x23,
  debugserver_gpr_x24, debugserver_gpr_x25, debugserver_gpr_x26,
  debugserver_gpr_x27, debugserver_gpr_x28,
  debugserver_gpr_fp, // x29
  debugserver_gpr_lr, // x30
  debugserver_gpr_sp, // sp aka xsp
  debugserver_gpr_pc,
  debugserver_gpr_cpsr,
  debugserver_vfp_v0, debugserver_vfp_v1, debugserver_vfp_v2,
  debugserver_vfp_v3, debugserver_vfp_v4, debugserver_vfp_v5,
  debugserver_vfp_v6, debugserver_vfp_v7, debugserver_vfp_v8,
  debugserver_vfp_v9, debugserver_vfp_v10, debugserver_vfp_v11,
  debugserver_vfp_v12, debugserver_vfp_v13, debugserver_vfp_v14,
  debugserver_vfp_v15, debugserver_vfp_v16, debugserver_vfp_v17,
  debugserver_vfp_v18, debugserver_vfp_v19, debugserver_vfp_v20,
  debugserver_vfp_v21, debugserver_vfp_v22, debugserver_vfp_v23,
  debugserver_vfp_v24, debugserver_vfp_v25, debugserver_vfp_v26,
  debugserver_vfp_v27, debugserver_vfp_v28, debugserver_vfp_v29,
  debugserver_vfp_v30, debugserver_vfp_v31,
  debugserver_vfp_fpsr,
  debugserver_vfp_fpcr
};

const char *g_contained_x0[]{"x0", NULL};
const char *g_contained_x1[]{"x1", NULL};
const char *g_contained_x2[]{"x2", NULL};
const char *g_contained_x3[]{"x3", NULL};
const char *g_contained_x4[]{"x4", NULL};
const char *g_contained_x5[]{"x5", NULL};
const char *g_contained_x6[]{"x6", NULL};
const char *g_contained_x7[]{"x7", NULL};
const char *g_contained_x8[]{"x8", NULL};
const char *g_contained_x9[]{"x9", NULL};
const char *g_contained_x10[]{"x10", NULL};
const char *g_contained_x11[]{"x11", NULL};
const char *g_contained_x12[]{"x12", NULL};
const char *g_contained_x13[]{"x13", NULL};
const char *g_contained_x14[]{"x14", NULL};
const char *g_contained_x15[]{"x15", NULL};
const char *g_contained_x16[]{"x16", NULL};
const char *g_contained_x17[]{"x17", NULL};
const char *g_contained_x18[]{"x18", NULL};
const char *g_contained_x19[]{"x19", NULL};
const char *g_contained_x20[]{"x20", NULL};
const char *g_contained_x21[]{"x21", NULL};
const char *g_contained_x22[]{"x22", NULL};
const char *g_contained_x23[]{"x23", NULL};
const char *g_contained_x24[]{"x24", NULL};
const char *g_contained_x25[]{"x25", NULL};
const char *g_contained_x26[]{"x26", NULL};
const char *g_contained_x27[]{"x27", NULL};
const char *g_contained_x28[]{"x28", NULL};

const char *g_invalidate_x0[]{"x0", "w0", NULL};
const char *g_invalidate_x1[]{"x1", "w1", NULL};
const char *g_invalidate_x2[]{"x2", "w2", NULL};
const char *g_invalidate_x3[]{"x3", "w3", NULL};
const char *g_invalidate_x4[]{"x4", "w4", NULL};
const char *g_invalidate_x5[]{"x5", "w5", NULL};
const char *g_invalidate_x6[]{"x6", "w6", NULL};
const char *g_invalidate_x7[]{"x7", "w7", NULL};
const char *g_invalidate_x8[]{"x8", "w8", NULL};
const char *g_invalidate_x9[]{"x9", "w9", NULL};
const char *g_invalidate_x10[]{"x10", "w10", NULL};
const char *g_invalidate_x11[]{"x11", "w11", NULL};
const char *g_invalidate_x12[]{"x12", "w12", NULL};
const char *g_invalidate_x13[]{"x13", "w13", NULL};
const char *g_invalidate_x14[]{"x14", "w14", NULL};
const char *g_invalidate_x15[]{"x15", "w15", NULL};
const char *g_invalidate_x16[]{"x16", "w16", NULL};
const char *g_invalidate_x17[]{"x17", "w17", NULL};
const char *g_invalidate_x18[]{"x18", "w18", NULL};
const char *g_invalidate_x19[]{"x19", "w19", NULL};
const char *g_invalidate_x20[]{"x20", "w20", NULL};
const char *g_invalidate_x21[]{"x21", "w21", NULL};
const char *g_invalidate_x22[]{"x22", "w22", NULL};
const char *g_invalidate_x23[]{"x23", "w23", NULL};
const char *g_invalidate_x24[]{"x24", "w24", NULL};
const char *g_invalidate_x25[]{"x25", "w25", NULL};
const char *g_invalidate_x26[]{"x26", "w26", NULL};
const char *g_invalidate_x27[]{"x27", "w27", NULL};
const char *g_invalidate_x28[]{"x28", "w28", NULL};

#define GPR_OFFSET_IDX(idx) (offsetof(DNBArchMachARM64::GPR, __x[idx]))

#define GPR_OFFSET_NAME(reg) (offsetof(DNBArchMachARM64::GPR, __##reg))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.
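// For example, DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1) below
// expands to an entry describing x0: an 8-byte Uint register at the offset of
// __x[0] in the GPR state, with DWARF/debugserver numbers dwarf_x0 and
// debugserver_gpr_x0, and an invalidation list of {x0, w0}.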
1531 #define DEFINE_GPR_IDX(idx, reg, alt, gen) \ 1532 { \ 1533 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx), \ 1534 dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, \ 1535 g_invalidate_x##idx \ 1536 } 1537 #define DEFINE_GPR_NAME(reg, alt, gen) \ 1538 { \ 1539 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg), \ 1540 dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL \ 1541 } 1542 #define DEFINE_PSEUDO_GPR_IDX(idx, reg) \ 1543 { \ 1544 e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM, \ 1545 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \ 1546 g_contained_x##idx, g_invalidate_x##idx \ 1547 } 1548 1549 //_STRUCT_ARM_THREAD_STATE64 1550 //{ 1551 // uint64_t x[29]; /* General purpose registers x0-x28 */ 1552 // uint64_t fp; /* Frame pointer x29 */ 1553 // uint64_t lr; /* Link register x30 */ 1554 // uint64_t sp; /* Stack pointer x31 */ 1555 // uint64_t pc; /* Program counter */ 1556 // uint32_t cpsr; /* Current program status register */ 1557 //}; 1558 1559 // General purpose registers 1560 const DNBRegisterInfo DNBArchMachARM64::g_gpr_registers[] = { 1561 DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1), 1562 DEFINE_GPR_IDX(1, x1, "arg2", GENERIC_REGNUM_ARG2), 1563 DEFINE_GPR_IDX(2, x2, "arg3", GENERIC_REGNUM_ARG3), 1564 DEFINE_GPR_IDX(3, x3, "arg4", GENERIC_REGNUM_ARG4), 1565 DEFINE_GPR_IDX(4, x4, "arg5", GENERIC_REGNUM_ARG5), 1566 DEFINE_GPR_IDX(5, x5, "arg6", GENERIC_REGNUM_ARG6), 1567 DEFINE_GPR_IDX(6, x6, "arg7", GENERIC_REGNUM_ARG7), 1568 DEFINE_GPR_IDX(7, x7, "arg8", GENERIC_REGNUM_ARG8), 1569 DEFINE_GPR_IDX(8, x8, NULL, INVALID_NUB_REGNUM), 1570 DEFINE_GPR_IDX(9, x9, NULL, INVALID_NUB_REGNUM), 1571 DEFINE_GPR_IDX(10, x10, NULL, INVALID_NUB_REGNUM), 1572 DEFINE_GPR_IDX(11, x11, NULL, INVALID_NUB_REGNUM), 1573 DEFINE_GPR_IDX(12, x12, NULL, INVALID_NUB_REGNUM), 1574 DEFINE_GPR_IDX(13, x13, NULL, INVALID_NUB_REGNUM), 1575 DEFINE_GPR_IDX(14, x14, NULL, INVALID_NUB_REGNUM), 1576 DEFINE_GPR_IDX(15, x15, NULL, INVALID_NUB_REGNUM), 1577 DEFINE_GPR_IDX(16, x16, NULL, INVALID_NUB_REGNUM), 1578 DEFINE_GPR_IDX(17, x17, NULL, INVALID_NUB_REGNUM), 1579 DEFINE_GPR_IDX(18, x18, NULL, INVALID_NUB_REGNUM), 1580 DEFINE_GPR_IDX(19, x19, NULL, INVALID_NUB_REGNUM), 1581 DEFINE_GPR_IDX(20, x20, NULL, INVALID_NUB_REGNUM), 1582 DEFINE_GPR_IDX(21, x21, NULL, INVALID_NUB_REGNUM), 1583 DEFINE_GPR_IDX(22, x22, NULL, INVALID_NUB_REGNUM), 1584 DEFINE_GPR_IDX(23, x23, NULL, INVALID_NUB_REGNUM), 1585 DEFINE_GPR_IDX(24, x24, NULL, INVALID_NUB_REGNUM), 1586 DEFINE_GPR_IDX(25, x25, NULL, INVALID_NUB_REGNUM), 1587 DEFINE_GPR_IDX(26, x26, NULL, INVALID_NUB_REGNUM), 1588 DEFINE_GPR_IDX(27, x27, NULL, INVALID_NUB_REGNUM), 1589 DEFINE_GPR_IDX(28, x28, NULL, INVALID_NUB_REGNUM), 1590 // For the G/g packet we want to show where the offset into the regctx 1591 // is for fp/lr/sp/pc, but we cannot directly access them on arm64e 1592 // devices (and therefore can't offsetof() them)) - add the offset based 1593 // on the last accessible register by hand for advertising the location 1594 // in the regctx to lldb. We'll go through the accessor functions when 1595 // we read/write them here. 
1596 { 1597 e_regSetGPR, gpr_fp, "fp", "x29", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 8, 1598 dwarf_fp, dwarf_fp, GENERIC_REGNUM_FP, debugserver_gpr_fp, NULL, NULL 1599 }, 1600 { 1601 e_regSetGPR, gpr_lr, "lr", "x30", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 16, 1602 dwarf_lr, dwarf_lr, GENERIC_REGNUM_RA, debugserver_gpr_lr, NULL, NULL 1603 }, 1604 { 1605 e_regSetGPR, gpr_sp, "sp", "xsp", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 24, 1606 dwarf_sp, dwarf_sp, GENERIC_REGNUM_SP, debugserver_gpr_sp, NULL, NULL 1607 }, 1608 { 1609 e_regSetGPR, gpr_pc, "pc", NULL, Uint, Hex, 8, GPR_OFFSET_IDX(28) + 32, 1610 dwarf_pc, dwarf_pc, GENERIC_REGNUM_PC, debugserver_gpr_pc, NULL, NULL 1611 }, 1612 1613 // in armv7 we specify that writing to the CPSR should invalidate r8-12, sp, 1614 // lr. 1615 // this should be specified for arm64 too even though debugserver is only 1616 // used for 1617 // userland debugging. 1618 {e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4, 1619 GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, INVALID_NUB_REGNUM, 1620 debugserver_gpr_cpsr, NULL, NULL}, 1621 1622 DEFINE_PSEUDO_GPR_IDX(0, w0), 1623 DEFINE_PSEUDO_GPR_IDX(1, w1), 1624 DEFINE_PSEUDO_GPR_IDX(2, w2), 1625 DEFINE_PSEUDO_GPR_IDX(3, w3), 1626 DEFINE_PSEUDO_GPR_IDX(4, w4), 1627 DEFINE_PSEUDO_GPR_IDX(5, w5), 1628 DEFINE_PSEUDO_GPR_IDX(6, w6), 1629 DEFINE_PSEUDO_GPR_IDX(7, w7), 1630 DEFINE_PSEUDO_GPR_IDX(8, w8), 1631 DEFINE_PSEUDO_GPR_IDX(9, w9), 1632 DEFINE_PSEUDO_GPR_IDX(10, w10), 1633 DEFINE_PSEUDO_GPR_IDX(11, w11), 1634 DEFINE_PSEUDO_GPR_IDX(12, w12), 1635 DEFINE_PSEUDO_GPR_IDX(13, w13), 1636 DEFINE_PSEUDO_GPR_IDX(14, w14), 1637 DEFINE_PSEUDO_GPR_IDX(15, w15), 1638 DEFINE_PSEUDO_GPR_IDX(16, w16), 1639 DEFINE_PSEUDO_GPR_IDX(17, w17), 1640 DEFINE_PSEUDO_GPR_IDX(18, w18), 1641 DEFINE_PSEUDO_GPR_IDX(19, w19), 1642 DEFINE_PSEUDO_GPR_IDX(20, w20), 1643 DEFINE_PSEUDO_GPR_IDX(21, w21), 1644 DEFINE_PSEUDO_GPR_IDX(22, w22), 1645 DEFINE_PSEUDO_GPR_IDX(23, w23), 1646 DEFINE_PSEUDO_GPR_IDX(24, w24), 1647 DEFINE_PSEUDO_GPR_IDX(25, w25), 1648 DEFINE_PSEUDO_GPR_IDX(26, w26), 1649 DEFINE_PSEUDO_GPR_IDX(27, w27), 1650 DEFINE_PSEUDO_GPR_IDX(28, w28)}; 1651 1652 const char *g_contained_v0[]{"v0", NULL}; 1653 const char *g_contained_v1[]{"v1", NULL}; 1654 const char *g_contained_v2[]{"v2", NULL}; 1655 const char *g_contained_v3[]{"v3", NULL}; 1656 const char *g_contained_v4[]{"v4", NULL}; 1657 const char *g_contained_v5[]{"v5", NULL}; 1658 const char *g_contained_v6[]{"v6", NULL}; 1659 const char *g_contained_v7[]{"v7", NULL}; 1660 const char *g_contained_v8[]{"v8", NULL}; 1661 const char *g_contained_v9[]{"v9", NULL}; 1662 const char *g_contained_v10[]{"v10", NULL}; 1663 const char *g_contained_v11[]{"v11", NULL}; 1664 const char *g_contained_v12[]{"v12", NULL}; 1665 const char *g_contained_v13[]{"v13", NULL}; 1666 const char *g_contained_v14[]{"v14", NULL}; 1667 const char *g_contained_v15[]{"v15", NULL}; 1668 const char *g_contained_v16[]{"v16", NULL}; 1669 const char *g_contained_v17[]{"v17", NULL}; 1670 const char *g_contained_v18[]{"v18", NULL}; 1671 const char *g_contained_v19[]{"v19", NULL}; 1672 const char *g_contained_v20[]{"v20", NULL}; 1673 const char *g_contained_v21[]{"v21", NULL}; 1674 const char *g_contained_v22[]{"v22", NULL}; 1675 const char *g_contained_v23[]{"v23", NULL}; 1676 const char *g_contained_v24[]{"v24", NULL}; 1677 const char *g_contained_v25[]{"v25", NULL}; 1678 const char *g_contained_v26[]{"v26", NULL}; 1679 const char *g_contained_v27[]{"v27", NULL}; 1680 const char *g_contained_v28[]{"v28", NULL}; 1681 const char 
const char *g_contained_v0[]{"v0", NULL};
const char *g_contained_v1[]{"v1", NULL};
const char *g_contained_v2[]{"v2", NULL};
const char *g_contained_v3[]{"v3", NULL};
const char *g_contained_v4[]{"v4", NULL};
const char *g_contained_v5[]{"v5", NULL};
const char *g_contained_v6[]{"v6", NULL};
const char *g_contained_v7[]{"v7", NULL};
const char *g_contained_v8[]{"v8", NULL};
const char *g_contained_v9[]{"v9", NULL};
const char *g_contained_v10[]{"v10", NULL};
const char *g_contained_v11[]{"v11", NULL};
const char *g_contained_v12[]{"v12", NULL};
const char *g_contained_v13[]{"v13", NULL};
const char *g_contained_v14[]{"v14", NULL};
const char *g_contained_v15[]{"v15", NULL};
const char *g_contained_v16[]{"v16", NULL};
const char *g_contained_v17[]{"v17", NULL};
const char *g_contained_v18[]{"v18", NULL};
const char *g_contained_v19[]{"v19", NULL};
const char *g_contained_v20[]{"v20", NULL};
const char *g_contained_v21[]{"v21", NULL};
const char *g_contained_v22[]{"v22", NULL};
const char *g_contained_v23[]{"v23", NULL};
const char *g_contained_v24[]{"v24", NULL};
const char *g_contained_v25[]{"v25", NULL};
const char *g_contained_v26[]{"v26", NULL};
const char *g_contained_v27[]{"v27", NULL};
const char *g_contained_v28[]{"v28", NULL};
const char *g_contained_v29[]{"v29", NULL};
const char *g_contained_v30[]{"v30", NULL};
const char *g_contained_v31[]{"v31", NULL};

const char *g_invalidate_v0[]{"v0", "d0", "s0", NULL};
const char *g_invalidate_v1[]{"v1", "d1", "s1", NULL};
const char *g_invalidate_v2[]{"v2", "d2", "s2", NULL};
const char *g_invalidate_v3[]{"v3", "d3", "s3", NULL};
const char *g_invalidate_v4[]{"v4", "d4", "s4", NULL};
const char *g_invalidate_v5[]{"v5", "d5", "s5", NULL};
const char *g_invalidate_v6[]{"v6", "d6", "s6", NULL};
const char *g_invalidate_v7[]{"v7", "d7", "s7", NULL};
const char *g_invalidate_v8[]{"v8", "d8", "s8", NULL};
const char *g_invalidate_v9[]{"v9", "d9", "s9", NULL};
const char *g_invalidate_v10[]{"v10", "d10", "s10", NULL};
const char *g_invalidate_v11[]{"v11", "d11", "s11", NULL};
const char *g_invalidate_v12[]{"v12", "d12", "s12", NULL};
const char *g_invalidate_v13[]{"v13", "d13", "s13", NULL};
const char *g_invalidate_v14[]{"v14", "d14", "s14", NULL};
const char *g_invalidate_v15[]{"v15", "d15", "s15", NULL};
const char *g_invalidate_v16[]{"v16", "d16", "s16", NULL};
const char *g_invalidate_v17[]{"v17", "d17", "s17", NULL};
const char *g_invalidate_v18[]{"v18", "d18", "s18", NULL};
const char *g_invalidate_v19[]{"v19", "d19", "s19", NULL};
const char *g_invalidate_v20[]{"v20", "d20", "s20", NULL};
const char *g_invalidate_v21[]{"v21", "d21", "s21", NULL};
const char *g_invalidate_v22[]{"v22", "d22", "s22", NULL};
const char *g_invalidate_v23[]{"v23", "d23", "s23", NULL};
const char *g_invalidate_v24[]{"v24", "d24", "s24", NULL};
const char *g_invalidate_v25[]{"v25", "d25", "s25", NULL};
const char *g_invalidate_v26[]{"v26", "d26", "s26", NULL};
const char *g_invalidate_v27[]{"v27", "d27", "s27", NULL};
const char *g_invalidate_v28[]{"v28", "d28", "s28", NULL};
const char *g_invalidate_v29[]{"v29", "d29", "s29", NULL};
const char *g_invalidate_v30[]{"v30", "d30", "s30", NULL};
const char *g_invalidate_v31[]{"v31", "d31", "s31", NULL};

#if defined(__arm64__) || defined(__aarch64__)
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, __v) + (idx * 16) +                         \
   offsetof(DNBArchMachARM64::Context, vfp))
#else
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, opaque) + (idx * 16) +                      \
   offsetof(DNBArchMachARM64::Context, vfp))
#endif
#define VFP_OFFSET_NAME(reg)                                                   \
  (offsetof(DNBArchMachARM64::FPU, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, vfp))
#define EXC_OFFSET(reg)                                                        \
  (offsetof(DNBArchMachARM64::EXC, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, exc))

//#define FLOAT_FORMAT Float
#define DEFINE_VFP_V_IDX(idx)                                                  \
  {                                                                            \
    e_regSetVFP, vfp_v##idx, "v" #idx, "q" #idx, Vector, VectorOfUInt8, 16,    \
        VFP_V_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_v##idx,               \
        INVALID_NUB_REGNUM, debugserver_vfp_v##idx, NULL, g_invalidate_v##idx  \
  }
#define DEFINE_PSEUDO_VFP_S_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, Float, 4, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
#define DEFINE_PSEUDO_VFP_D_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, Float, 8, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
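// For reference, DEFINE_VFP_V_IDX(0) expands to a register info entry of the
// form:
//   { e_regSetVFP, vfp_v0, "v0", "q0", Vector, VectorOfUInt8, 16,
//     VFP_V_OFFSET_IDX(0), INVALID_NUB_REGNUM, dwarf_v0, INVALID_NUB_REGNUM,
//     debugserver_vfp_v0, NULL, g_invalidate_v0 }
// i.e. a 16-byte vector register located at its index-scaled offset inside
// the vfp portion of the context.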
// Floating point registers
const DNBRegisterInfo DNBArchMachARM64::g_vfp_registers[] = {
    DEFINE_VFP_V_IDX(0),
    DEFINE_VFP_V_IDX(1),
    DEFINE_VFP_V_IDX(2),
    DEFINE_VFP_V_IDX(3),
    DEFINE_VFP_V_IDX(4),
    DEFINE_VFP_V_IDX(5),
    DEFINE_VFP_V_IDX(6),
    DEFINE_VFP_V_IDX(7),
    DEFINE_VFP_V_IDX(8),
    DEFINE_VFP_V_IDX(9),
    DEFINE_VFP_V_IDX(10),
    DEFINE_VFP_V_IDX(11),
    DEFINE_VFP_V_IDX(12),
    DEFINE_VFP_V_IDX(13),
    DEFINE_VFP_V_IDX(14),
    DEFINE_VFP_V_IDX(15),
    DEFINE_VFP_V_IDX(16),
    DEFINE_VFP_V_IDX(17),
    DEFINE_VFP_V_IDX(18),
    DEFINE_VFP_V_IDX(19),
    DEFINE_VFP_V_IDX(20),
    DEFINE_VFP_V_IDX(21),
    DEFINE_VFP_V_IDX(22),
    DEFINE_VFP_V_IDX(23),
    DEFINE_VFP_V_IDX(24),
    DEFINE_VFP_V_IDX(25),
    DEFINE_VFP_V_IDX(26),
    DEFINE_VFP_V_IDX(27),
    DEFINE_VFP_V_IDX(28),
    DEFINE_VFP_V_IDX(29),
    DEFINE_VFP_V_IDX(30),
    DEFINE_VFP_V_IDX(31),
    {e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    DEFINE_PSEUDO_VFP_S_IDX(0),
    DEFINE_PSEUDO_VFP_S_IDX(1),
    DEFINE_PSEUDO_VFP_S_IDX(2),
    DEFINE_PSEUDO_VFP_S_IDX(3),
    DEFINE_PSEUDO_VFP_S_IDX(4),
    DEFINE_PSEUDO_VFP_S_IDX(5),
    DEFINE_PSEUDO_VFP_S_IDX(6),
    DEFINE_PSEUDO_VFP_S_IDX(7),
    DEFINE_PSEUDO_VFP_S_IDX(8),
    DEFINE_PSEUDO_VFP_S_IDX(9),
    DEFINE_PSEUDO_VFP_S_IDX(10),
    DEFINE_PSEUDO_VFP_S_IDX(11),
    DEFINE_PSEUDO_VFP_S_IDX(12),
    DEFINE_PSEUDO_VFP_S_IDX(13),
    DEFINE_PSEUDO_VFP_S_IDX(14),
    DEFINE_PSEUDO_VFP_S_IDX(15),
    DEFINE_PSEUDO_VFP_S_IDX(16),
    DEFINE_PSEUDO_VFP_S_IDX(17),
    DEFINE_PSEUDO_VFP_S_IDX(18),
    DEFINE_PSEUDO_VFP_S_IDX(19),
    DEFINE_PSEUDO_VFP_S_IDX(20),
    DEFINE_PSEUDO_VFP_S_IDX(21),
    DEFINE_PSEUDO_VFP_S_IDX(22),
    DEFINE_PSEUDO_VFP_S_IDX(23),
    DEFINE_PSEUDO_VFP_S_IDX(24),
    DEFINE_PSEUDO_VFP_S_IDX(25),
    DEFINE_PSEUDO_VFP_S_IDX(26),
    DEFINE_PSEUDO_VFP_S_IDX(27),
    DEFINE_PSEUDO_VFP_S_IDX(28),
    DEFINE_PSEUDO_VFP_S_IDX(29),
    DEFINE_PSEUDO_VFP_S_IDX(30),
    DEFINE_PSEUDO_VFP_S_IDX(31),

    DEFINE_PSEUDO_VFP_D_IDX(0),
    DEFINE_PSEUDO_VFP_D_IDX(1),
    DEFINE_PSEUDO_VFP_D_IDX(2),
    DEFINE_PSEUDO_VFP_D_IDX(3),
    DEFINE_PSEUDO_VFP_D_IDX(4),
    DEFINE_PSEUDO_VFP_D_IDX(5),
    DEFINE_PSEUDO_VFP_D_IDX(6),
    DEFINE_PSEUDO_VFP_D_IDX(7),
    DEFINE_PSEUDO_VFP_D_IDX(8),
    DEFINE_PSEUDO_VFP_D_IDX(9),
    DEFINE_PSEUDO_VFP_D_IDX(10),
    DEFINE_PSEUDO_VFP_D_IDX(11),
    DEFINE_PSEUDO_VFP_D_IDX(12),
    DEFINE_PSEUDO_VFP_D_IDX(13),
    DEFINE_PSEUDO_VFP_D_IDX(14),
    DEFINE_PSEUDO_VFP_D_IDX(15),
    DEFINE_PSEUDO_VFP_D_IDX(16),
    DEFINE_PSEUDO_VFP_D_IDX(17),
    DEFINE_PSEUDO_VFP_D_IDX(18),
    DEFINE_PSEUDO_VFP_D_IDX(19),
    DEFINE_PSEUDO_VFP_D_IDX(20),
    DEFINE_PSEUDO_VFP_D_IDX(21),
    DEFINE_PSEUDO_VFP_D_IDX(22),
    DEFINE_PSEUDO_VFP_D_IDX(23),
    DEFINE_PSEUDO_VFP_D_IDX(24),
    DEFINE_PSEUDO_VFP_D_IDX(25),
    DEFINE_PSEUDO_VFP_D_IDX(26),
    DEFINE_PSEUDO_VFP_D_IDX(27),
    DEFINE_PSEUDO_VFP_D_IDX(28),
    DEFINE_PSEUDO_VFP_D_IDX(29),
    DEFINE_PSEUDO_VFP_D_IDX(30),
    DEFINE_PSEUDO_VFP_D_IDX(31)

};
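// Note: the sN/dN pseudo registers above carry no storage of their own (their
// offset field is 0); GetRegisterValue/SetRegisterValue below service them by
// copying the first 4 or 8 bytes of the containing vN register's slot in the
// vfp context.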
//_STRUCT_ARM_EXCEPTION_STATE64
//{
//  uint64_t far;       /* Virtual Fault Address */
//  uint32_t esr;       /* Exception syndrome */
//  uint32_t exception; /* number of arm exception taken */
//};

// Exception registers
const DNBRegisterInfo DNBArchMachARM64::g_exc_registers[] = {
    {e_regSetEXC, exc_far, "far", NULL, Uint, Hex, 8, EXC_OFFSET(__far),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_esr, "esr", NULL, Uint, Hex, 4, EXC_OFFSET(__esr),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_exception, "exception", NULL, Uint, Hex, 4,
     EXC_OFFSET(__exception), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};

// Number of registers in each register set
const size_t DNBArchMachARM64::k_num_gpr_registers =
    sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_vfp_registers =
    sizeof(g_vfp_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_exc_registers =
    sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_all_registers =
    k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;

// Register set definitions. The first definition, at register set index zero,
// is for all registers; it is followed by the other register sets. The
// register information for the all-registers set need not be filled in.
const DNBRegisterSetInfo DNBArchMachARM64::g_reg_sets[] = {
    {"ARM64 Registers", NULL, k_num_all_registers},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_vfp_registers, k_num_vfp_registers},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
// Total number of register sets for this architecture
const size_t DNBArchMachARM64::k_num_register_sets =
    sizeof(g_reg_sets) / sizeof(DNBRegisterSetInfo);

const DNBRegisterSetInfo *
DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  return g_reg_sets;
}

bool DNBArchMachARM64::FixGenericRegisterNumber(uint32_t &set, uint32_t &reg) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_pc;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_sp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_fp;
      break;

    case GENERIC_REGNUM_RA: // Return Address
      set = e_regSetGPR;
      reg = gpr_lr;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_cpsr;
      break;

    case GENERIC_REGNUM_ARG1:
    case GENERIC_REGNUM_ARG2:
    case GENERIC_REGNUM_ARG3:
    case GENERIC_REGNUM_ARG4:
    case GENERIC_REGNUM_ARG5:
    case GENERIC_REGNUM_ARG6:
      set = e_regSetGPR;
      reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
      break;

    default:
      return false;
    }
  }
  return true;
}
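// For example, a request for (REGISTER_SET_GENERIC, GENERIC_REGNUM_PC) is
// remapped to (e_regSetGPR, gpr_pc) before the register info lookup in
// GetRegisterValue/SetRegisterValue below; requests that are not in the
// generic set pass through unchanged.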
bool DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg,
                                        DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    value->info = *regInfo;
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
#if defined(__LP64__)
        if (reg == gpr_pc)
          value->value.uint64 = arm_thread_state64_get_pc(m_state.context.gpr);
        else if (reg == gpr_lr)
          value->value.uint64 = arm_thread_state64_get_lr(m_state.context.gpr);
        else if (reg == gpr_sp)
          value->value.uint64 = arm_thread_state64_get_sp(m_state.context.gpr);
        else if (reg == gpr_fp)
          value->value.uint64 = arm_thread_state64_get_fp(m_state.context.gpr);
        else
          value->value.uint64 = m_state.context.gpr.__x[reg];
#else
        value->value.uint64 = m_state.context.gpr.__x[reg];
#endif
        return true;
      } else if (reg == gpr_cpsr) {
        value->value.uint32 = m_state.context.gpr.__cpsr;
        return true;
      }
      break;

    case e_regSetVFP:
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0],
               16);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               16);
#endif
        return true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
#endif
        return true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
#endif
        return true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0],
               4);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               4);
#endif
        return true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0],
               8);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               8);
#endif
        return true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        value->value.uint64 = m_state.context.exc.__far;
        return true;
      } else if (reg == exc_esr) {
        value->value.uint32 = m_state.context.exc.__esr;
        return true;
      } else if (reg == exc_exception) {
        value->value.uint32 = m_state.context.exc.__exception;
        return true;
      }
      break;
    }
  }
  return false;
}
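// SetRegisterValue mirrors GetRegisterValue. When pointer authentication is
// available, pc and lr are stripped and re-signed before being written back,
// since the incoming value may carry stale or missing signature bits.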
bool DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg,
                                        const DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  bool success = false;
  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
#if defined(__LP64__)
        uint64_t signed_value = value->value.uint64;
#if __has_feature(ptrauth_calls)
        // The incoming value could be garbage. Strip it to avoid
        // trapping when it gets resigned in the thread state.
        signed_value = (uint64_t)ptrauth_strip((void *)signed_value,
                                               ptrauth_key_function_pointer);
        signed_value = (uint64_t)ptrauth_sign_unauthenticated(
            (void *)signed_value, ptrauth_key_function_pointer, 0);
#endif
        if (reg == gpr_pc)
          arm_thread_state64_set_pc_fptr(m_state.context.gpr,
                                         (void *)signed_value);
        else if (reg == gpr_lr)
          arm_thread_state64_set_lr_fptr(m_state.context.gpr,
                                         (void *)signed_value);
        else if (reg == gpr_sp)
          arm_thread_state64_set_sp(m_state.context.gpr, value->value.uint64);
        else if (reg == gpr_fp)
          arm_thread_state64_set_fp(m_state.context.gpr, value->value.uint64);
        else
          m_state.context.gpr.__x[reg] = value->value.uint64;
#else
        m_state.context.gpr.__x[reg] = value->value.uint64;
#endif
        success = true;
      } else if (reg == gpr_cpsr) {
        m_state.context.gpr.__cpsr = value->value.uint32;
        success = true;
      }
      break;

    case e_regSetVFP:
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8,
               16);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               &value->value.v_uint8, 16);
#endif
        success = true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)m_state.context.vfp.opaque) + (32 * 16) + 4,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8,
               4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               &value->value.v_uint8, 4);
#endif
        success = true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8,
               8);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               &value->value.v_uint8, 8);
#endif
        success = true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        m_state.context.exc.__far = value->value.uint64;
        success = true;
      } else if (reg == exc_esr) {
        m_state.context.exc.__esr = value->value.uint32;
        success = true;
      } else if (reg == exc_exception) {
        m_state.context.exc.__exception = value->value.uint32;
        success = true;
      }
      break;
    }
  }
  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}
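// The e_regSetALL cases below bitwise-OR the kern_return_t results of the
// per-set accessors: KERN_SUCCESS is 0, so any non-zero combined value means
// at least one register set failed, though the combined value is not itself a
// specific Mach error code.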
kern_return_t DNBArchMachARM64::GetRegisterState(int set, bool force) {
  switch (set) {
  case e_regSetALL:
    return GetGPRState(force) | GetVFPState(force) | GetEXCState(force) |
           GetDBGState(force);
  case e_regSetGPR:
    return GetGPRState(force);
  case e_regSetVFP:
    return GetVFPState(force);
  case e_regSetEXC:
    return GetEXCState(force);
  case e_regSetDBG:
    return GetDBGState(force);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

kern_return_t DNBArchMachARM64::SetRegisterState(int set) {
  // Make sure we have a valid context to set.
  kern_return_t err = GetRegisterState(set, false);
  if (err != KERN_SUCCESS)
    return err;

  switch (set) {
  case e_regSetALL:
    return SetGPRState() | SetVFPState() | SetEXCState() | SetDBGState(false);
  case e_regSetGPR:
    return SetGPRState();
  case e_regSetVFP:
    return SetVFPState();
  case e_regSetEXC:
    return SetEXCState();
  case e_regSetDBG:
    return SetDBGState(false);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

bool DNBArchMachARM64::RegisterSetStateIsValid(int set) const {
  return m_state.RegsAreValid(set);
}

nub_size_t DNBArchMachARM64::GetRegisterContext(void *buf, nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf && buf_len) {
    if (size > buf_len)
      size = buf_len;

    bool force = false;
    if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
      return 0;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = (uint8_t *)buf;
    ::memcpy(p, &m_state.context.gpr, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(p, &m_state.context.vfp, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(p, &m_state.context.exc, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - (uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  // Return the size of the register context even if NULL was passed in
  return size;
}

nub_size_t DNBArchMachARM64::SetRegisterContext(const void *buf,
                                                nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf == NULL || buf_len == 0)
    size = 0;

  if (size) {
    if (size > buf_len)
      size = buf_len;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = (uint8_t *)buf;
    ::memcpy(&m_state.context.gpr, p, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(&m_state.context.vfp, p, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(&m_state.context.exc, p, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - (uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
    SetGPRState();
    SetVFPState();
    SetEXCState();
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  return size;
}
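// SaveRegisterState aborts any outstanding kernel trap with
// thread_abort_safely(), force-reads the GPR and VFP state, and caches the
// context under a new save_id. RestoreRegisterState consumes that save_id,
// but only the GPR and VFP sets are written back on restore.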
uint32_t DNBArchMachARM64::SaveRegisterState() {
  kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
  DNBLogThreadedIf(
      LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
                  "(SetGPRState() for stop_count = %u)",
      m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());

  // Always re-read the registers because above we call thread_abort_safely();
  bool force = true;

  if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: GPR regs failed to read: %u ",
                     kret);
  } else if ((kret = GetVFPState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: %s regs failed to read: %u",
                     "VFP", kret);
  } else {
    const uint32_t save_id = GetNextRegisterStateSaveID();
    m_saved_register_states[save_id] = m_state.context;
    return save_id;
  }
  return UINT32_MAX;
}

bool DNBArchMachARM64::RestoreRegisterState(uint32_t save_id) {
  SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
  if (pos != m_saved_register_states.end()) {
    m_state.context.gpr = pos->second.gpr;
    m_state.context.vfp = pos->second.vfp;
    kern_return_t kret;
    bool success = true;
    if ((kret = SetGPRState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
                                   "(save_id = %u) error: GPR regs failed to "
                                   "write: %u",
                       save_id, kret);
      success = false;
    } else if ((kret = SetVFPState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
                                   "(save_id = %u) error: %s regs failed to "
                                   "write: %u",
                       save_id, "VFP", kret);
      success = false;
    }
    m_saved_register_states.erase(pos);
    return success;
  }
  return false;
}

#endif // #if defined (ARM_THREAD_STATE64_COUNT)
#endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)