//===-- DNBArchImplARM64.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)

#include "MacOSX/arm64/DNBArchImplARM64.h"

#if defined(ARM_THREAD_STATE64_COUNT)

#include "DNB.h"
#include "DNBBreakpoint.h"
#include "DNBLog.h"
#include "DNBRegisterInfo.h"
#include "MacOSX/MachProcess.h"
#include "MacOSX/MachThread.h"

#include <cinttypes>
#include <sys/sysctl.h>

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif

// Break only in privileged or user mode
// (PAC bits in the DBGWCRn_EL1 watchpoint control register)
#define S_USER ((uint32_t)(2u << 1))

#define BCR_ENABLE ((uint32_t)(1u))
#define WCR_ENABLE ((uint32_t)(1u))

// Watchpoint load/store
// (LSC bits in the DBGWCRn_EL1 watchpoint control register)
#define WCR_LOAD ((uint32_t)(1u << 3))
#define WCR_STORE ((uint32_t)(1u << 4))

// Enable breakpoint, watchpoint, and vector catch debug exceptions.
// (MDE bit in the MDSCR_EL1 register. Equivalent to the MDBGen bit in
// DBGDSCRext in AArch32.)
#define MDE_ENABLE ((uint32_t)(1u << 15))

// Single instruction step
// (SS bit in the MDSCR_EL1 register)
#define SS_ENABLE ((uint32_t)(1u))

static const uint8_t g_arm64_breakpoint_opcode[] = {
    0x00, 0x00, 0x20, 0xD4}; // "brk #0", 0xd4200000 in BE byte order

// If we need to set one logical watchpoint by using
// two hardware watchpoint registers, the watchpoint
// will be split into a "high" and "low" watchpoint.
// Record both of them in the LoHi array.

// It's safe to initialize to all 0's since
// hi > lo and therefore LoHi[i] cannot be 0.
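// Illustrative example (not from the original comments): a 4-byte watchpoint
// whose start address has a dword offset of 6 spills into the next dword, so
// EnableHardwareWatchpoint() programs two hardware slots. If the low half
// (bytes 6-7) lands in slot 1 and the high half (bytes 0-1 of the next dword)
// lands in slot 3, then LoHi[1] == 3 and slot 1 is the index reported to lldb.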
static uint32_t LoHi[16] = {0};

void DNBArchMachARM64::Initialize() {
  DNBArchPluginInfo arch_plugin_info = {
      CPU_TYPE_ARM64, DNBArchMachARM64::Create,
      DNBArchMachARM64::GetRegisterSetInfo,
      DNBArchMachARM64::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);

  DNBArchPluginInfo arch_plugin_info_32 = {
      CPU_TYPE_ARM64_32, DNBArchMachARM64::Create,
      DNBArchMachARM64::GetRegisterSetInfo,
      DNBArchMachARM64::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info_32);
}

DNBArchProtocol *DNBArchMachARM64::Create(MachThread *thread) {
  DNBArchMachARM64 *obj = new DNBArchMachARM64(thread);

  return obj;
}

const uint8_t *
DNBArchMachARM64::SoftwareBreakpointOpcode(nub_size_t byte_size) {
  return g_arm64_breakpoint_opcode;
}

uint32_t DNBArchMachARM64::GetCPUType() { return CPU_TYPE_ARM64; }

uint64_t DNBArchMachARM64::GetPC(uint64_t failValue) {
  // Get program counter
  if (GetGPRState(false) == KERN_SUCCESS)
#if defined(__LP64__)
    return arm_thread_state64_get_pc(m_state.context.gpr);
#else
    return m_state.context.gpr.__pc;
#endif
  return failValue;
}

kern_return_t DNBArchMachARM64::SetPC(uint64_t value) {
  // Get program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
#if defined(__LP64__)
#if __has_feature(ptrauth_calls)
    // The incoming value could be garbage. Strip it to avoid
    // trapping when it gets resigned in the thread state.
    value = (uint64_t)ptrauth_strip((void *)value, ptrauth_key_function_pointer);
    value = (uint64_t)ptrauth_sign_unauthenticated(
        (void *)value, ptrauth_key_function_pointer, 0);
#endif
    arm_thread_state64_set_pc_fptr(m_state.context.gpr, (void *)value);
#else
    m_state.context.gpr.__pc = value;
#endif
    err = SetGPRState();
  }
  return err == KERN_SUCCESS;
}

uint64_t DNBArchMachARM64::GetSP(uint64_t failValue) {
  // Get stack pointer
  if (GetGPRState(false) == KERN_SUCCESS)
#if defined(__LP64__)
    return arm_thread_state64_get_sp(m_state.context.gpr);
#else
    return m_state.context.gpr.__sp;
#endif
  return failValue;
}

kern_return_t DNBArchMachARM64::GetGPRState(bool force) {
  int set = e_regSetGPR;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetGPRCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64,
                         (thread_state_t)&m_state.context.gpr, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
    uint64_t *x = &m_state.context.gpr.__x[0];

#if defined(__LP64__)
    uint64_t log_fp = arm_thread_state64_get_fp(m_state.context.gpr);
    uint64_t log_lr = arm_thread_state64_get_lr(m_state.context.gpr);
    uint64_t log_sp = arm_thread_state64_get_sp(m_state.context.gpr);
    uint64_t log_pc = arm_thread_state64_get_pc(m_state.context.gpr);
#else
    uint64_t log_fp = m_state.context.gpr.__fp;
    uint64_t log_lr = m_state.context.gpr.__lr;
    uint64_t log_sp = m_state.context.gpr.__sp;
    uint64_t log_pc = m_state.context.gpr.__pc;
#endif
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs"
        "\n x0=%16.16llx"
        "\n x1=%16.16llx"
        "\n x2=%16.16llx"
        "\n x3=%16.16llx"
        "\n x4=%16.16llx"
        "\n x5=%16.16llx"
        "\n x6=%16.16llx"
        "\n x7=%16.16llx"
        "\n x8=%16.16llx"
        "\n x9=%16.16llx"
        "\n x10=%16.16llx"
        "\n x11=%16.16llx"
        "\n x12=%16.16llx"
        "\n x13=%16.16llx"
        "\n x14=%16.16llx"
        "\n x15=%16.16llx"
        "\n x16=%16.16llx"
        "\n x17=%16.16llx"
        "\n x18=%16.16llx"
        "\n x19=%16.16llx"
        "\n x20=%16.16llx"
        "\n x21=%16.16llx"
        "\n x22=%16.16llx"
        "\n x23=%16.16llx"
        "\n x24=%16.16llx"
        "\n x25=%16.16llx"
        "\n x26=%16.16llx"
        "\n x27=%16.16llx"
        "\n x28=%16.16llx"
        "\n fp=%16.16llx"
        "\n lr=%16.16llx"
        "\n sp=%16.16llx"
        "\n pc=%16.16llx"
        "\n cpsr=%8.8x",
        m_thread->MachPortNumber(), e_regSetGPR, e_regSetGPRCount, kret, count,
        x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10],
        x[11], x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20],
        x[21], x[22], x[23], x[24], x[25], x[26], x[27], x[28], log_fp, log_lr,
        log_sp, log_pc, m_state.context.gpr.__cpsr);
  }
  m_state.SetError(set, Read, kret);
  return kret;
}

kern_return_t DNBArchMachARM64::GetVFPState(bool force) {
  int set = e_regSetVFP;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetVFPCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64,
                         (thread_state_t)&m_state.context.vfp, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
#if defined(__arm64__) || defined(__aarch64__)
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
        "\n q0 = 0x%16.16llx%16.16llx"
        "\n q1 = 0x%16.16llx%16.16llx"
        "\n q2 = 0x%16.16llx%16.16llx"
        "\n q3 = 0x%16.16llx%16.16llx"
        "\n q4 = 0x%16.16llx%16.16llx"
        "\n q5 = 0x%16.16llx%16.16llx"
        "\n q6 = 0x%16.16llx%16.16llx"
        "\n q7 = 0x%16.16llx%16.16llx"
        "\n q8 = 0x%16.16llx%16.16llx"
        "\n q9 = 0x%16.16llx%16.16llx"
        "\n q10 = 0x%16.16llx%16.16llx"
        "\n q11 = 0x%16.16llx%16.16llx"
        "\n q12 = 0x%16.16llx%16.16llx"
        "\n q13 = 0x%16.16llx%16.16llx"
        "\n q14 = 0x%16.16llx%16.16llx"
        "\n q15 = 0x%16.16llx%16.16llx"
        "\n q16 = 0x%16.16llx%16.16llx"
        "\n q17 = 0x%16.16llx%16.16llx"
        "\n q18 = 0x%16.16llx%16.16llx"
        "\n q19 = 0x%16.16llx%16.16llx"
        "\n q20 = 0x%16.16llx%16.16llx"
        "\n q21 = 0x%16.16llx%16.16llx"
        "\n q22 = 0x%16.16llx%16.16llx"
        "\n q23 = 0x%16.16llx%16.16llx"
        "\n q24 = 0x%16.16llx%16.16llx"
        "\n q25 = 0x%16.16llx%16.16llx"
        "\n q26 = 0x%16.16llx%16.16llx"
        "\n q27 = 0x%16.16llx%16.16llx"
        "\n q28 = 0x%16.16llx%16.16llx"
        "\n q29 = 0x%16.16llx%16.16llx"
        "\n q30 = 0x%16.16llx%16.16llx"
        "\n q31 = 0x%16.16llx%16.16llx"
        "\n fpsr = 0x%8.8x"
        "\n fpcr = 0x%8.8x\n\n",
        m_thread->MachPortNumber(), e_regSetVFP, e_regSetVFPCount, kret, count,
        ((uint64_t *)&m_state.context.vfp.__v[0])[0],
        ((uint64_t *)&m_state.context.vfp.__v[0])[1],
        ((uint64_t *)&m_state.context.vfp.__v[1])[0],
        ((uint64_t *)&m_state.context.vfp.__v[1])[1],
        ((uint64_t *)&m_state.context.vfp.__v[2])[0],
        ((uint64_t *)&m_state.context.vfp.__v[2])[1],
        ((uint64_t *)&m_state.context.vfp.__v[3])[0],
        ((uint64_t *)&m_state.context.vfp.__v[3])[1],
        ((uint64_t *)&m_state.context.vfp.__v[4])[0],
        ((uint64_t *)&m_state.context.vfp.__v[4])[1],
        ((uint64_t *)&m_state.context.vfp.__v[5])[0],
        ((uint64_t *)&m_state.context.vfp.__v[5])[1],
        ((uint64_t *)&m_state.context.vfp.__v[6])[0],
        ((uint64_t *)&m_state.context.vfp.__v[6])[1],
        ((uint64_t *)&m_state.context.vfp.__v[7])[0],
        ((uint64_t *)&m_state.context.vfp.__v[7])[1],
        ((uint64_t *)&m_state.context.vfp.__v[8])[0],
        ((uint64_t *)&m_state.context.vfp.__v[8])[1],
        ((uint64_t *)&m_state.context.vfp.__v[9])[0],
        ((uint64_t *)&m_state.context.vfp.__v[9])[1],
        ((uint64_t *)&m_state.context.vfp.__v[10])[0],
        ((uint64_t *)&m_state.context.vfp.__v[10])[1],
        ((uint64_t *)&m_state.context.vfp.__v[11])[0],
        ((uint64_t *)&m_state.context.vfp.__v[11])[1],
        ((uint64_t *)&m_state.context.vfp.__v[12])[0],
        ((uint64_t *)&m_state.context.vfp.__v[12])[1],
        ((uint64_t *)&m_state.context.vfp.__v[13])[0],
        ((uint64_t *)&m_state.context.vfp.__v[13])[1],
        ((uint64_t *)&m_state.context.vfp.__v[14])[0],
        ((uint64_t *)&m_state.context.vfp.__v[14])[1],
        ((uint64_t *)&m_state.context.vfp.__v[15])[0],
        ((uint64_t *)&m_state.context.vfp.__v[15])[1],
        ((uint64_t *)&m_state.context.vfp.__v[16])[0],
        ((uint64_t *)&m_state.context.vfp.__v[16])[1],
        ((uint64_t *)&m_state.context.vfp.__v[17])[0],
        ((uint64_t *)&m_state.context.vfp.__v[17])[1],
        ((uint64_t *)&m_state.context.vfp.__v[18])[0],
        ((uint64_t *)&m_state.context.vfp.__v[18])[1],
        ((uint64_t *)&m_state.context.vfp.__v[19])[0],
        ((uint64_t *)&m_state.context.vfp.__v[19])[1],
        ((uint64_t *)&m_state.context.vfp.__v[20])[0],
        ((uint64_t *)&m_state.context.vfp.__v[20])[1],
        ((uint64_t *)&m_state.context.vfp.__v[21])[0],
        ((uint64_t *)&m_state.context.vfp.__v[21])[1],
        ((uint64_t *)&m_state.context.vfp.__v[22])[0],
        ((uint64_t *)&m_state.context.vfp.__v[22])[1],
        ((uint64_t *)&m_state.context.vfp.__v[23])[0],
        ((uint64_t *)&m_state.context.vfp.__v[23])[1],
        ((uint64_t *)&m_state.context.vfp.__v[24])[0],
        ((uint64_t *)&m_state.context.vfp.__v[24])[1],
        ((uint64_t *)&m_state.context.vfp.__v[25])[0],
        ((uint64_t *)&m_state.context.vfp.__v[25])[1],
        ((uint64_t *)&m_state.context.vfp.__v[26])[0],
        ((uint64_t *)&m_state.context.vfp.__v[26])[1],
        ((uint64_t *)&m_state.context.vfp.__v[27])[0],
        ((uint64_t *)&m_state.context.vfp.__v[27])[1],
        ((uint64_t *)&m_state.context.vfp.__v[28])[0],
        ((uint64_t *)&m_state.context.vfp.__v[28])[1],
        ((uint64_t *)&m_state.context.vfp.__v[29])[0],
        ((uint64_t *)&m_state.context.vfp.__v[29])[1],
        ((uint64_t *)&m_state.context.vfp.__v[30])[0],
        ((uint64_t *)&m_state.context.vfp.__v[30])[1],
        ((uint64_t *)&m_state.context.vfp.__v[31])[0],
        ((uint64_t *)&m_state.context.vfp.__v[31])[1],
        m_state.context.vfp.__fpsr, m_state.context.vfp.__fpcr);
#endif
  }
  m_state.SetError(set, Read, kret);
  return kret;
}

kern_return_t DNBArchMachARM64::GetEXCState(bool force) {
  int set = e_regSetEXC;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetEXCCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
                         (thread_state_t)&m_state.context.exc, &count);
  m_state.SetError(set, Read, kret);
  return kret;
}

#if 0
static void
DumpDBGState(const arm_debug_state_t &dbg) {
  uint32_t i = 0;
  for (i = 0; i < 16; i++)
    DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } "
                               "WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
                     i, i, dbg.__bvr[i], dbg.__bcr[i], i, i, dbg.__wvr[i],
                     dbg.__wcr[i]);
}
#endif

kern_return_t DNBArchMachARM64::GetDBGState(bool force) {
  int set = e_regSetDBG;

  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetDBGCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
                         (thread_state_t)&m_state.dbg, &count);
  m_state.SetError(set, Read, kret);

  return kret;
}

kern_return_t DNBArchMachARM64::SetGPRState() {
  int set = e_regSetGPR;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_THREAD_STATE64,
      (thread_state_t)&m_state.context.gpr, e_regSetGPRCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetVFPState() {
  int set = e_regSetVFP;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_NEON_STATE64,
      (thread_state_t)&m_state.context.vfp, e_regSetVFPCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetEXCState() {
  int set = e_regSetEXC;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
      (thread_state_t)&m_state.context.exc, e_regSetEXCCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetDBGState(bool also_set_on_task) {
  int set = e_regSetDBG;
  kern_return_t kret =
      ::thread_set_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
                         (thread_state_t)&m_state.dbg, e_regSetDBGCount);
  if (also_set_on_task) {
    kern_return_t task_kret = task_set_state(
        m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64,
        (thread_state_t)&m_state.dbg, e_regSetDBGCount);
    if (task_kret != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed "
                                        "to set debug control register state: "
                                        "0x%8.8x.",
                       task_kret);
  }
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently

  return kret; // Return the error code
}

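// Summary of how the routines below cooperate when a watchpoint triggers
// (added for clarity, derived from the code in this file): NotifyException()
// records which hardware slot was hit, ThreadWillResume() temporarily
// disables that watchpoint and turns on hardware single step so the faulting
// instruction can execute, and ThreadDidStop() then clears the single step
// and re-enables the watchpoint via ReenableHardwareWatchpoint().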
void DNBArchMachARM64::ThreadWillResume() {
  // Do we need to step this thread? If so, let the mach thread tell us so.
  if (m_thread->IsStepping()) {
    EnableHardwareSingleStep(true);
  }

  // Disable the triggered watchpoint temporarily before we resume.
  // Plus, we try to enable hardware single step to execute past the
  // instruction which triggered our watchpoint.
  if (m_watchpoint_did_occur) {
    if (m_watchpoint_hw_index >= 0) {
      kern_return_t kret = GetDBGState(false);
      if (kret == KERN_SUCCESS &&
          !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
        // The watchpoint might have been disabled by the user. We don't need
        // to do anything at all to enable hardware single stepping.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        return;
      }

      DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::ThreadWillResume() "
                       "DisableHardwareWatchpoint(%d) called",
                       m_watchpoint_hw_index);

      // Enable hardware single step to move past the watchpoint-triggering
      // instruction.
      m_watchpoint_resume_single_step_enabled =
          (EnableHardwareSingleStep(true) == KERN_SUCCESS);

      // If we are not able to enable single step to move past the
      // watchpoint-triggering instruction, at least reset the two watchpoint
      // member variables so that the next time this callback function is
      // invoked, the enclosing logical branch is skipped.
      if (!m_watchpoint_resume_single_step_enabled) {
        // Reset the two watchpoint member variables.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        DNBLogThreadedIf(LOG_WATCHPOINTS,
                         "DNBArchMachARM64::ThreadWillResume() failed to "
                         "enable single step");
      } else
        DNBLogThreadedIf(LOG_WATCHPOINTS,
                         "DNBArchMachARM64::ThreadWillResume() "
                         "succeeded in enabling single step");
    }
  }
}

bool DNBArchMachARM64::NotifyException(MachException::Data &exc) {

  switch (exc.exc_type) {
  default:
    break;
  case EXC_BREAKPOINT:
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) {
      // The data break address is passed as exc_data[1].
      nub_addr_t addr = exc.exc_data[1];
      // Find the hardware index with the side effect of possibly massaging the
      // addr to return the starting address as seen from the debugger side.
      uint32_t hw_index = GetHardwareWatchpointHit(addr);

      // One logical watchpoint was split into two watchpoint locations because
      // it was too big. If the watchpoint exception is indicating the 2nd half
      // of the two-parter, find the address of the 1st half and report that --
      // that's what lldb is going to expect to see.
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::NotifyException "
                                        "watchpoint %d was hit on address "
                                        "0x%llx",
                       hw_index, (uint64_t)addr);
      const uint32_t num_watchpoints = NumSupportedHardwareWatchpoints();
      for (uint32_t i = 0; i < num_watchpoints; i++) {
        if (LoHi[i] != 0 && LoHi[i] == hw_index && LoHi[i] != i &&
            GetWatchpointAddressByIndex(i) != INVALID_NUB_ADDRESS) {
          addr = GetWatchpointAddressByIndex(i);
          DNBLogThreadedIf(LOG_WATCHPOINTS,
                           "DNBArchMachARM64::NotifyException "
                           "It is a linked watchpoint; "
                           "rewritten to index %d addr 0x%llx",
                           LoHi[i], (uint64_t)addr);
        }
      }

      if (hw_index != INVALID_NUB_HW_INDEX) {
        m_watchpoint_did_occur = true;
        m_watchpoint_hw_index = hw_index;
        exc.exc_data[1] = addr;
        // Piggyback the hw_index in the exc.data.
        exc.exc_data.push_back(hw_index);
      }

      return true;
    }
    // Detect a __builtin_debugtrap instruction pattern ("brk #0xf000")
    // and advance the $pc past it, so that the user can continue execution.
    // Generally speaking, this knowledge should be centralized in lldb,
    // recognizing the builtin_trap instruction and knowing how to advance
    // the pc past it, so that continue etc work.
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_BREAKPOINT) {
      nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
      if (pc != INVALID_NUB_ADDRESS && pc > 0) {
        DNBBreakpoint *bp =
            m_thread->Process()->Breakpoints().FindByAddress(pc);
        if (bp == nullptr) {
          uint8_t insnbuf[4];
          if (m_thread->Process()->ReadMemory(pc, 4, insnbuf) == 4) {
            uint8_t builtin_debugtrap_insn[4] = {0x00, 0x00, 0x3e,
                                                 0xd4}; // brk #0xf000
            if (memcmp(insnbuf, builtin_debugtrap_insn, 4) == 0) {
              SetPC(pc + 4);
            }
          }
        }
      }
    }
    break;
  }
  return false;
}

bool DNBArchMachARM64::ThreadDidStop() {
  bool success = true;

  m_state.InvalidateAllRegisterStates();

  if (m_watchpoint_resume_single_step_enabled) {
    // Great! We now disable the hardware single step as well as re-enable the
    // hardware watchpoint.
    // See also ThreadWillResume().
    if (EnableHardwareSingleStep(false) == KERN_SUCCESS) {
      if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) {
        ReenableHardwareWatchpoint(m_watchpoint_hw_index);
        m_watchpoint_resume_single_step_enabled = false;
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
      } else {
        DNBLogError("internal error detected: "
                    "m_watchpoint_resume_single_step_enabled is true but "
                    "(m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) "
                    "does not hold!");
      }
    } else {
      DNBLogError("internal error detected: "
                  "m_watchpoint_resume_single_step_enabled is true but unable "
                  "to disable single step!");
    }
  }

  // Are we stepping a single instruction?
  if (GetGPRState(true) == KERN_SUCCESS) {
    // We are single stepping, was this the primary thread?
    if (m_thread->IsStepping()) {
      // This was the primary thread, we need to clear the trace
      // bit if so.
      success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
    } else {
      // The MachThread will automatically restore the suspend count
      // in ThreadDidStop(), so we don't need to do anything here if
      // we weren't the primary thread the last time.
    }
  }
  return success;
}

// Set the single step bit (SS in MDSCR_EL1) via the thread's debug state.
kern_return_t DNBArchMachARM64::EnableHardwareSingleStep(bool enable) {
  DNBError err;
  DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);

  err = GetGPRState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
    return err.Status();
  }

  err = GetDBGState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
    return err.Status();
  }

#if defined(__LP64__)
  uint64_t pc = arm_thread_state64_get_pc(m_state.context.gpr);
#else
  uint64_t pc = m_state.context.gpr.__pc;
#endif

  if (enable) {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 |= SS_ENABLE;
  } else {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
  }

  return SetDBGState(false);
}

// Return 1 if bit "bit" is set in "value".
static inline uint32_t bit(uint32_t value, uint32_t bit) {
  return (value >> bit) & 1u;
}

// Return the bitfield "value[msbit:lsbit]".
static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit) {
  assert(msbit >= lsbit);
  uint64_t shift_left = sizeof(value) * 8 - 1 - msbit;
  value <<=
      shift_left; // shift anything above the msbit off of the unsigned edge
  value >>= shift_left + lsbit; // shift it back again down to the lsbit
                                // (including undoing any shift from above)
  return value;                 // return our result
}

uint32_t DNBArchMachARM64::NumSupportedHardwareWatchpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many watchpoints are supported dynamically...
  static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
  if (g_num_supported_hw_watchpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW watchpoints
    g_num_supported_hw_watchpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_watchpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
    } else {
      // For AArch64 we would need to look at ID_AA64DFR0_EL1, but debugserver
      // runs in EL0 so it can't access that reg. The kernel should have
      // filled in the sysctls based on it though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is
      // zero.
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_watchpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw watchpoints via asm(): %d",
                       g_num_supported_hw_watchpoints);
#endif
    }
  }
  return g_num_supported_hw_watchpoints;
}

uint32_t DNBArchMachARM64::NumSupportedHardwareBreakpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many breakpoints are supported dynamically...
  static uint32_t g_num_supported_hw_breakpoints = UINT_MAX;
  if (g_num_supported_hw_breakpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW breakpoints
    g_num_supported_hw_breakpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_breakpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n);
    } else {
      // For AArch64 we would need to look at ID_AA64DFR0_EL1, but debugserver
      // runs in EL0 so it can't access that reg. The kernel should have
      // filled in the sysctls based on it though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is
      // zero.
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_breakpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw breakpoints via asm(): %d",
                       g_num_supported_hw_breakpoints);
#endif
    }
  }
  return g_num_supported_hw_breakpoints;
}

uint32_t DNBArchMachARM64::EnableHardwareBreakpoint(nub_addr_t addr,
                                                    nub_size_t size,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareBreakpoint(addr = "
                   "0x%8.8llx, size = %zu)",
                   (uint64_t)addr, size);

  const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();

  nub_addr_t aligned_bp_address = addr;
  uint32_t control_value = 0;

  switch (size) {
  case 2:
    control_value = (0x3 << 5) | 7;
    aligned_bp_address &= ~1;
    break;
  case 4:
    control_value = (0xfu << 5) | 7;
    aligned_bp_address &= ~3;
    break;
  };

  // Read the debug state
  kern_return_t kret = GetDBGState(false);
  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    for (i = 0; i < num_hw_breakpoints; ++i) {
      if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0)
        break; // We found an available hw breakpoint slot (in i)
    }

    // See if we found an available hw breakpoint slot above
    if (i < num_hw_breakpoints) {
      m_state.dbg.__bvr[i] = aligned_bp_address;
      m_state.dbg.__bcr[i] = control_value;

      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::EnableHardwareBreakpoint() "
                       "adding breakpoint on address 0x%llx with control "
                       "register value 0x%x",
                       (uint64_t)m_state.dbg.__bvr[i],
                       (uint32_t)m_state.dbg.__bcr[i]);

      // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
      // automatically, don't need to do it here.
      kret = SetDBGState(also_set_on_task);

      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::"
                       "EnableHardwareBreakpoint() "
                       "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::"
                       "EnableHardwareBreakpoint(): All "
                       "hardware resources (%u) are in use.",
                       num_hw_breakpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}

uint32_t DNBArchMachARM64::EnableHardwareWatchpoint(nub_addr_t addr,
                                                    nub_size_t size, bool read,
                                                    bool write,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint(addr = "
                   "0x%8.8llx, size = %zu, read = %u, write = %u)",
                   (uint64_t)addr, size, read, write);

  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  // Can't watch zero bytes
  if (size == 0)
    return INVALID_NUB_HW_INDEX;

  // We must watch for either read or write
  if (read == false && write == false)
    return INVALID_NUB_HW_INDEX;

  // Otherwise, can't watch more than 8 bytes per WVR/WCR pair
  if (size > 8)
    return INVALID_NUB_HW_INDEX;

  // AArch64 watchpoints are in one of two forms: (1) 1-8 bytes, aligned to
  // an 8 byte address, or (2) a power-of-two size region of memory; minimum
  // 8 bytes, maximum 2GB; the starting address must be aligned to that power
  // of two.
  //
  // For (1), 1-8 byte watchpoints, using the Byte Address Selector field in
  // DBGWCR<n>.BAS. Any of the bytes may be watched, but if multiple bytes
  // are watched, the bytes selected must be contiguous. The start address
  // watched must be doubleword (8-byte) aligned; if the start address is
  // word (4-byte) aligned, only 4 bytes can be watched.
  //
  // For (2), the MASK field in DBGWCR<n>.MASK is used.
  //
  // See the ARM ARM, section "Watchpoint exceptions", and more specifically,
  // "Watchpoint data address comparisons".
  //
  // debugserver today only supports (1) - the Byte Address Selector 1-8 byte
  // watchpoints that are 8-byte aligned. To support larger watchpoints,
  // debugserver would need to interpret the mach exception when the watched
  // region was hit, see if the address accessed lies within the subset
  // of the power-of-two region that lldb asked us to watch (v. ARM ARM,
  // "Determining the memory location that caused a Watchpoint exception"),
  // and silently resume the inferior (disable watchpoint, stepi, re-enable
  // watchpoint) if the address lies outside the region that lldb asked us
  // to watch.
  //
  // Alternatively, lldb would need to be prepared for a larger region
  // being watched than it requested, and silently resume the inferior if
  // the accessed address is outside the region lldb wants to watch.

  nub_addr_t aligned_wp_address = addr & ~0x7;
  uint32_t addr_dword_offset = addr & 0x7;

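  // Worked example (illustrative, not from the original comments): for
  // addr = 0x100e and size = 4, aligned_wp_address = 0x1008 and
  // addr_dword_offset = 6. Since 6 + 4 > 8, the request is split below into a
  // 2-byte watchpoint at 0x100e (BAS = 0b11000000) and a 2-byte watchpoint at
  // 0x1010 (BAS = 0b00000011), linked together through the LoHi table.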
  // Do we need to split up this logical watchpoint into two hardware
  // watchpoint registers?
  // e.g. a watchpoint of length 4 on address 6. We need to do this with
  //   one watchpoint on address 0 with bytes 6 & 7 being monitored
  //   one watchpoint on address 8 with bytes 0 & 1 being monitored

  if (addr_dword_offset + size > 8) {
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                      "EnableHardwareWatchpoint(addr = "
                                      "0x%8.8llx, size = %zu) needs two "
                                      "hardware watchpoints slots to monitor",
                     (uint64_t)addr, size);
    int low_watchpoint_size = 8 - addr_dword_offset;
    int high_watchpoint_size = addr_dword_offset + size - 8;

    uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read,
                                           write, also_set_on_task);
    if (lo == INVALID_NUB_HW_INDEX)
      return INVALID_NUB_HW_INDEX;
    uint32_t hi =
        EnableHardwareWatchpoint(aligned_wp_address + 8, high_watchpoint_size,
                                 read, write, also_set_on_task);
    if (hi == INVALID_NUB_HW_INDEX) {
      DisableHardwareWatchpoint(lo, also_set_on_task);
      return INVALID_NUB_HW_INDEX;
    }
    // Tag this lo->hi mapping in our database.
    LoHi[lo] = hi;
    return lo;
  }

  // At this point
  //   1. aligned_wp_address is the requested address rounded down to 8-byte
  //      alignment
  //   2. addr_dword_offset is the offset into that double word (8-byte) region
  //      that we are watching
  //   3. size is the number of bytes within that 8-byte region that we are
  //      watching

  // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the
  // above. The shift and subtraction gives us 0b11 for 2, 0b1111 for 4, etc,
  // up to 0b11111111 for 8. Then we shift those bits left by the offset into
  // this dword that we are interested in.
  // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of
  // 0b11110000.
  uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset;

  // Read the debug state
  kern_return_t kret = GetDBGState(false);

  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    for (i = 0; i < num_hw_watchpoints; ++i) {
      if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
        break; // We found an available hw watchpoint slot (in i)
    }

    // See if we found an available hw watchpoint slot above
    if (i < num_hw_watchpoints) {
      // DumpDBGState(m_state.dbg);

      // Clear any previous LoHi joined-watchpoint that may have been in use
      LoHi[i] = 0;

      // Shift our Byte Address Select bits up to the correct bit range for the
      // DBGWCRn_EL1
      byte_address_select = byte_address_select << 5;

      // Make sure bits 1:0 are clear in our address
      m_state.dbg.__wvr[i] = aligned_wp_address;   // DVA (Data Virtual Address)
      m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow
                                                   // the DVA that we will watch
                             S_USER |              // Stop only in user mode
                             (read ? WCR_LOAD : 0) |   // Stop on read access?
                             (write ? WCR_STORE : 0) | // Stop on write access?
                             WCR_ENABLE;               // Enable this watchpoint

      DNBLogThreadedIf(
          LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint() "
                           "adding watchpoint on address 0x%llx with control "
                           "register value 0x%x",
          (uint64_t)m_state.dbg.__wvr[i], (uint32_t)m_state.dbg.__wcr[i]);

      // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
      // automatically, don't need to do it here.

      kret = SetDBGState(also_set_on_task);
      // DumpDBGState(m_state.dbg);

      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint() "
                                        "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint(): All "
                                        "hardware resources (%u) are in use.",
                       num_hw_watchpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}

bool DNBArchMachARM64::ReenableHardwareWatchpoint(uint32_t hw_index) {
  // If this logical watchpoint # is actually implemented using
  // two hardware watchpoint registers, re-enable both of them.

  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return ReenableHardwareWatchpoint_helper(hw_index) &&
           ReenableHardwareWatchpoint_helper(LoHi[hw_index]);
  } else {
    return ReenableHardwareWatchpoint_helper(hw_index);
  }
}

bool DNBArchMachARM64::ReenableHardwareWatchpoint_helper(uint32_t hw_index) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
  m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;

  DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                    "ReenableHardwareWatchpoint( %u ) - WVR%u = "
                                    "0x%8.8llx WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
  // automatically, don't need to do it here.

  kret = SetDBGState(false);

  return (kret == KERN_SUCCESS);
}

bool DNBArchMachARM64::DisableHardwareWatchpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task) &&
           DisableHardwareWatchpoint_helper(LoHi[hw_index], also_set_on_task);
  } else {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task);
  }
}

bool DNBArchMachARM64::DisableHardwareWatchpoint_helper(uint32_t hw_index,
                                                        bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
  m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];

  m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
  DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                    "DisableHardwareWatchpoint( %u ) - WVR%u = "
                                    "0x%8.8llx WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  kret = SetDBGState(also_set_on_task);

  return (kret == KERN_SUCCESS);
}

bool DNBArchMachARM64::DisableHardwareBreakpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_disabled_breakpoints[hw_index].addr = m_state.dbg.__bvr[hw_index];
  m_disabled_breakpoints[hw_index].control = m_state.dbg.__bcr[hw_index];

  m_state.dbg.__bcr[hw_index] = 0;
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "DisableHardwareBreakpoint( %u ) - BVR%u = "
                   "0x%8.8llx BCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__bvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__bcr[hw_index]);

  kret = SetDBGState(also_set_on_task);

  return (kret == KERN_SUCCESS);
}

// This is for checking the Byte Address Select bits in the DBGWCRn_EL1 control
// register.
// Returns -1 if the trailing bit patterns are not one of:
// { 0b???????1, 0b??????10, 0b?????100, 0b????1000, 0b???10000, 0b??100000,
//   0b?1000000, 0b10000000 }.
static inline int32_t LowestBitSet(uint32_t val) {
  for (unsigned i = 0; i < 8; ++i) {
    if (bit(val, i))
      return i;
  }
  return -1;
}

// Iterate through the debug registers; return the index of the first
// watchpoint whose address matches. As a side effect, the starting address as
// understood by the debugger is returned, which could be different from
// 'addr' passed as an in/out argument.
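// Illustrative example (not from the original comments): a 2-byte watchpoint
// on 0x1006 is programmed as WVR = 0x1000 with BAS = 0b11000000. If the
// inferior touches 0x1007, the kernel reports 0x1007; the loop below matches
// it against that slot (same dword, BAS bit 7 set) and rewrites addr to
// wp_addr + LowestBitSet(BAS) = 0x1000 + 6 = 0x1006, the address lldb set.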
uint32_t DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr) {
  // Read the debug state
  kern_return_t kret = GetDBGState(true);
  // DumpDBGState(m_state.dbg);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
      kret);
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx",
                   (uint64_t)addr);

  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.dbg;
    uint32_t i, num = NumSupportedHardwareWatchpoints();
    for (i = 0; i < num; ++i) {
      nub_addr_t wp_addr = GetWatchAddress(debug_state, i);
      uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5);

      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "GetHardwareWatchpointHit() slot: %u "
                                        "(addr = 0x%llx; byte_mask = 0x%x)",
                       i, static_cast<uint64_t>(wp_addr), byte_mask);

      if (!IsWatchpointEnabled(debug_state, i))
        continue;

      if (bits(wp_addr, 48, 3) != bits(addr, 48, 3))
        continue;

      // Sanity check the byte_mask
      int32_t lsb = LowestBitSet(byte_mask);
      if (lsb < 0)
        continue;

      uint64_t byte_to_match = bits(addr, 2, 0);

      if (byte_mask & (1 << byte_to_match)) {
        addr = wp_addr + lsb;
        return i;
      }
    }
  }
  return INVALID_NUB_HW_INDEX;
}

nub_addr_t DNBArchMachARM64::GetWatchpointAddressByIndex(uint32_t hw_index) {
  kern_return_t kret = GetDBGState(true);
  if (kret != KERN_SUCCESS)
    return INVALID_NUB_ADDRESS;
  const uint32_t num = NumSupportedHardwareWatchpoints();
  if (hw_index >= num)
    return INVALID_NUB_ADDRESS;
  if (IsWatchpointEnabled(m_state.dbg, hw_index))
    return GetWatchAddress(m_state.dbg, hw_index);
  return INVALID_NUB_ADDRESS;
}

bool DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state,
                                           uint32_t hw_index) {
  // Watchpoint Control Registers, bitfield definitions
  // ...
  // Bits   Value   Description
  // [0]    0       Watchpoint disabled
  //        1       Watchpoint enabled.
  return (debug_state.__wcr[hw_index] & 1u);
}

nub_addr_t DNBArchMachARM64::GetWatchAddress(const DBG &debug_state,
                                             uint32_t hw_index) {
  // Watchpoint Value Registers, bitfield definitions
  // Bits     Description
  // [31:2]   Watchpoint value (word address, i.e., 4-byte aligned)
  // [1:0]    RAZ/SBZP
  return bits(debug_state.__wvr[hw_index], 63, 0);
}

// Register information definitions for 64 bit ARMv8.
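// The enums below define debugserver's internal (nub) register numbers for
// each register set. Note that the w0-w28 entries (and the s/d VFP entries
// further down) are pseudo registers: they have no storage of their own and
// are exposed as the lower portion of the corresponding x<n>/v<n> register
// via the g_contained_*/g_invalidate_* arrays defined later in this file.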
enum gpr_regnums {
  gpr_x0 = 0, gpr_x1, gpr_x2, gpr_x3, gpr_x4, gpr_x5, gpr_x6, gpr_x7,
  gpr_x8, gpr_x9, gpr_x10, gpr_x11, gpr_x12, gpr_x13, gpr_x14, gpr_x15,
  gpr_x16, gpr_x17, gpr_x18, gpr_x19, gpr_x20, gpr_x21, gpr_x22, gpr_x23,
  gpr_x24, gpr_x25, gpr_x26, gpr_x27, gpr_x28,
  gpr_fp, gpr_x29 = gpr_fp,
  gpr_lr, gpr_x30 = gpr_lr,
  gpr_sp, gpr_x31 = gpr_sp,
  gpr_pc, gpr_cpsr,
  gpr_w0, gpr_w1, gpr_w2, gpr_w3, gpr_w4, gpr_w5, gpr_w6, gpr_w7,
  gpr_w8, gpr_w9, gpr_w10, gpr_w11, gpr_w12, gpr_w13, gpr_w14, gpr_w15,
  gpr_w16, gpr_w17, gpr_w18, gpr_w19, gpr_w20, gpr_w21, gpr_w22, gpr_w23,
  gpr_w24, gpr_w25, gpr_w26, gpr_w27, gpr_w28
};

enum {
  vfp_v0 = 0, vfp_v1, vfp_v2, vfp_v3, vfp_v4, vfp_v5, vfp_v6, vfp_v7,
  vfp_v8, vfp_v9, vfp_v10, vfp_v11, vfp_v12, vfp_v13, vfp_v14, vfp_v15,
  vfp_v16, vfp_v17, vfp_v18, vfp_v19, vfp_v20, vfp_v21, vfp_v22, vfp_v23,
  vfp_v24, vfp_v25, vfp_v26, vfp_v27, vfp_v28, vfp_v29, vfp_v30, vfp_v31,
  vfp_fpsr, vfp_fpcr,

  // lower 32 bits of the corresponding vfp_v<n> reg.
  vfp_s0, vfp_s1, vfp_s2, vfp_s3, vfp_s4, vfp_s5, vfp_s6, vfp_s7,
  vfp_s8, vfp_s9, vfp_s10, vfp_s11, vfp_s12, vfp_s13, vfp_s14, vfp_s15,
  vfp_s16, vfp_s17, vfp_s18, vfp_s19, vfp_s20, vfp_s21, vfp_s22, vfp_s23,
  vfp_s24, vfp_s25, vfp_s26, vfp_s27, vfp_s28, vfp_s29, vfp_s30, vfp_s31,

  // lower 64 bits of the corresponding vfp_v<n> reg.
  vfp_d0, vfp_d1, vfp_d2, vfp_d3, vfp_d4, vfp_d5, vfp_d6, vfp_d7,
  vfp_d8, vfp_d9, vfp_d10, vfp_d11, vfp_d12, vfp_d13, vfp_d14, vfp_d15,
  vfp_d16, vfp_d17, vfp_d18, vfp_d19, vfp_d20, vfp_d21, vfp_d22, vfp_d23,
  vfp_d24, vfp_d25, vfp_d26, vfp_d27, vfp_d28, vfp_d29, vfp_d30, vfp_d31
};

enum { exc_far = 0, exc_esr, exc_exception };

// These numbers come from the "DWARF for the ARM 64-bit Architecture
// (AArch64)" document.

enum {
  dwarf_x0 = 0, dwarf_x1, dwarf_x2, dwarf_x3, dwarf_x4, dwarf_x5, dwarf_x6,
  dwarf_x7, dwarf_x8, dwarf_x9, dwarf_x10, dwarf_x11, dwarf_x12, dwarf_x13,
  dwarf_x14, dwarf_x15, dwarf_x16, dwarf_x17, dwarf_x18, dwarf_x19, dwarf_x20,
  dwarf_x21, dwarf_x22, dwarf_x23, dwarf_x24, dwarf_x25, dwarf_x26, dwarf_x27,
  dwarf_x28, dwarf_x29, dwarf_x30, dwarf_x31,
  dwarf_pc = 32,
  dwarf_elr_mode = 33,
  dwarf_fp = dwarf_x29,
  dwarf_lr = dwarf_x30,
  dwarf_sp = dwarf_x31,
  // 34-63 reserved

  // V0-V31 (128 bit vector registers)
  dwarf_v0 = 64, dwarf_v1, dwarf_v2, dwarf_v3, dwarf_v4, dwarf_v5, dwarf_v6,
  dwarf_v7, dwarf_v8, dwarf_v9, dwarf_v10, dwarf_v11, dwarf_v12, dwarf_v13,
  dwarf_v14, dwarf_v15, dwarf_v16, dwarf_v17, dwarf_v18, dwarf_v19, dwarf_v20,
  dwarf_v21, dwarf_v22, dwarf_v23, dwarf_v24, dwarf_v25, dwarf_v26, dwarf_v27,
  dwarf_v28, dwarf_v29, dwarf_v30, dwarf_v31

  // 96-127 reserved
};

enum {
  debugserver_gpr_x0 = 0, debugserver_gpr_x1, debugserver_gpr_x2,
  debugserver_gpr_x3, debugserver_gpr_x4, debugserver_gpr_x5,
  debugserver_gpr_x6, debugserver_gpr_x7, debugserver_gpr_x8,
  debugserver_gpr_x9, debugserver_gpr_x10, debugserver_gpr_x11,
  debugserver_gpr_x12, debugserver_gpr_x13, debugserver_gpr_x14,
  debugserver_gpr_x15, debugserver_gpr_x16, debugserver_gpr_x17,
  debugserver_gpr_x18, debugserver_gpr_x19, debugserver_gpr_x20,
  debugserver_gpr_x21, debugserver_gpr_x22, debugserver_gpr_x23,
  debugserver_gpr_x24, debugserver_gpr_x25, debugserver_gpr_x26,
  debugserver_gpr_x27, debugserver_gpr_x28,
  debugserver_gpr_fp, // x29
  debugserver_gpr_lr, // x30
  debugserver_gpr_sp, // sp aka xsp
  debugserver_gpr_pc, debugserver_gpr_cpsr,
  debugserver_vfp_v0, debugserver_vfp_v1, debugserver_vfp_v2,
  debugserver_vfp_v3, debugserver_vfp_v4, debugserver_vfp_v5,
  debugserver_vfp_v6, debugserver_vfp_v7, debugserver_vfp_v8,
  debugserver_vfp_v9, debugserver_vfp_v10, debugserver_vfp_v11,
  debugserver_vfp_v12, debugserver_vfp_v13, debugserver_vfp_v14,
  debugserver_vfp_v15, debugserver_vfp_v16, debugserver_vfp_v17,
  debugserver_vfp_v18, debugserver_vfp_v19, debugserver_vfp_v20,
  debugserver_vfp_v21, debugserver_vfp_v22, debugserver_vfp_v23,
  debugserver_vfp_v24, debugserver_vfp_v25, debugserver_vfp_v26,
  debugserver_vfp_v27, debugserver_vfp_v28, debugserver_vfp_v29,
  debugserver_vfp_v30, debugserver_vfp_v31,
  debugserver_vfp_fpsr, debugserver_vfp_fpcr
};

const char *g_contained_x0[]{"x0", NULL};
const char *g_contained_x1[]{"x1", NULL};
const char *g_contained_x2[]{"x2", NULL};
const char *g_contained_x3[]{"x3", NULL};
const char *g_contained_x4[]{"x4", NULL};
const char *g_contained_x5[]{"x5", NULL};
const char *g_contained_x6[]{"x6", NULL};
const char *g_contained_x7[]{"x7", NULL};
const char *g_contained_x8[]{"x8", NULL};
const char *g_contained_x9[]{"x9", NULL};
const char *g_contained_x10[]{"x10", NULL};
const char *g_contained_x11[]{"x11", NULL};
const char *g_contained_x12[]{"x12", NULL};
const char *g_contained_x13[]{"x13", NULL};
const char *g_contained_x14[]{"x14", NULL};
const char *g_contained_x15[]{"x15", NULL};
const char *g_contained_x16[]{"x16", NULL};
const char *g_contained_x17[]{"x17", NULL};
const char *g_contained_x18[]{"x18", NULL};
const char *g_contained_x19[]{"x19", NULL};
const char *g_contained_x20[]{"x20", NULL};
const char *g_contained_x21[]{"x21", NULL};
const char *g_contained_x22[]{"x22", NULL};
const char *g_contained_x23[]{"x23", NULL};
const char *g_contained_x24[]{"x24", NULL};
const char *g_contained_x25[]{"x25", NULL};
const char *g_contained_x26[]{"x26", NULL};
const char *g_contained_x27[]{"x27", NULL};
const char *g_contained_x28[]{"x28", NULL};

const char *g_invalidate_x0[]{"x0", "w0", NULL};
const char *g_invalidate_x1[]{"x1", "w1", NULL};
const char *g_invalidate_x2[]{"x2", "w2", NULL};
const char *g_invalidate_x3[]{"x3", "w3", NULL};
const char *g_invalidate_x4[]{"x4", "w4", NULL};
const char *g_invalidate_x5[]{"x5", "w5", NULL};
const char *g_invalidate_x6[]{"x6", "w6", NULL};
const char *g_invalidate_x7[]{"x7", "w7", NULL};
const char *g_invalidate_x8[]{"x8", "w8", NULL};
const char *g_invalidate_x9[]{"x9", "w9", NULL};
const char *g_invalidate_x10[]{"x10", "w10", NULL};
const char *g_invalidate_x11[]{"x11", "w11", NULL};
const char *g_invalidate_x12[]{"x12", "w12", NULL};
const char *g_invalidate_x13[]{"x13", "w13", NULL};
const char *g_invalidate_x14[]{"x14", "w14", NULL};
const char *g_invalidate_x15[]{"x15", "w15", NULL};
const char *g_invalidate_x16[]{"x16", "w16", NULL};
const char *g_invalidate_x17[]{"x17", "w17", NULL};
const char *g_invalidate_x18[]{"x18", "w18", NULL};
const char *g_invalidate_x19[]{"x19", "w19", NULL};
const char *g_invalidate_x20[]{"x20", "w20", NULL};
const char *g_invalidate_x21[]{"x21", "w21", NULL};
const char *g_invalidate_x22[]{"x22", "w22", NULL};
const char *g_invalidate_x23[]{"x23", "w23", NULL};
const char *g_invalidate_x24[]{"x24", "w24", NULL};
const char *g_invalidate_x25[]{"x25", "w25", NULL};
const char *g_invalidate_x26[]{"x26", "w26", NULL};
const char *g_invalidate_x27[]{"x27", "w27", NULL};
const char *g_invalidate_x28[]{"x28", "w28", NULL};

#define GPR_OFFSET_IDX(idx) (offsetof(DNBArchMachARM64::GPR, __x[idx]))

#define GPR_OFFSET_NAME(reg) (offsetof(DNBArchMachARM64::GPR, __##reg))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.
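// For orientation (summarized from the macro arguments below and the
// DNBRegisterInfo definition in DNBRegisterInfo.h): each initializer lists
// the register set, nub register number, name, alt name, type, format, size
// in bytes, byte offset into the register context, the eh_frame/DWARF/
// generic/debugserver register numbers, and finally the value_regs
// ("contained in") and update_regs ("invalidates") name lists.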
#define DEFINE_GPR_IDX(idx, reg, alt, gen) \
  { \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx), \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, \
        g_invalidate_x##idx \
  }
#define DEFINE_GPR_NAME(reg, alt, gen) \
  { \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg), \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL \
  }
#define DEFINE_PSEUDO_GPR_IDX(idx, reg) \
  { \
    e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
        g_contained_x##idx, g_invalidate_x##idx \
  }

//_STRUCT_ARM_THREAD_STATE64
//{
//  uint64_t x[29];  /* General purpose registers x0-x28 */
//  uint64_t fp;     /* Frame pointer x29 */
//  uint64_t lr;     /* Link register x30 */
//  uint64_t sp;     /* Stack pointer x31 */
//  uint64_t pc;     /* Program counter */
//  uint32_t cpsr;   /* Current program status register */
//};

// General purpose registers
const DNBRegisterInfo DNBArchMachARM64::g_gpr_registers[] = {
    DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1),
    DEFINE_GPR_IDX(1, x1, "arg2", GENERIC_REGNUM_ARG2),
    DEFINE_GPR_IDX(2, x2, "arg3", GENERIC_REGNUM_ARG3),
    DEFINE_GPR_IDX(3, x3, "arg4", GENERIC_REGNUM_ARG4),
    DEFINE_GPR_IDX(4, x4, "arg5", GENERIC_REGNUM_ARG5),
    DEFINE_GPR_IDX(5, x5, "arg6", GENERIC_REGNUM_ARG6),
    DEFINE_GPR_IDX(6, x6, "arg7", GENERIC_REGNUM_ARG7),
    DEFINE_GPR_IDX(7, x7, "arg8", GENERIC_REGNUM_ARG8),
    DEFINE_GPR_IDX(8, x8, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(9, x9, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(10, x10, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(11, x11, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(12, x12, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(13, x13, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(14, x14, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(15, x15, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(16, x16, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(17, x17, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(18, x18, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(19, x19, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(20, x20, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(21, x21, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(22, x22, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(23, x23, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(24, x24, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(25, x25, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(26, x26, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(27, x27, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(28, x28, NULL, INVALID_NUB_REGNUM),
    // For the G/g packet we want to show where the offset into the regctx
    // is for fp/lr/sp/pc, but we cannot directly access them on arm64e
    // devices (and therefore can't offsetof() them) - add the offset based
    // on the last accessible register by hand for advertising the location
    // in the regctx to lldb. We'll go through the accessor functions when
    // we read/write them here.
    {
        e_regSetGPR, gpr_fp, "fp", "x29", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 8,
        dwarf_fp, dwarf_fp, GENERIC_REGNUM_FP, debugserver_gpr_fp, NULL, NULL
    },
    {
        e_regSetGPR, gpr_lr, "lr", "x30", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 16,
        dwarf_lr, dwarf_lr, GENERIC_REGNUM_RA, debugserver_gpr_lr, NULL, NULL
    },
    {
        e_regSetGPR, gpr_sp, "sp", "xsp", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 24,
        dwarf_sp, dwarf_sp, GENERIC_REGNUM_SP, debugserver_gpr_sp, NULL, NULL
    },
    {
        e_regSetGPR, gpr_pc, "pc", NULL, Uint, Hex, 8, GPR_OFFSET_IDX(28) + 32,
        dwarf_pc, dwarf_pc, GENERIC_REGNUM_PC, debugserver_gpr_pc, NULL, NULL
    },

    // In armv7 we specify that writing to the CPSR should invalidate r8-12,
    // sp, lr. This should be specified for arm64 too, even though debugserver
    // is only used for userland debugging.
    {e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4,
     GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, INVALID_NUB_REGNUM,
     debugserver_gpr_cpsr, NULL, NULL},

    DEFINE_PSEUDO_GPR_IDX(0, w0),   DEFINE_PSEUDO_GPR_IDX(1, w1),
    DEFINE_PSEUDO_GPR_IDX(2, w2),   DEFINE_PSEUDO_GPR_IDX(3, w3),
    DEFINE_PSEUDO_GPR_IDX(4, w4),   DEFINE_PSEUDO_GPR_IDX(5, w5),
    DEFINE_PSEUDO_GPR_IDX(6, w6),   DEFINE_PSEUDO_GPR_IDX(7, w7),
    DEFINE_PSEUDO_GPR_IDX(8, w8),   DEFINE_PSEUDO_GPR_IDX(9, w9),
    DEFINE_PSEUDO_GPR_IDX(10, w10), DEFINE_PSEUDO_GPR_IDX(11, w11),
    DEFINE_PSEUDO_GPR_IDX(12, w12), DEFINE_PSEUDO_GPR_IDX(13, w13),
    DEFINE_PSEUDO_GPR_IDX(14, w14), DEFINE_PSEUDO_GPR_IDX(15, w15),
    DEFINE_PSEUDO_GPR_IDX(16, w16), DEFINE_PSEUDO_GPR_IDX(17, w17),
    DEFINE_PSEUDO_GPR_IDX(18, w18), DEFINE_PSEUDO_GPR_IDX(19, w19),
    DEFINE_PSEUDO_GPR_IDX(20, w20), DEFINE_PSEUDO_GPR_IDX(21, w21),
    DEFINE_PSEUDO_GPR_IDX(22, w22), DEFINE_PSEUDO_GPR_IDX(23, w23),
    DEFINE_PSEUDO_GPR_IDX(24, w24), DEFINE_PSEUDO_GPR_IDX(25, w25),
    DEFINE_PSEUDO_GPR_IDX(26, w26), DEFINE_PSEUDO_GPR_IDX(27, w27),
    DEFINE_PSEUDO_GPR_IDX(28, w28)};

const char *g_contained_v0[]{"v0", NULL};
const char *g_contained_v1[]{"v1", NULL};
const char *g_contained_v2[]{"v2", NULL};
const char *g_contained_v3[]{"v3", NULL};
const char *g_contained_v4[]{"v4", NULL};
const char *g_contained_v5[]{"v5", NULL};
const char *g_contained_v6[]{"v6", NULL};
const char *g_contained_v7[]{"v7", NULL};
const char *g_contained_v8[]{"v8", NULL};
const char *g_contained_v9[]{"v9", NULL};
const char *g_contained_v10[]{"v10", NULL};
const char *g_contained_v11[]{"v11", NULL};
const char *g_contained_v12[]{"v12", NULL};
const char *g_contained_v13[]{"v13", NULL};
const char *g_contained_v14[]{"v14", NULL};
const char *g_contained_v15[]{"v15", NULL};
const char *g_contained_v16[]{"v16", NULL};
const char *g_contained_v17[]{"v17", NULL};
const char *g_contained_v18[]{"v18", NULL};
const char *g_contained_v19[]{"v19", NULL};
const char *g_contained_v20[]{"v20", NULL};
const char *g_contained_v21[]{"v21", NULL};
const char *g_contained_v22[]{"v22", NULL};
const char *g_contained_v23[]{"v23", NULL};
const char *g_contained_v24[]{"v24", NULL};
const char *g_contained_v25[]{"v25", NULL};
const char *g_contained_v26[]{"v26", NULL};
const char *g_contained_v27[]{"v27", NULL};
const char *g_contained_v28[]{"v28", NULL};
const char *g_contained_v0[]{"v0", NULL};
const char *g_contained_v1[]{"v1", NULL};
const char *g_contained_v2[]{"v2", NULL};
const char *g_contained_v3[]{"v3", NULL};
const char *g_contained_v4[]{"v4", NULL};
const char *g_contained_v5[]{"v5", NULL};
const char *g_contained_v6[]{"v6", NULL};
const char *g_contained_v7[]{"v7", NULL};
const char *g_contained_v8[]{"v8", NULL};
const char *g_contained_v9[]{"v9", NULL};
const char *g_contained_v10[]{"v10", NULL};
const char *g_contained_v11[]{"v11", NULL};
const char *g_contained_v12[]{"v12", NULL};
const char *g_contained_v13[]{"v13", NULL};
const char *g_contained_v14[]{"v14", NULL};
const char *g_contained_v15[]{"v15", NULL};
const char *g_contained_v16[]{"v16", NULL};
const char *g_contained_v17[]{"v17", NULL};
const char *g_contained_v18[]{"v18", NULL};
const char *g_contained_v19[]{"v19", NULL};
const char *g_contained_v20[]{"v20", NULL};
const char *g_contained_v21[]{"v21", NULL};
const char *g_contained_v22[]{"v22", NULL};
const char *g_contained_v23[]{"v23", NULL};
const char *g_contained_v24[]{"v24", NULL};
const char *g_contained_v25[]{"v25", NULL};
const char *g_contained_v26[]{"v26", NULL};
const char *g_contained_v27[]{"v27", NULL};
const char *g_contained_v28[]{"v28", NULL};
const char *g_contained_v29[]{"v29", NULL};
const char *g_contained_v30[]{"v30", NULL};
const char *g_contained_v31[]{"v31", NULL};

const char *g_invalidate_v0[]{"v0", "d0", "s0", NULL};
const char *g_invalidate_v1[]{"v1", "d1", "s1", NULL};
const char *g_invalidate_v2[]{"v2", "d2", "s2", NULL};
const char *g_invalidate_v3[]{"v3", "d3", "s3", NULL};
const char *g_invalidate_v4[]{"v4", "d4", "s4", NULL};
const char *g_invalidate_v5[]{"v5", "d5", "s5", NULL};
const char *g_invalidate_v6[]{"v6", "d6", "s6", NULL};
const char *g_invalidate_v7[]{"v7", "d7", "s7", NULL};
const char *g_invalidate_v8[]{"v8", "d8", "s8", NULL};
const char *g_invalidate_v9[]{"v9", "d9", "s9", NULL};
const char *g_invalidate_v10[]{"v10", "d10", "s10", NULL};
const char *g_invalidate_v11[]{"v11", "d11", "s11", NULL};
const char *g_invalidate_v12[]{"v12", "d12", "s12", NULL};
const char *g_invalidate_v13[]{"v13", "d13", "s13", NULL};
const char *g_invalidate_v14[]{"v14", "d14", "s14", NULL};
const char *g_invalidate_v15[]{"v15", "d15", "s15", NULL};
const char *g_invalidate_v16[]{"v16", "d16", "s16", NULL};
const char *g_invalidate_v17[]{"v17", "d17", "s17", NULL};
const char *g_invalidate_v18[]{"v18", "d18", "s18", NULL};
const char *g_invalidate_v19[]{"v19", "d19", "s19", NULL};
const char *g_invalidate_v20[]{"v20", "d20", "s20", NULL};
const char *g_invalidate_v21[]{"v21", "d21", "s21", NULL};
const char *g_invalidate_v22[]{"v22", "d22", "s22", NULL};
const char *g_invalidate_v23[]{"v23", "d23", "s23", NULL};
const char *g_invalidate_v24[]{"v24", "d24", "s24", NULL};
const char *g_invalidate_v25[]{"v25", "d25", "s25", NULL};
const char *g_invalidate_v26[]{"v26", "d26", "s26", NULL};
const char *g_invalidate_v27[]{"v27", "d27", "s27", NULL};
const char *g_invalidate_v28[]{"v28", "d28", "s28", NULL};
const char *g_invalidate_v29[]{"v29", "d29", "s29", NULL};
const char *g_invalidate_v30[]{"v30", "d30", "s30", NULL};
const char *g_invalidate_v31[]{"v31", "d31", "s31", NULL};

#if defined(__arm64__) || defined(__aarch64__)
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, __v) + (idx * 16) +                         \
   offsetof(DNBArchMachARM64::Context, vfp))
#else
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, opaque) + (idx * 16) +                      \
   offsetof(DNBArchMachARM64::Context, vfp))
#endif
#define VFP_OFFSET_NAME(reg)                                                   \
  (offsetof(DNBArchMachARM64::FPU, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, vfp))
#define EXC_OFFSET(reg)                                                        \
  (offsetof(DNBArchMachARM64::EXC, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, exc))

//#define FLOAT_FORMAT Float
#define DEFINE_VFP_V_IDX(idx)                                                  \
  {                                                                            \
    e_regSetVFP, vfp_v##idx, "v" #idx, "q" #idx, Vector, VectorOfUInt8, 16,    \
        VFP_V_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_v##idx,               \
        INVALID_NUB_REGNUM, debugserver_vfp_v##idx, NULL, g_invalidate_v##idx  \
  }
#define DEFINE_PSEUDO_VFP_S_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, Float, 4, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
#define DEFINE_PSEUDO_VFP_D_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, Float, 8, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
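// DEFINE_VFP_V_IDX describes the real 128-bit v<idx> registers at their
// offsets within the VFP thread state. DEFINE_PSEUDO_VFP_S_IDX and
// DEFINE_PSEUDO_VFP_D_IDX describe the 32-bit s<idx> and 64-bit d<idx> views;
// they use an offset of 0 because their values are read from and written
// through the containing v register (see the g_contained_v* lists above)
// rather than from a separate slot in the thread state.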
// Floating point registers
const DNBRegisterInfo DNBArchMachARM64::g_vfp_registers[] = {
    DEFINE_VFP_V_IDX(0),
    DEFINE_VFP_V_IDX(1),
    DEFINE_VFP_V_IDX(2),
    DEFINE_VFP_V_IDX(3),
    DEFINE_VFP_V_IDX(4),
    DEFINE_VFP_V_IDX(5),
    DEFINE_VFP_V_IDX(6),
    DEFINE_VFP_V_IDX(7),
    DEFINE_VFP_V_IDX(8),
    DEFINE_VFP_V_IDX(9),
    DEFINE_VFP_V_IDX(10),
    DEFINE_VFP_V_IDX(11),
    DEFINE_VFP_V_IDX(12),
    DEFINE_VFP_V_IDX(13),
    DEFINE_VFP_V_IDX(14),
    DEFINE_VFP_V_IDX(15),
    DEFINE_VFP_V_IDX(16),
    DEFINE_VFP_V_IDX(17),
    DEFINE_VFP_V_IDX(18),
    DEFINE_VFP_V_IDX(19),
    DEFINE_VFP_V_IDX(20),
    DEFINE_VFP_V_IDX(21),
    DEFINE_VFP_V_IDX(22),
    DEFINE_VFP_V_IDX(23),
    DEFINE_VFP_V_IDX(24),
    DEFINE_VFP_V_IDX(25),
    DEFINE_VFP_V_IDX(26),
    DEFINE_VFP_V_IDX(27),
    DEFINE_VFP_V_IDX(28),
    DEFINE_VFP_V_IDX(29),
    DEFINE_VFP_V_IDX(30),
    DEFINE_VFP_V_IDX(31),
    {e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    DEFINE_PSEUDO_VFP_S_IDX(0),
    DEFINE_PSEUDO_VFP_S_IDX(1),
    DEFINE_PSEUDO_VFP_S_IDX(2),
    DEFINE_PSEUDO_VFP_S_IDX(3),
    DEFINE_PSEUDO_VFP_S_IDX(4),
    DEFINE_PSEUDO_VFP_S_IDX(5),
    DEFINE_PSEUDO_VFP_S_IDX(6),
    DEFINE_PSEUDO_VFP_S_IDX(7),
    DEFINE_PSEUDO_VFP_S_IDX(8),
    DEFINE_PSEUDO_VFP_S_IDX(9),
    DEFINE_PSEUDO_VFP_S_IDX(10),
    DEFINE_PSEUDO_VFP_S_IDX(11),
    DEFINE_PSEUDO_VFP_S_IDX(12),
    DEFINE_PSEUDO_VFP_S_IDX(13),
    DEFINE_PSEUDO_VFP_S_IDX(14),
    DEFINE_PSEUDO_VFP_S_IDX(15),
    DEFINE_PSEUDO_VFP_S_IDX(16),
    DEFINE_PSEUDO_VFP_S_IDX(17),
    DEFINE_PSEUDO_VFP_S_IDX(18),
    DEFINE_PSEUDO_VFP_S_IDX(19),
    DEFINE_PSEUDO_VFP_S_IDX(20),
    DEFINE_PSEUDO_VFP_S_IDX(21),
    DEFINE_PSEUDO_VFP_S_IDX(22),
    DEFINE_PSEUDO_VFP_S_IDX(23),
    DEFINE_PSEUDO_VFP_S_IDX(24),
    DEFINE_PSEUDO_VFP_S_IDX(25),
    DEFINE_PSEUDO_VFP_S_IDX(26),
    DEFINE_PSEUDO_VFP_S_IDX(27),
    DEFINE_PSEUDO_VFP_S_IDX(28),
    DEFINE_PSEUDO_VFP_S_IDX(29),
    DEFINE_PSEUDO_VFP_S_IDX(30),
    DEFINE_PSEUDO_VFP_S_IDX(31),

    DEFINE_PSEUDO_VFP_D_IDX(0),
    DEFINE_PSEUDO_VFP_D_IDX(1),
    DEFINE_PSEUDO_VFP_D_IDX(2),
    DEFINE_PSEUDO_VFP_D_IDX(3),
    DEFINE_PSEUDO_VFP_D_IDX(4),
    DEFINE_PSEUDO_VFP_D_IDX(5),
    DEFINE_PSEUDO_VFP_D_IDX(6),
    DEFINE_PSEUDO_VFP_D_IDX(7),
    DEFINE_PSEUDO_VFP_D_IDX(8),
    DEFINE_PSEUDO_VFP_D_IDX(9),
    DEFINE_PSEUDO_VFP_D_IDX(10),
    DEFINE_PSEUDO_VFP_D_IDX(11),
    DEFINE_PSEUDO_VFP_D_IDX(12),
    DEFINE_PSEUDO_VFP_D_IDX(13),
    DEFINE_PSEUDO_VFP_D_IDX(14),
    DEFINE_PSEUDO_VFP_D_IDX(15),
    DEFINE_PSEUDO_VFP_D_IDX(16),
    DEFINE_PSEUDO_VFP_D_IDX(17),
    DEFINE_PSEUDO_VFP_D_IDX(18),
    DEFINE_PSEUDO_VFP_D_IDX(19),
    DEFINE_PSEUDO_VFP_D_IDX(20),
    DEFINE_PSEUDO_VFP_D_IDX(21),
    DEFINE_PSEUDO_VFP_D_IDX(22),
    DEFINE_PSEUDO_VFP_D_IDX(23),
    DEFINE_PSEUDO_VFP_D_IDX(24),
    DEFINE_PSEUDO_VFP_D_IDX(25),
    DEFINE_PSEUDO_VFP_D_IDX(26),
    DEFINE_PSEUDO_VFP_D_IDX(27),
    DEFINE_PSEUDO_VFP_D_IDX(28),
    DEFINE_PSEUDO_VFP_D_IDX(29),
    DEFINE_PSEUDO_VFP_D_IDX(30),
    DEFINE_PSEUDO_VFP_D_IDX(31)

};
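// Note: the fpsr/fpcr entries above use VFP_V_OFFSET_IDX(32) + 0 and + 4
// because, in the NEON thread-state layout used here, the two 32-bit
// status/control words sit immediately after the thirty-two 16-byte
// v registers.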
//_STRUCT_ARM_EXCEPTION_STATE64
//{
//  uint64_t far;       /* Virtual Fault Address */
//  uint32_t esr;       /* Exception syndrome */
//  uint32_t exception; /* number of arm exception taken */
//};

// Exception registers
const DNBRegisterInfo DNBArchMachARM64::g_exc_registers[] = {
    {e_regSetEXC, exc_far, "far", NULL, Uint, Hex, 8, EXC_OFFSET(__far),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_esr, "esr", NULL, Uint, Hex, 4, EXC_OFFSET(__esr),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_exception, "exception", NULL, Uint, Hex, 4,
     EXC_OFFSET(__exception), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};

// Number of registers in each register set
const size_t DNBArchMachARM64::k_num_gpr_registers =
    sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_vfp_registers =
    sizeof(g_vfp_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_exc_registers =
    sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_all_registers =
    k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;

// Register set definitions. The first definition, at register set index zero,
// covers all registers and is followed by the individual register sets. The
// register information for the "all registers" set need not be filled in.
const DNBRegisterSetInfo DNBArchMachARM64::g_reg_sets[] = {
    {"ARM64 Registers", NULL, k_num_all_registers},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_vfp_registers, k_num_vfp_registers},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
// Total number of register sets for this architecture
const size_t DNBArchMachARM64::k_num_register_sets =
    sizeof(g_reg_sets) / sizeof(DNBRegisterSetInfo);

const DNBRegisterSetInfo *
DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  return g_reg_sets;
}

bool DNBArchMachARM64::FixGenericRegisterNumber(uint32_t &set, uint32_t &reg) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_pc;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_sp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_fp;
      break;

    case GENERIC_REGNUM_RA: // Return Address
      set = e_regSetGPR;
      reg = gpr_lr;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_cpsr;
      break;

    case GENERIC_REGNUM_ARG1:
    case GENERIC_REGNUM_ARG2:
    case GENERIC_REGNUM_ARG3:
    case GENERIC_REGNUM_ARG4:
    case GENERIC_REGNUM_ARG5:
    case GENERIC_REGNUM_ARG6:
      set = e_regSetGPR;
      reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
      break;

    default:
      return false;
    }
  }
  return true;
}
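// GetRegisterValue and SetRegisterValue below first run their (set, reg) pair
// through FixGenericRegisterNumber above. For example, a request for
// (REGISTER_SET_GENERIC, GENERIC_REGNUM_PC) is answered as (e_regSetGPR,
// gpr_pc), and GENERIC_REGNUM_ARG1 through GENERIC_REGNUM_ARG6 map onto
// x0 through x5.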
bool DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg,
                                        DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    value->info = *regInfo;
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
#if defined(__LP64__)
        if (reg == gpr_pc)
          value->value.uint64 = arm_thread_state64_get_pc(m_state.context.gpr);
        else if (reg == gpr_lr)
          value->value.uint64 = arm_thread_state64_get_lr(m_state.context.gpr);
        else if (reg == gpr_sp)
          value->value.uint64 = arm_thread_state64_get_sp(m_state.context.gpr);
        else if (reg == gpr_fp)
          value->value.uint64 = arm_thread_state64_get_fp(m_state.context.gpr);
        else
          value->value.uint64 = m_state.context.gpr.__x[reg];
#else
        value->value.uint64 = m_state.context.gpr.__x[reg];
#endif
        return true;
      } else if (reg == gpr_cpsr) {
        value->value.uint32 = m_state.context.gpr.__cpsr;
        return true;
      }
      break;

    case e_regSetVFP:
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0],
               16);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               16);
#endif
        return true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
#endif
        return true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
#endif
        return true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0],
               4);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               4);
#endif
        return true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0],
               8);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               8);
#endif
        return true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        value->value.uint64 = m_state.context.exc.__far;
        return true;
      } else if (reg == exc_esr) {
        value->value.uint32 = m_state.context.exc.__esr;
        return true;
      } else if (reg == exc_exception) {
        value->value.uint32 = m_state.context.exc.__exception;
        return true;
      }
      break;
    }
  }
  return false;
}
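// Write a single register back into the cached thread state, then flush the
// owning register set to the thread. Under pointer authentication the
// incoming pc/lr values are stripped and re-signed before being stored
// through the arm_thread_state64_set_*_fptr accessors, since a garbage value
// would otherwise trap when it is re-signed in the thread state.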
bool DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg,
                                        const DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  bool success = false;
  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
#if defined(__LP64__)
        uint64_t signed_value = value->value.uint64;
#if __has_feature(ptrauth_calls)
        // The incoming value could be garbage. Strip it to avoid
        // trapping when it gets resigned in the thread state.
        signed_value = (uint64_t)ptrauth_strip((void *)signed_value,
                                               ptrauth_key_function_pointer);
        signed_value = (uint64_t)ptrauth_sign_unauthenticated(
            (void *)signed_value, ptrauth_key_function_pointer, 0);
#endif
        if (reg == gpr_pc)
          arm_thread_state64_set_pc_fptr(m_state.context.gpr,
                                         (void *)signed_value);
        else if (reg == gpr_lr)
          arm_thread_state64_set_lr_fptr(m_state.context.gpr,
                                         (void *)signed_value);
        else if (reg == gpr_sp)
          arm_thread_state64_set_sp(m_state.context.gpr, value->value.uint64);
        else if (reg == gpr_fp)
          arm_thread_state64_set_fp(m_state.context.gpr, value->value.uint64);
        else
          m_state.context.gpr.__x[reg] = value->value.uint64;
#else
        m_state.context.gpr.__x[reg] = value->value.uint64;
#endif
        success = true;
      } else if (reg == gpr_cpsr) {
        m_state.context.gpr.__cpsr = value->value.uint32;
        success = true;
      }
      break;

    case e_regSetVFP:
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8,
               16);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               &value->value.v_uint8, 16);
#endif
        success = true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8,
               4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               &value->value.v_uint8, 4);
#endif
        success = true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8,
               8);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               &value->value.v_uint8, 8);
#endif
        success = true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        m_state.context.exc.__far = value->value.uint64;
        success = true;
      } else if (reg == exc_esr) {
        m_state.context.exc.__esr = value->value.uint32;
        success = true;
      } else if (reg == exc_exception) {
        m_state.context.exc.__exception = value->value.uint32;
        success = true;
      }
      break;
    }
  }
  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}
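// GetRegisterState/SetRegisterState dispatch to the per-set accessors. For
// e_regSetALL the kern_return_t results are OR'ed together: KERN_SUCCESS is
// zero, so the combined value is zero only when every set succeeded; a
// non-zero result means at least one set failed, though it is not itself a
// meaningful error code.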
kern_return_t DNBArchMachARM64::GetRegisterState(int set, bool force) {
  switch (set) {
  case e_regSetALL:
    return GetGPRState(force) | GetVFPState(force) | GetEXCState(force) |
           GetDBGState(force);
  case e_regSetGPR:
    return GetGPRState(force);
  case e_regSetVFP:
    return GetVFPState(force);
  case e_regSetEXC:
    return GetEXCState(force);
  case e_regSetDBG:
    return GetDBGState(force);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

kern_return_t DNBArchMachARM64::SetRegisterState(int set) {
  // Make sure we have a valid context to set.
  kern_return_t err = GetRegisterState(set, false);
  if (err != KERN_SUCCESS)
    return err;

  switch (set) {
  case e_regSetALL:
    return SetGPRState() | SetVFPState() | SetEXCState() | SetDBGState(false);
  case e_regSetGPR:
    return SetGPRState();
  case e_regSetVFP:
    return SetVFPState();
  case e_regSetEXC:
    return SetEXCState();
  case e_regSetDBG:
    return SetDBGState(false);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

bool DNBArchMachARM64::RegisterSetStateIsValid(int set) const {
  return m_state.RegsAreValid(set);
}

nub_size_t DNBArchMachARM64::GetRegisterContext(void *buf, nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf && buf_len) {
    if (size > buf_len)
      size = buf_len;

    bool force = false;
    if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
      return 0;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = (uint8_t *)buf;
    ::memcpy(p, &m_state.context.gpr, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(p, &m_state.context.vfp, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(p, &m_state.context.exc, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - (uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  // Return the size of the register context even if NULL was passed in
  return size;
}
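// SetRegisterContext below expects a buffer with exactly the packing that
// GetRegisterContext produces: the gpr, vfp, and exc structs copied back to
// back with no padding in between. A typical round trip (illustrative only;
// "arch" and the vector belong to a hypothetical caller, not to this file):
//
//   std::vector<uint8_t> ctx(arch->GetRegisterContext(NULL, 0));
//   arch->GetRegisterContext(ctx.data(), ctx.size());
//   // ... later, put everything back ...
//   arch->SetRegisterContext(ctx.data(), ctx.size());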
nub_size_t DNBArchMachARM64::SetRegisterContext(const void *buf,
                                                nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf == NULL || buf_len == 0)
    size = 0;

  if (size) {
    if (size > buf_len)
      size = buf_len;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(buf));
    ::memcpy(&m_state.context.gpr, p, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(&m_state.context.vfp, p, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(&m_state.context.exc, p, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - reinterpret_cast<const uint8_t *>(buf);
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
    SetGPRState();
    SetVFPState();
    SetEXCState();
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  return size;
}

uint32_t DNBArchMachARM64::SaveRegisterState() {
  kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
  DNBLogThreadedIf(
      LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
                  "(SetGPRState() for stop_count = %u)",
      m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());

  // Always re-read the registers because above we call thread_abort_safely();
  bool force = true;

  if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: GPR regs failed to read: %u ",
                     kret);
  } else if ((kret = GetVFPState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: %s regs failed to read: %u",
                     "VFP", kret);
  } else {
    const uint32_t save_id = GetNextRegisterStateSaveID();
    m_saved_register_states[save_id] = m_state.context;
    return save_id;
  }
  return UINT32_MAX;
}

bool DNBArchMachARM64::RestoreRegisterState(uint32_t save_id) {
  SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
  if (pos != m_saved_register_states.end()) {
    m_state.context.gpr = pos->second.gpr;
    m_state.context.vfp = pos->second.vfp;
    kern_return_t kret;
    bool success = true;
    if ((kret = SetGPRState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
                                   "(save_id = %u) error: GPR regs failed to "
                                   "write: %u",
                       save_id, kret);
      success = false;
    } else if ((kret = SetVFPState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
                                   "(save_id = %u) error: %s regs failed to "
                                   "write: %u",
                       save_id, "VFP", kret);
      success = false;
    }
    m_saved_register_states.erase(pos);
    return success;
  }
  return false;
}

#endif // #if defined (ARM_THREAD_STATE64_COUNT)
#endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)