//===-- DNBArchImplARM64.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)

#include "MacOSX/arm64/DNBArchImplARM64.h"

#if defined(ARM_THREAD_STATE64_COUNT)

#include "DNB.h"
#include "DNBBreakpoint.h"
#include "DNBLog.h"
#include "DNBRegisterInfo.h"
#include "MacOSX/MachProcess.h"
#include "MacOSX/MachThread.h"

#include <cinttypes>
#include <sys/sysctl.h>

#undef DEBUGSERVER_IS_ARM64E
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#if defined(__LP64__)
#define DEBUGSERVER_IS_ARM64E 1
#endif
#endif

// Break only in privileged or user mode
// (PAC bits in the DBGWCRn_EL1 watchpoint control register)
#define S_USER ((uint32_t)(2u << 1))

#define BCR_ENABLE ((uint32_t)(1u))
#define WCR_ENABLE ((uint32_t)(1u))

// Watchpoint load/store
// (LSC bits in the DBGWCRn_EL1 watchpoint control register)
#define WCR_LOAD ((uint32_t)(1u << 3))
#define WCR_STORE ((uint32_t)(1u << 4))

// Single instruction step
// (SS bit in the MDSCR_EL1 register)
#define SS_ENABLE ((uint32_t)(1u))

static const uint8_t g_arm64_breakpoint_opcode[] = {
    0x00, 0x00, 0x20, 0xD4}; // "brk #0", 0xd4200000 in little-endian byte order

// If we need to set one logical watchpoint by using
// two hardware watchpoint registers, the watchpoint
// will be split into a "high" and "low" watchpoint.
// Record both of them in the LoHi array.

// It's safe to initialize to all 0's since
// hi > lo and therefore LoHi[i] cannot be 0.
static uint32_t LoHi[16] = {0};

void DNBArchMachARM64::Initialize() {
  DNBArchPluginInfo arch_plugin_info = {
      CPU_TYPE_ARM64, DNBArchMachARM64::Create,
      DNBArchMachARM64::GetRegisterSetInfo,
      DNBArchMachARM64::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);

  DNBArchPluginInfo arch_plugin_info_32 = {
      CPU_TYPE_ARM64_32, DNBArchMachARM64::Create,
      DNBArchMachARM64::GetRegisterSetInfo,
      DNBArchMachARM64::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info_32);
}

DNBArchProtocol *DNBArchMachARM64::Create(MachThread *thread) {
  DNBArchMachARM64 *obj = new DNBArchMachARM64(thread);

  return obj;
}

const uint8_t *
DNBArchMachARM64::SoftwareBreakpointOpcode(nub_size_t byte_size) {
  return g_arm64_breakpoint_opcode;
}

uint32_t DNBArchMachARM64::GetCPUType() { return CPU_TYPE_ARM64; }

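// Return true if this CPU supports the Scalable Matrix Extension (SME),
// as reported by the hw.optional.arm.FEAT_SME sysctl.  The result is
// computed once and cached.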
static std::once_flag g_cpu_has_sme_once;
bool DNBArchMachARM64::CPUHasSME() {
  static bool g_has_sme = false;
  std::call_once(g_cpu_has_sme_once, []() {
    int ret = 0;
    size_t size = sizeof(ret);
    if (sysctlbyname("hw.optional.arm.FEAT_SME", &ret, &size, NULL, 0) != -1)
      g_has_sme = ret == 1;
  });
  return g_has_sme;
}

static std::once_flag g_cpu_has_sme2_once;
bool DNBArchMachARM64::CPUHasSME2() {
  static bool g_has_sme2 = false;
  std::call_once(g_cpu_has_sme2_once, []() {
    int ret = 0;
    size_t size = sizeof(ret);
    if (sysctlbyname("hw.optional.arm.FEAT_SME2", &ret, &size, NULL, 0) != -1)
      g_has_sme2 = ret == 1;
  });
  return g_has_sme2;
}

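// Return the maximum Streaming SVE vector length in bytes
// (hw.optional.arm.sme_max_svl_b), or 0 if SME is not available.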
static std::once_flag g_sme_max_svl_once;
unsigned int DNBArchMachARM64::GetSMEMaxSVL() {
  static unsigned int g_sme_max_svl = 0;
  std::call_once(g_sme_max_svl_once, []() {
    if (CPUHasSME()) {
      unsigned int ret = 0;
      size_t size = sizeof(ret);
      if (sysctlbyname("hw.optional.arm.sme_max_svl_b", &ret, &size, NULL, 0) !=
          -1)
        g_sme_max_svl = ret;
    }
  });
  return g_sme_max_svl;
}

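// Strip pointer authentication (PAC) bits from a register value, keeping
// only the valid addressing bits reported by the kernel.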
static uint64_t clear_pac_bits(uint64_t value) {
  uint32_t addressing_bits = 0;
  if (!DNBGetAddressingBits(addressing_bits))
    return value;

    // On arm64_32, no ptrauth bits to clear
#if !defined(__LP64__)
  return value;
#endif

  uint64_t mask = ((1ULL << addressing_bits) - 1);

  // Normally PAC bit clearing needs to check b55 and either set the
  // non-addressing bits, or clear them.  But the register values we
  // get from thread_get_state on an arm64e process don't appear to
  // follow this convention, at least when there's been a PAC auth
  // failure in the inferior.
  // Userland processes are always in low memory, so we hardcode the
  // b55 == 0 PAC stripping behavior here.

  return value & mask; // high bits cleared to 0
}

uint64_t DNBArchMachARM64::GetPC(uint64_t failValue) {
  // Get program counter
  if (GetGPRState(false) == KERN_SUCCESS)
#if defined(DEBUGSERVER_IS_ARM64E)
    return clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
#else
    return m_state.context.gpr.__pc;
#endif
  return failValue;
}

kern_return_t DNBArchMachARM64::SetPC(uint64_t value) {
  // Get program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
#if defined(__LP64__)
#if __has_feature(ptrauth_calls)
    // The incoming value could be garbage.  Strip it to avoid
    // trapping when it gets resigned in the thread state.
    value = (uint64_t) ptrauth_strip((void*) value, ptrauth_key_function_pointer);
    value = (uint64_t) ptrauth_sign_unauthenticated((void*) value, ptrauth_key_function_pointer, 0);
#endif
    arm_thread_state64_set_pc_fptr (m_state.context.gpr, (void*) value);
#else
    m_state.context.gpr.__pc = value;
#endif
    err = SetGPRState();
  }
  return err == KERN_SUCCESS;
}

uint64_t DNBArchMachARM64::GetSP(uint64_t failValue) {
  // Get stack pointer
  if (GetGPRState(false) == KERN_SUCCESS)
#if defined(DEBUGSERVER_IS_ARM64E)
    return clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
#else
    return m_state.context.gpr.__sp;
#endif
  return failValue;
}

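// Read the general purpose registers (x0-x28, fp, lr, sp, pc, cpsr) from
// the thread, unless a valid cached copy exists and force is false.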
kern_return_t DNBArchMachARM64::GetGPRState(bool force) {
  int set = e_regSetGPR;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetGPRCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64,
                         (thread_state_t)&m_state.context.gpr, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
    uint64_t *x = &m_state.context.gpr.__x[0];

    const char *log_str = "thread_get_state signed regs "
                          "\n   fp=%16.16llx"
                          "\n   lr=%16.16llx"
                          "\n   sp=%16.16llx"
                          "\n   pc=%16.16llx";
#if defined(DEBUGSERVER_IS_ARM64E)
    DNBLogThreaded(log_str,
                   reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_fp),
                   reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_lr),
                   reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp),
                   reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
#else
    DNBLogThreaded(log_str, m_state.context.gpr.__fp, m_state.context.gpr.__lr,
                   m_state.context.gpr.__sp, m_state.context.gpr.__pc);
#endif

#if defined(DEBUGSERVER_IS_ARM64E)
    uint64_t log_fp = clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_fp));
    uint64_t log_lr = clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_lr));
    uint64_t log_sp = clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
    uint64_t log_pc = clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
#else
    uint64_t log_fp = m_state.context.gpr.__fp;
    uint64_t log_lr = m_state.context.gpr.__lr;
    uint64_t log_sp = m_state.context.gpr.__sp;
    uint64_t log_pc = m_state.context.gpr.__pc;
#endif
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs"
        "\n   x0=%16.16llx"
        "\n   x1=%16.16llx"
        "\n   x2=%16.16llx"
        "\n   x3=%16.16llx"
        "\n   x4=%16.16llx"
        "\n   x5=%16.16llx"
        "\n   x6=%16.16llx"
        "\n   x7=%16.16llx"
        "\n   x8=%16.16llx"
        "\n   x9=%16.16llx"
        "\n  x10=%16.16llx"
        "\n  x11=%16.16llx"
        "\n  x12=%16.16llx"
        "\n  x13=%16.16llx"
        "\n  x14=%16.16llx"
        "\n  x15=%16.16llx"
        "\n  x16=%16.16llx"
        "\n  x17=%16.16llx"
        "\n  x18=%16.16llx"
        "\n  x19=%16.16llx"
        "\n  x20=%16.16llx"
        "\n  x21=%16.16llx"
        "\n  x22=%16.16llx"
        "\n  x23=%16.16llx"
        "\n  x24=%16.16llx"
        "\n  x25=%16.16llx"
        "\n  x26=%16.16llx"
        "\n  x27=%16.16llx"
        "\n  x28=%16.16llx"
        "\n   fp=%16.16llx"
        "\n   lr=%16.16llx"
        "\n   sp=%16.16llx"
        "\n   pc=%16.16llx"
        "\n cpsr=%8.8x",
        m_thread->MachPortNumber(), e_regSetGPR, e_regSetGPRCount, kret, count,
        x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11],
        x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20], x[21],
        x[22], x[23], x[24], x[25], x[26], x[27], x[28],
        log_fp, log_lr, log_sp, log_pc, m_state.context.gpr.__cpsr);
  }
  m_state.SetError(set, Read, kret);
  return kret;
}

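// Read the NEON/floating point registers (v0-v31, fpsr, fpcr) from the
// thread, unless a valid cached copy exists and force is false.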
kern_return_t DNBArchMachARM64::GetVFPState(bool force) {
  int set = e_regSetVFP;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetVFPCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64,
                         (thread_state_t)&m_state.context.vfp, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
#if defined(__arm64__) || defined(__aarch64__)
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
        "\n   q0  = 0x%16.16llx%16.16llx"
        "\n   q1  = 0x%16.16llx%16.16llx"
        "\n   q2  = 0x%16.16llx%16.16llx"
        "\n   q3  = 0x%16.16llx%16.16llx"
        "\n   q4  = 0x%16.16llx%16.16llx"
        "\n   q5  = 0x%16.16llx%16.16llx"
        "\n   q6  = 0x%16.16llx%16.16llx"
        "\n   q7  = 0x%16.16llx%16.16llx"
        "\n   q8  = 0x%16.16llx%16.16llx"
        "\n   q9  = 0x%16.16llx%16.16llx"
        "\n   q10 = 0x%16.16llx%16.16llx"
        "\n   q11 = 0x%16.16llx%16.16llx"
        "\n   q12 = 0x%16.16llx%16.16llx"
        "\n   q13 = 0x%16.16llx%16.16llx"
        "\n   q14 = 0x%16.16llx%16.16llx"
        "\n   q15 = 0x%16.16llx%16.16llx"
        "\n   q16 = 0x%16.16llx%16.16llx"
        "\n   q17 = 0x%16.16llx%16.16llx"
        "\n   q18 = 0x%16.16llx%16.16llx"
        "\n   q19 = 0x%16.16llx%16.16llx"
        "\n   q20 = 0x%16.16llx%16.16llx"
        "\n   q21 = 0x%16.16llx%16.16llx"
        "\n   q22 = 0x%16.16llx%16.16llx"
        "\n   q23 = 0x%16.16llx%16.16llx"
        "\n   q24 = 0x%16.16llx%16.16llx"
        "\n   q25 = 0x%16.16llx%16.16llx"
        "\n   q26 = 0x%16.16llx%16.16llx"
        "\n   q27 = 0x%16.16llx%16.16llx"
        "\n   q28 = 0x%16.16llx%16.16llx"
        "\n   q29 = 0x%16.16llx%16.16llx"
        "\n   q30 = 0x%16.16llx%16.16llx"
        "\n   q31 = 0x%16.16llx%16.16llx"
        "\n  fpsr = 0x%8.8x"
        "\n  fpcr = 0x%8.8x\n\n",
        m_thread->MachPortNumber(), e_regSetVFP, e_regSetVFPCount, kret, count,
        ((uint64_t *)&m_state.context.vfp.__v[0])[0],
        ((uint64_t *)&m_state.context.vfp.__v[0])[1],
        ((uint64_t *)&m_state.context.vfp.__v[1])[0],
        ((uint64_t *)&m_state.context.vfp.__v[1])[1],
        ((uint64_t *)&m_state.context.vfp.__v[2])[0],
        ((uint64_t *)&m_state.context.vfp.__v[2])[1],
        ((uint64_t *)&m_state.context.vfp.__v[3])[0],
        ((uint64_t *)&m_state.context.vfp.__v[3])[1],
        ((uint64_t *)&m_state.context.vfp.__v[4])[0],
        ((uint64_t *)&m_state.context.vfp.__v[4])[1],
        ((uint64_t *)&m_state.context.vfp.__v[5])[0],
        ((uint64_t *)&m_state.context.vfp.__v[5])[1],
        ((uint64_t *)&m_state.context.vfp.__v[6])[0],
        ((uint64_t *)&m_state.context.vfp.__v[6])[1],
        ((uint64_t *)&m_state.context.vfp.__v[7])[0],
        ((uint64_t *)&m_state.context.vfp.__v[7])[1],
        ((uint64_t *)&m_state.context.vfp.__v[8])[0],
        ((uint64_t *)&m_state.context.vfp.__v[8])[1],
        ((uint64_t *)&m_state.context.vfp.__v[9])[0],
        ((uint64_t *)&m_state.context.vfp.__v[9])[1],
        ((uint64_t *)&m_state.context.vfp.__v[10])[0],
        ((uint64_t *)&m_state.context.vfp.__v[10])[1],
        ((uint64_t *)&m_state.context.vfp.__v[11])[0],
        ((uint64_t *)&m_state.context.vfp.__v[11])[1],
        ((uint64_t *)&m_state.context.vfp.__v[12])[0],
        ((uint64_t *)&m_state.context.vfp.__v[12])[1],
        ((uint64_t *)&m_state.context.vfp.__v[13])[0],
        ((uint64_t *)&m_state.context.vfp.__v[13])[1],
        ((uint64_t *)&m_state.context.vfp.__v[14])[0],
        ((uint64_t *)&m_state.context.vfp.__v[14])[1],
        ((uint64_t *)&m_state.context.vfp.__v[15])[0],
        ((uint64_t *)&m_state.context.vfp.__v[15])[1],
        ((uint64_t *)&m_state.context.vfp.__v[16])[0],
        ((uint64_t *)&m_state.context.vfp.__v[16])[1],
        ((uint64_t *)&m_state.context.vfp.__v[17])[0],
        ((uint64_t *)&m_state.context.vfp.__v[17])[1],
        ((uint64_t *)&m_state.context.vfp.__v[18])[0],
        ((uint64_t *)&m_state.context.vfp.__v[18])[1],
        ((uint64_t *)&m_state.context.vfp.__v[19])[0],
        ((uint64_t *)&m_state.context.vfp.__v[19])[1],
        ((uint64_t *)&m_state.context.vfp.__v[20])[0],
        ((uint64_t *)&m_state.context.vfp.__v[20])[1],
        ((uint64_t *)&m_state.context.vfp.__v[21])[0],
        ((uint64_t *)&m_state.context.vfp.__v[21])[1],
        ((uint64_t *)&m_state.context.vfp.__v[22])[0],
        ((uint64_t *)&m_state.context.vfp.__v[22])[1],
        ((uint64_t *)&m_state.context.vfp.__v[23])[0],
        ((uint64_t *)&m_state.context.vfp.__v[23])[1],
        ((uint64_t *)&m_state.context.vfp.__v[24])[0],
        ((uint64_t *)&m_state.context.vfp.__v[24])[1],
        ((uint64_t *)&m_state.context.vfp.__v[25])[0],
        ((uint64_t *)&m_state.context.vfp.__v[25])[1],
        ((uint64_t *)&m_state.context.vfp.__v[26])[0],
        ((uint64_t *)&m_state.context.vfp.__v[26])[1],
        ((uint64_t *)&m_state.context.vfp.__v[27])[0],
        ((uint64_t *)&m_state.context.vfp.__v[27])[1],
        ((uint64_t *)&m_state.context.vfp.__v[28])[0],
        ((uint64_t *)&m_state.context.vfp.__v[28])[1],
        ((uint64_t *)&m_state.context.vfp.__v[29])[0],
        ((uint64_t *)&m_state.context.vfp.__v[29])[1],
        ((uint64_t *)&m_state.context.vfp.__v[30])[0],
        ((uint64_t *)&m_state.context.vfp.__v[30])[1],
        ((uint64_t *)&m_state.context.vfp.__v[31])[0],
        ((uint64_t *)&m_state.context.vfp.__v[31])[1],
        m_state.context.vfp.__fpsr, m_state.context.vfp.__fpcr);
#endif
  }
  m_state.SetError(set, Read, kret);
  return kret;
}

kern_return_t DNBArchMachARM64::GetEXCState(bool force) {
  int set = e_regSetEXC;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetEXCCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
                         (thread_state_t)&m_state.context.exc, &count);
  m_state.SetError(set, Read, kret);
  return kret;
}

#if 0
static void DumpDBGState(const arm_debug_state_t &dbg) {
  uint32_t i = 0;
  for (i = 0; i < 16; i++)
    DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } "
                               "WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
                     i, i, dbg.__bvr[i], dbg.__bcr[i], i, i, dbg.__wvr[i],
                     dbg.__wcr[i]);
}
#endif

kern_return_t DNBArchMachARM64::GetDBGState(bool force) {
  int set = e_regSetDBG;

  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetDBGCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
                         (thread_state_t)&m_state.dbg, &count);
  m_state.SetError(set, Read, kret);

  return kret;
}

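// Read the SVE registers.  The kernel exposes them as three thread state
// flavors: ARM_SVE_Z_STATE1 (z0-z15), ARM_SVE_Z_STATE2 (z16-z31) and
// ARM_SVE_P_STATE (p0-p15).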
kern_return_t DNBArchMachARM64::GetSVEState(bool force) {
  int set = e_regSetSVE;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  if (!CPUHasSME())
    return KERN_INVALID_ARGUMENT;

  // If the processor is not in Streaming SVE Mode, these thread_get_states
  // will fail, and we may return uninitialized data in the register context.
  memset(&m_state.context.sve.z[0], 0,
         ARM_SVE_Z_STATE_COUNT * sizeof(uint32_t));
  memset(&m_state.context.sve.z[16], 0,
         ARM_SVE_Z_STATE_COUNT * sizeof(uint32_t));
  memset(&m_state.context.sve.p[0], 0,
         ARM_SVE_P_STATE_COUNT * sizeof(uint32_t));

  // Read the registers from our thread
  mach_msg_type_number_t count = ARM_SVE_Z_STATE_COUNT;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_SVE_Z_STATE1,
                         (thread_state_t)&m_state.context.sve.z[0], &count);
  m_state.SetError(set, Read, kret);
  DNBLogThreadedIf(LOG_THREAD, "Read SVE registers z0..z15 return value %d",
                   kret);
  if (kret != KERN_SUCCESS)
    return kret;

  count = ARM_SVE_Z_STATE_COUNT;
  kret = thread_get_state(m_thread->MachPortNumber(), ARM_SVE_Z_STATE2,
                          (thread_state_t)&m_state.context.sve.z[16], &count);
  m_state.SetError(set, Read, kret);
  DNBLogThreadedIf(LOG_THREAD, "Read SVE registers z16..z31 return value %d",
                   kret);
  if (kret != KERN_SUCCESS)
    return kret;

  count = ARM_SVE_P_STATE_COUNT;
  kret = thread_get_state(m_thread->MachPortNumber(), ARM_SVE_P_STATE,
                          (thread_state_t)&m_state.context.sve.p[0], &count);
  m_state.SetError(set, Read, kret);
  DNBLogThreadedIf(LOG_THREAD, "Read SVE registers p0..p15 return value %d",
                   kret);

  return kret;
}

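// Read the SME registers: the ARM_SME_STATE flavor (which includes svcr
// and svl_b), the ZA matrix in one or more ARM_SME_ZA_STATE chunks, and
// zt0 via ARM_SME2_STATE when SME2 is available.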
kern_return_t DNBArchMachARM64::GetSMEState(bool force) {
  int set = e_regSetSME;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  if (!CPUHasSME())
    return KERN_INVALID_ARGUMENT;

  // If the processor is not in Streaming SVE Mode, these thread_get_states
  // will fail, and we may return uninitialized data in the register context.
  memset(&m_state.context.sme.svcr, 0, ARM_SME_STATE_COUNT * sizeof(uint32_t));
  memset(m_state.context.sme.za.data(), 0, m_state.context.sme.za.size());
  if (CPUHasSME2())
    memset(&m_state.context.sme.zt0, 0,
           ARM_SME2_STATE_COUNT * sizeof(uint32_t));

  // Read the registers from our thread
  mach_msg_type_number_t count = ARM_SME_STATE_COUNT;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_SME_STATE,
                         (thread_state_t)&m_state.context.sme.svcr, &count);
  m_state.SetError(set, Read, kret);
  DNBLogThreadedIf(LOG_THREAD, "Read ARM_SME_STATE return value %d", kret);
  if (kret != KERN_SUCCESS)
    return kret;

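  // The ZA matrix is svl_b x svl_b bytes.  It is transferred through
  // consecutive ARM_SME_ZA_STATE1 + i thread state flavors, each carrying
  // at most 4096 bytes of the matrix.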
  size_t za_size = m_state.context.sme.svl_b * m_state.context.sme.svl_b;
  const size_t max_chunk_size = 4096;
  int n_chunks;
  size_t chunk_size;
  if (za_size <= max_chunk_size) {
    n_chunks = 1;
    chunk_size = za_size;
  } else {
    n_chunks = za_size / max_chunk_size;
    chunk_size = max_chunk_size;
  }
  for (int i = 0; i < n_chunks; i++) {
    count = ARM_SME_ZA_STATE_COUNT;
    arm_sme_za_state_t za_state;
    kret = thread_get_state(m_thread->MachPortNumber(), ARM_SME_ZA_STATE1 + i,
                            (thread_state_t)&za_state, &count);
    m_state.SetError(set, Read, kret);
    DNBLogThreadedIf(LOG_THREAD, "Read ARM_SME_ZA_STATE return value %d", kret);
    if (kret != KERN_SUCCESS)
      return kret;
    memcpy(m_state.context.sme.za.data() + (i * chunk_size), &za_state,
           chunk_size);
  }

  if (CPUHasSME2()) {
    count = ARM_SME2_STATE_COUNT;
    kret = thread_get_state(m_thread->MachPortNumber(), ARM_SME2_STATE,
                            (thread_state_t)&m_state.context.sme.zt0, &count);
    m_state.SetError(set, Read, kret);
    DNBLogThreadedIf(LOG_THREAD, "Read ARM_SME2_STATE return value %d", kret);
    if (kret != KERN_SUCCESS)
      return kret;
  }

  return kret;
}

kern_return_t DNBArchMachARM64::SetGPRState() {
  int set = e_regSetGPR;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_THREAD_STATE64,
      (thread_state_t)&m_state.context.gpr, e_regSetGPRCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret;                             // Return the error code
}

kern_return_t DNBArchMachARM64::SetVFPState() {
  int set = e_regSetVFP;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_NEON_STATE64,
      (thread_state_t)&m_state.context.vfp, e_regSetVFPCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret;                             // Return the error code
}

kern_return_t DNBArchMachARM64::SetSVEState() {
  if (!CPUHasSME())
    return KERN_INVALID_ARGUMENT;

  int set = e_regSetSVE;
  kern_return_t kret = thread_set_state(
      m_thread->MachPortNumber(), ARM_SVE_Z_STATE1,
      (thread_state_t)&m_state.context.sve.z[0], ARM_SVE_Z_STATE_COUNT);
  m_state.SetError(set, Write, kret);
  DNBLogThreadedIf(LOG_THREAD, "Write ARM_SVE_Z_STATE1 return value %d", kret);
  if (kret != KERN_SUCCESS)
    return kret;

  kret = thread_set_state(m_thread->MachPortNumber(), ARM_SVE_Z_STATE2,
                          (thread_state_t)&m_state.context.sve.z[16],
                          ARM_SVE_Z_STATE_COUNT);
  m_state.SetError(set, Write, kret);
  DNBLogThreadedIf(LOG_THREAD, "Write ARM_SVE_Z_STATE2 return value %d", kret);
  if (kret != KERN_SUCCESS)
    return kret;

  kret = thread_set_state(m_thread->MachPortNumber(), ARM_SVE_P_STATE,
                          (thread_state_t)&m_state.context.sve.p[0],
                          ARM_SVE_P_STATE_COUNT);
  m_state.SetError(set, Write, kret);
  DNBLogThreadedIf(LOG_THREAD, "Write ARM_SVE_P_STATE return value %d", kret);
  if (kret != KERN_SUCCESS)
    return kret;

  return kret;
}

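// Write the SME registers back to the thread, mirroring GetSMEState():
// the ZA matrix is written in one or more ARM_SME_ZA_STATE chunks, and
// zt0 via ARM_SME2_STATE when SME2 is available.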
kern_return_t DNBArchMachARM64::SetSMEState() {
  if (!CPUHasSME())
    return KERN_INVALID_ARGUMENT;
  kern_return_t kret;

  int set = e_regSetSME;
  size_t za_size = m_state.context.sme.svl_b * m_state.context.sme.svl_b;
  const size_t max_chunk_size = 4096;
  int n_chunks;
  size_t chunk_size;
  if (za_size <= max_chunk_size) {
    n_chunks = 1;
    chunk_size = za_size;
  } else {
    n_chunks = za_size / max_chunk_size;
    chunk_size = max_chunk_size;
  }
  for (int i = 0; i < n_chunks; i++) {
    arm_sme_za_state_t za_state;
    memcpy(&za_state, m_state.context.sme.za.data() + (i * chunk_size),
           chunk_size);
    kret = thread_set_state(m_thread->MachPortNumber(), ARM_SME_ZA_STATE1 + i,
                            (thread_state_t)&za_state, ARM_SME_ZA_STATE_COUNT);
    m_state.SetError(set, Write, kret);
    DNBLogThreadedIf(LOG_THREAD, "Write ARM_SME_ZA_STATE return value %d",
                     kret);
    if (kret != KERN_SUCCESS)
      return kret;
  }

  if (CPUHasSME2()) {
    kret = thread_set_state(m_thread->MachPortNumber(), ARM_SME2_STATE,
                            (thread_state_t)&m_state.context.sme.zt0,
                            ARM_SME2_STATE_COUNT);
    m_state.SetError(set, Write, kret);
    DNBLogThreadedIf(LOG_THREAD, "Write ARM_SME2_STATE return value %d", kret);
    if (kret != KERN_SUCCESS)
      return kret;
  }

  return kret;
}

kern_return_t DNBArchMachARM64::SetEXCState() {
  int set = e_regSetEXC;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
      (thread_state_t)&m_state.context.exc, e_regSetEXCCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret;                             // Return the error code
}

kern_return_t DNBArchMachARM64::SetDBGState(bool also_set_on_task) {
  int set = e_regSetDBG;
  kern_return_t kret =
      ::thread_set_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
                         (thread_state_t)&m_state.dbg, e_regSetDBGCount);
  if (also_set_on_task) {
    kern_return_t task_kret = task_set_state(
        m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64,
        (thread_state_t)&m_state.dbg, e_regSetDBGCount);
    if (task_kret != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed "
                                        "to set debug control register state: "
                                        "0x%8.8x.",
                       task_kret);
  }
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently

  return kret; // Return the error code
}

void DNBArchMachARM64::ThreadWillResume() {
  // Do we need to step this thread? If so, let the mach thread tell us so.
  if (m_thread->IsStepping()) {
    EnableHardwareSingleStep(true);
  }

  // Disable the triggered watchpoint temporarily before we resume.
  // Plus, we try to enable hardware single step to execute past the instruction
  // which triggered our watchpoint.
  if (m_watchpoint_did_occur) {
    if (m_watchpoint_hw_index >= 0) {
      kern_return_t kret = GetDBGState(false);
      if (kret == KERN_SUCCESS &&
          !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
        // The watchpoint might have been disabled by the user.  We don't need
        // to do anything at all to enable hardware single stepping.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        return;
      }

      DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::ThreadWillResume() "
                       "DisableHardwareWatchpoint(%d) called",
                       m_watchpoint_hw_index);

      // Enable hardware single step to move past the watchpoint-triggering
      // instruction.
      m_watchpoint_resume_single_step_enabled =
          (EnableHardwareSingleStep(true) == KERN_SUCCESS);

      // If we are not able to enable single step to move past the
      // watchpoint-triggering instruction, at least we should reset the two
      // watchpoint member variables so that the next time around this
      // callback function is invoked, the enclosing logical branch is skipped.
      if (!m_watchpoint_resume_single_step_enabled) {
        // Reset the two watchpoint member variables.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::ThreadWillResume()"
                                          " failed to enable single step");
      } else
        DNBLogThreadedIf(LOG_WATCHPOINTS,
                         "DNBArchMachARM64::ThreadWillResume() "
                         "succeeded in enabling single step");
    }
  }
}

bool DNBArchMachARM64::NotifyException(MachException::Data &exc) {

  switch (exc.exc_type) {
  default:
    break;
  case EXC_BREAKPOINT:
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) {
      // The data break address is passed as exc_data[1].
      nub_addr_t addr = exc.exc_data[1];
      // Find the hardware index with the side effect of possibly massaging the
      // addr to return the starting address as seen from the debugger side.
      uint32_t hw_index = GetHardwareWatchpointHit(addr);

      // One logical watchpoint was split into two watchpoint locations because
      // it was too big.  If the watchpoint exception is indicating the 2nd half
      // of the two-parter, find the address of the 1st half and report that --
      // that's what lldb is going to expect to see.
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::NotifyException "
                       "watchpoint %d was hit on address "
                       "0x%llx",
                       hw_index, (uint64_t)addr);
      const uint32_t num_watchpoints = NumSupportedHardwareWatchpoints();
      for (uint32_t i = 0; i < num_watchpoints; i++) {
        if (LoHi[i] != 0 && LoHi[i] == hw_index && LoHi[i] != i &&
            GetWatchpointAddressByIndex(i) != INVALID_NUB_ADDRESS) {
          addr = GetWatchpointAddressByIndex(i);
          DNBLogThreadedIf(LOG_WATCHPOINTS,
                           "DNBArchMachARM64::NotifyException "
                           "It is a linked watchpoint; "
                           "rewritten to index %d addr 0x%llx",
                           LoHi[i], (uint64_t)addr);
        }
      }

      if (hw_index != INVALID_NUB_HW_INDEX) {
        m_watchpoint_did_occur = true;
        m_watchpoint_hw_index = hw_index;
        exc.exc_data[1] = addr;
        // Piggyback the hw_index in the exc.data.
        exc.exc_data.push_back(hw_index);
      }

      return true;
    }
    // detect a __builtin_debugtrap instruction pattern ("brk #0xf000")
    // and advance the $pc past it, so that the user can continue execution.
    // Generally speaking, this knowledge should be centralized in lldb,
    // recognizing the builtin_trap instruction and knowing how to advance
    // the pc past it, so that continue etc work.
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_BREAKPOINT) {
      nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
      if (pc != INVALID_NUB_ADDRESS && pc > 0) {
        DNBBreakpoint *bp =
            m_thread->Process()->Breakpoints().FindByAddress(pc);
        if (bp == nullptr) {
          uint8_t insnbuf[4];
          if (m_thread->Process()->ReadMemory(pc, 4, insnbuf) == 4) {
            uint8_t builtin_debugtrap_insn[4] = {0x00, 0x00, 0x3e,
                                                 0xd4}; // brk #0xf000
            if (memcmp(insnbuf, builtin_debugtrap_insn, 4) == 0) {
              SetPC(pc + 4);
            }
          }
        }
      }
    }
    break;
  }
  return false;
}

bool DNBArchMachARM64::ThreadDidStop() {
  bool success = true;

  m_state.InvalidateAllRegisterStates();

  if (m_watchpoint_resume_single_step_enabled) {
    // Great!  We now disable the hardware single step as well as re-enable the
    // hardware watchpoint.
    // See also ThreadWillResume().
    if (EnableHardwareSingleStep(false) == KERN_SUCCESS) {
      if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) {
        ReenableHardwareWatchpoint(m_watchpoint_hw_index);
        m_watchpoint_resume_single_step_enabled = false;
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
      } else {
        DNBLogError("internal error detected: "
                    "m_watchpoint_resume_single_step_enabled "
                    "is true but (m_watchpoint_did_occur && "
                    "m_watchpoint_hw_index >= 0) does not hold!");
      }
    } else {
      DNBLogError("internal error detected: "
                  "m_watchpoint_resume_single_step_enabled "
                  "is true but unable to disable single step!");
    }
  }

  // Are we stepping a single instruction?
  if (GetGPRState(true) == KERN_SUCCESS) {
    // We are single stepping, was this the primary thread?
    if (m_thread->IsStepping()) {
      // This was the primary thread, we need to clear the trace
      // bit if so.
      success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
    } else {
      // The MachThread will automatically restore the suspend count
      // in ThreadDidStop(), so we don't need to do anything here if
      // we weren't the primary thread the last time
    }
  }
  return success;
}

// Set or clear the single step (SS) bit in the MDSCR_EL1 debug register.
kern_return_t DNBArchMachARM64::EnableHardwareSingleStep(bool enable) {
  DNBError err;
  DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);

  err = GetGPRState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
    return err.Status();
  }

  err = GetDBGState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
    return err.Status();
  }

#if defined(DEBUGSERVER_IS_ARM64E)
  uint64_t pc = clear_pac_bits(
      reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
#else
  uint64_t pc = m_state.context.gpr.__pc;
#endif

  if (enable) {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 |= SS_ENABLE;
  } else {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
  }

  return SetDBGState(false);
}

// return 1 if bit "BIT" is set in "value"
static inline uint32_t bit(uint32_t value, uint32_t bit) {
  return (value >> bit) & 1u;
}

// return the bitfield "value[msbit:lsbit]".
static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit) {
  assert(msbit >= lsbit);
  uint64_t shift_left = sizeof(value) * 8 - 1 - msbit;
  value <<=
      shift_left; // shift anything above the msbit off of the unsigned edge
  value >>= shift_left + lsbit; // shift it back again down to the lsbit
                                // (including undoing any shift from above)
  return value;                 // return our result
}

uint32_t DNBArchMachARM64::NumSupportedHardwareWatchpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many watchpoints are supported dynamically...
  static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
  if (g_num_supported_hw_watchpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW breakpoints
    g_num_supported_hw_watchpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_watchpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
    } else {
// For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in
// EL0 so it can't
// access that reg.  The kernel should have filled in the sysctls based on it
// though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is zero
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_watchpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw watchpoints via asm():  %d",
                       g_num_supported_hw_watchpoints);
#endif
    }
  }
  return g_num_supported_hw_watchpoints;
}

uint32_t DNBArchMachARM64::NumSupportedHardwareBreakpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many breakpoints are supported dynamically...
  static uint32_t g_num_supported_hw_breakpoints = UINT_MAX;
  if (g_num_supported_hw_breakpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW breakpoints
    g_num_supported_hw_breakpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_breakpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n);
    } else {
// For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in
// EL0 so it can't access that reg.  The kernel should have filled in the
// sysctls based on it though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is zero
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_breakpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw breakpoints via asm():  %d",
                       g_num_supported_hw_breakpoints);
#endif
    }
  }
  return g_num_supported_hw_breakpoints;
}

uint32_t DNBArchMachARM64::EnableHardwareBreakpoint(nub_addr_t addr,
                                                    nub_size_t size,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareBreakpoint(addr = "
                   "0x%8.8llx, size = %zu)",
                   (uint64_t)addr, size);

  const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();

  nub_addr_t aligned_bp_address = addr;
  uint32_t control_value = 0;

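  // DBGBCR<n>_EL1 encoding: bit 0 enables the breakpoint, bits [2:1] select
  // the privilege levels that match, and bits [12:5] are the byte address
  // select (BAS) mask for the halfword or word being watched for execution.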
  switch (size) {
  case 2:
    control_value = (0x3 << 5) | 7;
    aligned_bp_address &= ~1;
    break;
  case 4:
    control_value = (0xfu << 5) | 7;
    aligned_bp_address &= ~3;
    break;
  };

  // Read the debug state
  kern_return_t kret = GetDBGState(false);
  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    for (i = 0; i < num_hw_breakpoints; ++i) {
      if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0)
        break; // We found an available hw breakpoint slot (in i)
    }

    // See if we found an available hw breakpoint slot above
    if (i < num_hw_breakpoints) {
      m_state.dbg.__bvr[i] = aligned_bp_address;
      m_state.dbg.__bcr[i] = control_value;

      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::EnableHardwareBreakpoint() "
                       "adding breakpoint on address 0x%llx with control "
                       "register value 0x%x",
                       (uint64_t)m_state.dbg.__bvr[i],
                       (uint32_t)m_state.dbg.__bcr[i]);

      kret = SetDBGState(also_set_on_task);

      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::"
                       "EnableHardwareBreakpoint() "
                       "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::"
                       "EnableHardwareBreakpoint(): All "
                       "hardware resources (%u) are in use.",
                       num_hw_breakpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}

// This should be `std::bit_ceil(aligned_size)` but
// that requires C++20.
// Calculates the smallest integral power of two that is not smaller than x.
static uint64_t bit_ceil(uint64_t input) {
  if (input <= 1 || __builtin_popcountll(input) == 1)
    return input;

  return 1ULL << (64 - __builtin_clzll(input));
}

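// Convert a user watchpoint request into one or two WatchpointSpecs whose
// aligned start addresses and power-of-2 sizes can be programmed into the
// hardware.  Requests that cannot be covered by two specs return an empty
// vector.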
std::vector<DNBArchMachARM64::WatchpointSpec>
DNBArchMachARM64::AlignRequestedWatchpoint(nub_addr_t requested_addr,
                                           nub_size_t requested_size) {

  // Can't watch zero bytes
  if (requested_size == 0)
    return {};

  // Smallest size we can watch on AArch64 is 8 bytes
  constexpr nub_size_t min_watchpoint_alignment = 8;
  nub_size_t aligned_size = std::max(requested_size, min_watchpoint_alignment);

  /// Round up \a requested_size to the next power-of-2 size, at least 8
  /// bytes
  /// requested_size == 8   -> aligned_size == 8
  /// requested_size == 9   -> aligned_size == 16
  aligned_size = bit_ceil(aligned_size);

  nub_addr_t aligned_start = requested_addr & ~(aligned_size - 1);
  // Does this power-of-2 memory range, aligned to power-of-2, completely
  // encompass the requested watch region?
  if (aligned_start + aligned_size >= requested_addr + requested_size) {
    WatchpointSpec wp;
    wp.aligned_start = aligned_start;
    wp.requested_start = requested_addr;
    wp.aligned_size = aligned_size;
    wp.requested_size = requested_size;
    return {{wp}};
  }

  // We need to split this into two watchpoints, split on the aligned_size
  // boundary and re-evaluate the alignment of each half.
  //
  // requested_addr 48 requested_size 20 -> aligned_size 32
  //                              aligned_start 32
  //                              split_addr 64
  //                              first_requested_addr 48
  //                              first_requested_size 16
  //                              second_requested_addr 64
  //                              second_requested_size 4
  nub_addr_t split_addr = aligned_start + aligned_size;

  nub_addr_t first_requested_addr = requested_addr;
  nub_size_t first_requested_size = split_addr - requested_addr;
  nub_addr_t second_requested_addr = split_addr;
  nub_size_t second_requested_size = requested_size - first_requested_size;

  std::vector<WatchpointSpec> first_wp =
      AlignRequestedWatchpoint(first_requested_addr, first_requested_size);
  std::vector<WatchpointSpec> second_wp =
      AlignRequestedWatchpoint(second_requested_addr, second_requested_size);
  if (first_wp.size() != 1 || second_wp.size() != 1)
    return {};

  return {{first_wp[0], second_wp[0]}};
}

uint32_t DNBArchMachARM64::EnableHardwareWatchpoint(nub_addr_t addr,
                                                    nub_size_t size, bool read,
                                                    bool write,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint(addr = "
                   "0x%8.8llx, size = %zu, read = %u, write = %u)",
                   (uint64_t)addr, size, read, write);

  std::vector<DNBArchMachARM64::WatchpointSpec> wps =
      AlignRequestedWatchpoint(addr, size);
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint() using %zu "
                   "hardware watchpoints",
                   wps.size());

  if (wps.size() == 0)
    return INVALID_NUB_HW_INDEX;

  // We must watch for either read or write
  if (read == false && write == false)
    return INVALID_NUB_HW_INDEX;

  // Only one hardware watchpoint needed
  // to implement the user's request.
  if (wps.size() == 1) {
    if (wps[0].aligned_size <= 8)
      return SetBASWatchpoint(wps[0], read, write, also_set_on_task);
    else
      return SetMASKWatchpoint(wps[0], read, write, also_set_on_task);
  }

  // We have multiple WatchpointSpecs

  std::vector<uint32_t> wp_slots_used;
  for (size_t i = 0; i < wps.size(); i++) {
    uint32_t idx =
        EnableHardwareWatchpoint(wps[i].requested_start, wps[i].requested_size,
                                 read, write, also_set_on_task);
    if (idx != INVALID_NUB_HW_INDEX)
      wp_slots_used.push_back(idx);
  }

  // Did we fail to set all of the WatchpointSpecs needed
  // for this user's request?
  if (wps.size() != wp_slots_used.size()) {
    for (int wp_slot : wp_slots_used)
      DisableHardwareWatchpoint(wp_slot, also_set_on_task);
    return INVALID_NUB_HW_INDEX;
  }

  LoHi[wp_slots_used[0]] = wp_slots_used[1];
  return wp_slots_used[0];
}

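// Program a Byte Address Select (BAS) watchpoint: a single doubleword-aligned
// 8-byte region with the individual bytes to watch selected by the BAS bits.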
uint32_t DNBArchMachARM64::SetBASWatchpoint(DNBArchMachARM64::WatchpointSpec wp,
                                            bool read, bool write,
                                            bool also_set_on_task) {
  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  nub_addr_t aligned_dword_addr = wp.aligned_start;
  nub_addr_t watching_offset = wp.requested_start - wp.aligned_start;
  nub_size_t watching_size = wp.requested_size;

  // If user asks to watch 3 bytes at 0x1005,
  // aligned_dword_addr 0x1000
  // watching_offset 5
  // watching_size 3

  // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the
  // above.
  // The bit shift and negation operation will give us 0b11 for 2, 0b1111 for 4,
  // etc, up to 0b11111111 for 8.
  // then we shift those bits left by the offset into this dword that we are
  // interested in.
  // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of
  // 0b11110000.
  uint32_t byte_address_select = ((1 << watching_size) - 1) << watching_offset;

  // Read the debug state
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return INVALID_NUB_HW_INDEX;

  // Check to make sure we have the needed hardware support
  uint32_t i = 0;

  for (i = 0; i < num_hw_watchpoints; ++i) {
    if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
      break; // We found an available hw watchpoint slot
  }
  if (i == num_hw_watchpoints) {
    DNBLogThreadedIf(LOG_WATCHPOINTS,
                     "DNBArchMachARM64::"
                     "SetBASWatchpoint(): All "
                     "hardware resources (%u) are in use.",
                     num_hw_watchpoints);
    return INVALID_NUB_HW_INDEX;
  }

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "SetBASWatchpoint() "
                   "set hardware register %d to BAS watchpoint "
                   "aligned start address 0x%llx, watch region start "
                   "offset %lld, number of bytes %zu",
                   i, aligned_dword_addr, watching_offset, watching_size);

  // Clear any previous LoHi joined-watchpoint that may have been in use
  LoHi[i] = 0;

  // shift our Byte Address Select bits up to the correct bit range for the
  // DBGWCRn_EL1
  byte_address_select = byte_address_select << 5;

  // Make sure bits 1:0 are clear in our address
  m_state.dbg.__wvr[i] = aligned_dword_addr;       // DVA (Data Virtual Address)
  m_state.dbg.__wcr[i] = byte_address_select |     // Which bytes that follow
                                                   // the DVA that we will watch
                         S_USER |                  // Stop only in user mode
                         (read ? WCR_LOAD : 0) |   // Stop on read access?
                         (write ? WCR_STORE : 0) | // Stop on write access?
                         WCR_ENABLE;               // Enable this watchpoint;

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::SetBASWatchpoint() "
                   "adding watchpoint on address 0x%llx with control "
                   "register value 0x%x",
                   (uint64_t)m_state.dbg.__wvr[i],
                   (uint32_t)m_state.dbg.__wcr[i]);

  kret = SetDBGState(also_set_on_task);
  // DumpDBGState(m_state.dbg);

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "SetBASWatchpoint() "
                   "SetDBGState() => 0x%8.8x.",
                   kret);

  if (kret == KERN_SUCCESS)
    return i;

  return INVALID_NUB_HW_INDEX;
}

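// Program a MASK watchpoint: a power-of-2 sized, power-of-2 aligned region
// larger than 8 bytes, matched by masking off the low bits of the address.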
uint32_t
DNBArchMachARM64::SetMASKWatchpoint(DNBArchMachARM64::WatchpointSpec wp,
                                    bool read, bool write,
                                    bool also_set_on_task) {
  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  // Read the debug state
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return INVALID_NUB_HW_INDEX;

  // Check to make sure we have the needed hardware support
  uint32_t i = 0;

  for (i = 0; i < num_hw_watchpoints; ++i) {
    if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
      break; // We found an available hw watchpoint slot
  }
  if (i == num_hw_watchpoints) {
    DNBLogThreadedIf(LOG_WATCHPOINTS,
                     "DNBArchMachARM64::"
                     "SetMASKWatchpoint(): All "
                     "hardware resources (%u) are in use.",
                     num_hw_watchpoints);
    return INVALID_NUB_HW_INDEX;
  }

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "SetMASKWatchpoint() "
                   "set hardware register %d to MASK watchpoint "
                   "aligned start address 0x%llx, aligned size %zu",
                   i, wp.aligned_start, wp.aligned_size);

  // Clear any previous LoHi joined-watchpoint that may have been in use
  LoHi[i] = 0;

  // MASK field is the number of low bits that are masked off
  // when comparing the address with the DBGWVR<n>_EL1 values.
  // If aligned size is 16, that means we ignore low 4 bits, 0b1111.
  // popcount(16 - 1) gives us the correct value of 4.
  // 2GB is max watchable region, which is 31 bits (low bits 0x7fffffff
  // masked off) -- a MASK value of 31.
  const uint64_t mask = __builtin_popcountl(wp.aligned_size - 1) << 24;
  // A '0b11111111' BAS value needed for mask watchpoints plus a
  // nonzero mask value.
  const uint64_t not_bas_wp = 0xff << 5;

  m_state.dbg.__wvr[i] = wp.aligned_start;
  m_state.dbg.__wcr[i] = mask | not_bas_wp | S_USER | // Stop only in user mode
                         (read ? WCR_LOAD : 0) |      // Stop on read access?
                         (write ? WCR_STORE : 0) |    // Stop on write access?
                         WCR_ENABLE;                  // Enable this watchpoint;

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::SetMASKWatchpoint() "
                   "adding watchpoint on address 0x%llx with control "
                   "register value 0x%llx",
                   (uint64_t)m_state.dbg.__wvr[i],
                   (uint64_t)m_state.dbg.__wcr[i]);

  kret = SetDBGState(also_set_on_task);

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "SetMASKWatchpoint() "
                   "SetDBGState() => 0x%8.8x.",
                   kret);

  if (kret == KERN_SUCCESS)
    return i;

  return INVALID_NUB_HW_INDEX;
}

bool DNBArchMachARM64::ReenableHardwareWatchpoint(uint32_t hw_index) {
  // If this logical watchpoint # is actually implemented using
  // two hardware watchpoint registers, re-enable both of them.

  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return ReenableHardwareWatchpoint_helper(hw_index) &&
           ReenableHardwareWatchpoint_helper(LoHi[hw_index]);
  } else {
    return ReenableHardwareWatchpoint_helper(hw_index);
  }
}

bool DNBArchMachARM64::ReenableHardwareWatchpoint_helper(uint32_t hw_index) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
  m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "ReenableHardwareWatchpoint_helper( %u ) - WVR%u = "
                   "0x%8.8llx  WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  kret = SetDBGState(false);

  return (kret == KERN_SUCCESS);
}

bool DNBArchMachARM64::DisableHardwareWatchpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task) &&
           DisableHardwareWatchpoint_helper(LoHi[hw_index], also_set_on_task);
  } else {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task);
  }
}

bool DNBArchMachARM64::DisableHardwareWatchpoint_helper(uint32_t hw_index,
                                                        bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
  m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];

  m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
  DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                    "DisableHardwareWatchpoint( %u ) - WVR%u = "
                                    "0x%8.8llx  WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  kret = SetDBGState(also_set_on_task);

  return (kret == KERN_SUCCESS);
}

bool DNBArchMachARM64::DisableHardwareBreakpoint(uint32_t hw_index,
1431                                                  bool also_set_on_task) {
1432   kern_return_t kret = GetDBGState(false);
1433   if (kret != KERN_SUCCESS)
1434     return false;
1435 
1436   const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
1437   if (hw_index >= num_hw_points)
1438     return false;
1439 
1440   m_disabled_breakpoints[hw_index].addr = m_state.dbg.__bvr[hw_index];
1441   m_disabled_breakpoints[hw_index].control = m_state.dbg.__bcr[hw_index];
1442 
1443   m_state.dbg.__bcr[hw_index] = 0;
1444   DNBLogThreadedIf(LOG_WATCHPOINTS,
1445                    "DNBArchMachARM64::"
1446                    "DisableHardwareBreakpoint( %u ) - BVR%u = "
1447                    "0x%8.8llx  BCR%u = 0x%8.8llx",
1448                    hw_index, hw_index, (uint64_t)m_state.dbg.__bvr[hw_index],
1449                    hw_index, (uint64_t)m_state.dbg.__bcr[hw_index]);
1450 
1451   kret = SetDBGState(also_set_on_task);
1452 
1453   return (kret == KERN_SUCCESS);
1454 }
1455 
1456 // This is for checking the Byte Address Select bits in the DBGWCR<n>_EL1
1457 // control register.
1458 // Returns the index of the lowest set bit among the low 8 bits of 'val',
1459 // i.e. which of { 0b???????1, 0b??????10, 0b?????100, 0b????1000,
1460 // 0b???10000, 0b??100000, 0b?1000000, 0b10000000 } matches; -1 if none.
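     // For example, LowestBitSet(0b00011000) == 3 and LowestBitSet(0) == -1.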
1461 static inline int32_t LowestBitSet(uint32_t val) {
1462   for (unsigned i = 0; i < 8; ++i) {
1463     if (bit(val, i))
1464       return i;
1465   }
1466   return -1;
1467 }
1468 
1469 // Iterate through the debug registers; return the index of the first
1470 // watchpoint whose address matches.
1471 // As a side effect, 'addr' (an in/out argument) is updated to the start
1472 // address of the watchpoint as understood by the debugger, which may
1473 // differ from the address that was passed in.
1474 uint32_t DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr) {
1475   // Read the debug state
1476   kern_return_t kret = GetDBGState(true);
1477   // DumpDBGState(m_state.dbg);
1478   DNBLogThreadedIf(
1479       LOG_WATCHPOINTS,
1480       "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
1481       kret);
1482   DNBLogThreadedIf(LOG_WATCHPOINTS,
1483                    "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx",
1484                    (uint64_t)addr);
1485 
1486   if (kret == KERN_SUCCESS) {
1487     DBG &debug_state = m_state.dbg;
1488     uint32_t i, num = NumSupportedHardwareWatchpoints();
1489     for (i = 0; i < num; ++i) {
1490       nub_addr_t wp_addr = GetWatchAddress(debug_state, i);
1491 
1492       DNBLogThreadedIf(LOG_WATCHPOINTS,
1493                        "DNBArchMachARM64::"
1494                        "GetHardwareWatchpointHit() slot: %u "
1495                        "(addr = 0x%llx, WCR = 0x%llx)",
1496                        i, wp_addr, debug_state.__wcr[i]);
1497 
1498       if (!IsWatchpointEnabled(debug_state, i))
1499         continue;
1500 
1501       // DBGWCR<n>_EL1.BAS are the bits of the doubleword that are watched
1502       // with a BAS watchpoint.
1503       uint32_t bas_bits = bits(debug_state.__wcr[i], 12, 5);
1504       // DBGWCR<n>_EL1.MASK is the number of bits that are masked off the
1505       // virtual address when comparing to DBGWVR<n>_EL1.
1506       uint32_t mask = bits(debug_state.__wcr[i], 28, 24);
1507 
1508       const bool is_bas_watchpoint = mask == 0;
1509 
1510       DNBLogThreadedIf(
1511           LOG_WATCHPOINTS,
1512           "DNBArchMachARM64::"
1513           "GetHardwareWatchpointHit() slot: %u %s",
1514           i, is_bas_watchpoint ? "is BAS watchpoint" : "is MASK watchpoint");
1515 
1516       if (is_bas_watchpoint) {
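             // A BAS watchpoint covers bytes within a single aligned
             // doubleword; skip this slot unless the access falls in the same
             // doubleword as the watchpoint address (compare bits [48:3]).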
1517         if (bits(wp_addr, 48, 3) != bits(addr, 48, 3))
1518           continue;
1519       } else {
1520         if (bits(wp_addr, 48, mask) == bits(addr, 48, mask)) {
1521           DNBLogThreadedIf(LOG_WATCHPOINTS,
1522                            "DNBArchMachARM64::"
1523                            "GetHardwareWatchpointHit() slot: %u matched MASK "
1524                            "ignoring %u low bits",
1525                            i, mask);
1526           return i;
1527         }
1528       }
1529 
1530       if (is_bas_watchpoint) {
1531         // Sanity check the bas_bits
1532         int32_t lsb = LowestBitSet(bas_bits);
1533         if (lsb < 0)
1534           continue;
1535 
1536         uint64_t byte_to_match = bits(addr, 2, 0);
1537 
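             // The BAS field has one bit per byte of that doubleword; the
             // access hits this watchpoint if the bit for the faulting byte
             // (the low three bits of 'addr') is set.  'addr' is then rewound
             // to the first byte the watchpoint actually covers.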
1538         if (bas_bits & (1 << byte_to_match)) {
1539           addr = wp_addr + lsb;
1540           DNBLogThreadedIf(LOG_WATCHPOINTS,
1541                            "DNBArchMachARM64::"
1542                            "GetHardwareWatchpointHit() slot: %u matched BAS",
1543                            i);
1544           return i;
1545         }
1546       }
1547     }
1548   }
1549   return INVALID_NUB_HW_INDEX;
1550 }
1551 
1552 nub_addr_t DNBArchMachARM64::GetWatchpointAddressByIndex(uint32_t hw_index) {
1553   kern_return_t kret = GetDBGState(true);
1554   if (kret != KERN_SUCCESS)
1555     return INVALID_NUB_ADDRESS;
1556   const uint32_t num = NumSupportedHardwareWatchpoints();
1557   if (hw_index >= num)
1558     return INVALID_NUB_ADDRESS;
1559   if (IsWatchpointEnabled(m_state.dbg, hw_index))
1560     return GetWatchAddress(m_state.dbg, hw_index);
1561   return INVALID_NUB_ADDRESS;
1562 }
1563 
1564 bool DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state,
1565                                            uint32_t hw_index) {
1566   // Watchpoint Control Registers, bitfield definitions
1567   // ...
1568   // Bits    Value    Description
1569   // [0]     0        Watchpoint disabled
1570   //         1        Watchpoint enabled.
1571   return (debug_state.__wcr[hw_index] & 1u);
1572 }
1573 
1574 nub_addr_t DNBArchMachARM64::GetWatchAddress(const DBG &debug_state,
1575                                              uint32_t hw_index) {
1576   // Watchpoint Value Register (DBGWVR<n>_EL1), bitfield definitions
1577   // Bits        Description
1578   // [63:2]      Watchpoint address
1579   // [1:0]       RES0
1580   return bits(debug_state.__wvr[hw_index], 63, 0);
1581 }
1582 
1583 // Register information definitions for 64 bit ARMv8.
1584 enum gpr_regnums {
1585   gpr_x0 = 0,
1586   gpr_x1,
1587   gpr_x2,
1588   gpr_x3,
1589   gpr_x4,
1590   gpr_x5,
1591   gpr_x6,
1592   gpr_x7,
1593   gpr_x8,
1594   gpr_x9,
1595   gpr_x10,
1596   gpr_x11,
1597   gpr_x12,
1598   gpr_x13,
1599   gpr_x14,
1600   gpr_x15,
1601   gpr_x16,
1602   gpr_x17,
1603   gpr_x18,
1604   gpr_x19,
1605   gpr_x20,
1606   gpr_x21,
1607   gpr_x22,
1608   gpr_x23,
1609   gpr_x24,
1610   gpr_x25,
1611   gpr_x26,
1612   gpr_x27,
1613   gpr_x28,
1614   gpr_fp,
1615   gpr_x29 = gpr_fp,
1616   gpr_lr,
1617   gpr_x30 = gpr_lr,
1618   gpr_sp,
1619   gpr_x31 = gpr_sp,
1620   gpr_pc,
1621   gpr_cpsr,
1622   gpr_w0,
1623   gpr_w1,
1624   gpr_w2,
1625   gpr_w3,
1626   gpr_w4,
1627   gpr_w5,
1628   gpr_w6,
1629   gpr_w7,
1630   gpr_w8,
1631   gpr_w9,
1632   gpr_w10,
1633   gpr_w11,
1634   gpr_w12,
1635   gpr_w13,
1636   gpr_w14,
1637   gpr_w15,
1638   gpr_w16,
1639   gpr_w17,
1640   gpr_w18,
1641   gpr_w19,
1642   gpr_w20,
1643   gpr_w21,
1644   gpr_w22,
1645   gpr_w23,
1646   gpr_w24,
1647   gpr_w25,
1648   gpr_w26,
1649   gpr_w27,
1650   gpr_w28
1651 
1652 };
1653 
1654 enum {
1655   vfp_v0 = 0,
1656   vfp_v1,
1657   vfp_v2,
1658   vfp_v3,
1659   vfp_v4,
1660   vfp_v5,
1661   vfp_v6,
1662   vfp_v7,
1663   vfp_v8,
1664   vfp_v9,
1665   vfp_v10,
1666   vfp_v11,
1667   vfp_v12,
1668   vfp_v13,
1669   vfp_v14,
1670   vfp_v15,
1671   vfp_v16,
1672   vfp_v17,
1673   vfp_v18,
1674   vfp_v19,
1675   vfp_v20,
1676   vfp_v21,
1677   vfp_v22,
1678   vfp_v23,
1679   vfp_v24,
1680   vfp_v25,
1681   vfp_v26,
1682   vfp_v27,
1683   vfp_v28,
1684   vfp_v29,
1685   vfp_v30,
1686   vfp_v31,
1687   vfp_fpsr,
1688   vfp_fpcr,
1689 
1690   // lower 32 bits of the corresponding vfp_v<n> reg.
1691   vfp_s0,
1692   vfp_s1,
1693   vfp_s2,
1694   vfp_s3,
1695   vfp_s4,
1696   vfp_s5,
1697   vfp_s6,
1698   vfp_s7,
1699   vfp_s8,
1700   vfp_s9,
1701   vfp_s10,
1702   vfp_s11,
1703   vfp_s12,
1704   vfp_s13,
1705   vfp_s14,
1706   vfp_s15,
1707   vfp_s16,
1708   vfp_s17,
1709   vfp_s18,
1710   vfp_s19,
1711   vfp_s20,
1712   vfp_s21,
1713   vfp_s22,
1714   vfp_s23,
1715   vfp_s24,
1716   vfp_s25,
1717   vfp_s26,
1718   vfp_s27,
1719   vfp_s28,
1720   vfp_s29,
1721   vfp_s30,
1722   vfp_s31,
1723 
1724   // lower 64 bits of the corresponding vfp_v<n> reg.
1725   vfp_d0,
1726   vfp_d1,
1727   vfp_d2,
1728   vfp_d3,
1729   vfp_d4,
1730   vfp_d5,
1731   vfp_d6,
1732   vfp_d7,
1733   vfp_d8,
1734   vfp_d9,
1735   vfp_d10,
1736   vfp_d11,
1737   vfp_d12,
1738   vfp_d13,
1739   vfp_d14,
1740   vfp_d15,
1741   vfp_d16,
1742   vfp_d17,
1743   vfp_d18,
1744   vfp_d19,
1745   vfp_d20,
1746   vfp_d21,
1747   vfp_d22,
1748   vfp_d23,
1749   vfp_d24,
1750   vfp_d25,
1751   vfp_d26,
1752   vfp_d27,
1753   vfp_d28,
1754   vfp_d29,
1755   vfp_d30,
1756   vfp_d31
1757 };
1758 
1759 enum {
1760   sve_z0,
1761   sve_z1,
1762   sve_z2,
1763   sve_z3,
1764   sve_z4,
1765   sve_z5,
1766   sve_z6,
1767   sve_z7,
1768   sve_z8,
1769   sve_z9,
1770   sve_z10,
1771   sve_z11,
1772   sve_z12,
1773   sve_z13,
1774   sve_z14,
1775   sve_z15,
1776   sve_z16,
1777   sve_z17,
1778   sve_z18,
1779   sve_z19,
1780   sve_z20,
1781   sve_z21,
1782   sve_z22,
1783   sve_z23,
1784   sve_z24,
1785   sve_z25,
1786   sve_z26,
1787   sve_z27,
1788   sve_z28,
1789   sve_z29,
1790   sve_z30,
1791   sve_z31,
1792   sve_p0,
1793   sve_p1,
1794   sve_p2,
1795   sve_p3,
1796   sve_p4,
1797   sve_p5,
1798   sve_p6,
1799   sve_p7,
1800   sve_p8,
1801   sve_p9,
1802   sve_p10,
1803   sve_p11,
1804   sve_p12,
1805   sve_p13,
1806   sve_p14,
1807   sve_p15
1808 };
1809 
1810 enum { sme_svcr, sme_tpidr2, sme_svl_b, sme_za, sme_zt0 };
1811 
1812 enum { exc_far = 0, exc_esr, exc_exception };
1813 
1814 // These numbers come from the "DWARF for the ARM 64-bit Architecture
1815 // (AArch64)" document.
1816 
1817 enum {
1818   dwarf_x0 = 0,
1819   dwarf_x1,
1820   dwarf_x2,
1821   dwarf_x3,
1822   dwarf_x4,
1823   dwarf_x5,
1824   dwarf_x6,
1825   dwarf_x7,
1826   dwarf_x8,
1827   dwarf_x9,
1828   dwarf_x10,
1829   dwarf_x11,
1830   dwarf_x12,
1831   dwarf_x13,
1832   dwarf_x14,
1833   dwarf_x15,
1834   dwarf_x16,
1835   dwarf_x17,
1836   dwarf_x18,
1837   dwarf_x19,
1838   dwarf_x20,
1839   dwarf_x21,
1840   dwarf_x22,
1841   dwarf_x23,
1842   dwarf_x24,
1843   dwarf_x25,
1844   dwarf_x26,
1845   dwarf_x27,
1846   dwarf_x28,
1847   dwarf_x29,
1848   dwarf_x30,
1849   dwarf_x31,
1850   dwarf_pc = 32,
1851   dwarf_elr_mode = 33,
1852   dwarf_fp = dwarf_x29,
1853   dwarf_lr = dwarf_x30,
1854   dwarf_sp = dwarf_x31,
1855   // 34-63 reserved
1856 
1857   // V0-V31 (128 bit vector registers)
1858   dwarf_v0 = 64,
1859   dwarf_v1,
1860   dwarf_v2,
1861   dwarf_v3,
1862   dwarf_v4,
1863   dwarf_v5,
1864   dwarf_v6,
1865   dwarf_v7,
1866   dwarf_v8,
1867   dwarf_v9,
1868   dwarf_v10,
1869   dwarf_v11,
1870   dwarf_v12,
1871   dwarf_v13,
1872   dwarf_v14,
1873   dwarf_v15,
1874   dwarf_v16,
1875   dwarf_v17,
1876   dwarf_v18,
1877   dwarf_v19,
1878   dwarf_v20,
1879   dwarf_v21,
1880   dwarf_v22,
1881   dwarf_v23,
1882   dwarf_v24,
1883   dwarf_v25,
1884   dwarf_v26,
1885   dwarf_v27,
1886   dwarf_v28,
1887   dwarf_v29,
1888   dwarf_v30,
1889   dwarf_v31
1890 
1891   // 96-127 reserved
1892 };
1893 
1894 enum {
1895   debugserver_gpr_x0 = 0,
1896   debugserver_gpr_x1,
1897   debugserver_gpr_x2,
1898   debugserver_gpr_x3,
1899   debugserver_gpr_x4,
1900   debugserver_gpr_x5,
1901   debugserver_gpr_x6,
1902   debugserver_gpr_x7,
1903   debugserver_gpr_x8,
1904   debugserver_gpr_x9,
1905   debugserver_gpr_x10,
1906   debugserver_gpr_x11,
1907   debugserver_gpr_x12,
1908   debugserver_gpr_x13,
1909   debugserver_gpr_x14,
1910   debugserver_gpr_x15,
1911   debugserver_gpr_x16,
1912   debugserver_gpr_x17,
1913   debugserver_gpr_x18,
1914   debugserver_gpr_x19,
1915   debugserver_gpr_x20,
1916   debugserver_gpr_x21,
1917   debugserver_gpr_x22,
1918   debugserver_gpr_x23,
1919   debugserver_gpr_x24,
1920   debugserver_gpr_x25,
1921   debugserver_gpr_x26,
1922   debugserver_gpr_x27,
1923   debugserver_gpr_x28,
1924   debugserver_gpr_fp, // x29
1925   debugserver_gpr_lr, // x30
1926   debugserver_gpr_sp, // sp aka xsp
1927   debugserver_gpr_pc,
1928   debugserver_gpr_cpsr,
1929   debugserver_vfp_v0,
1930   debugserver_vfp_v1,
1931   debugserver_vfp_v2,
1932   debugserver_vfp_v3,
1933   debugserver_vfp_v4,
1934   debugserver_vfp_v5,
1935   debugserver_vfp_v6,
1936   debugserver_vfp_v7,
1937   debugserver_vfp_v8,
1938   debugserver_vfp_v9,
1939   debugserver_vfp_v10,
1940   debugserver_vfp_v11,
1941   debugserver_vfp_v12,
1942   debugserver_vfp_v13,
1943   debugserver_vfp_v14,
1944   debugserver_vfp_v15,
1945   debugserver_vfp_v16,
1946   debugserver_vfp_v17,
1947   debugserver_vfp_v18,
1948   debugserver_vfp_v19,
1949   debugserver_vfp_v20,
1950   debugserver_vfp_v21,
1951   debugserver_vfp_v22,
1952   debugserver_vfp_v23,
1953   debugserver_vfp_v24,
1954   debugserver_vfp_v25,
1955   debugserver_vfp_v26,
1956   debugserver_vfp_v27,
1957   debugserver_vfp_v28,
1958   debugserver_vfp_v29,
1959   debugserver_vfp_v30,
1960   debugserver_vfp_v31,
1961   debugserver_vfp_fpsr,
1962   debugserver_vfp_fpcr,
1963   debugserver_sve_z0,
1964   debugserver_sve_z1,
1965   debugserver_sve_z2,
1966   debugserver_sve_z3,
1967   debugserver_sve_z4,
1968   debugserver_sve_z5,
1969   debugserver_sve_z6,
1970   debugserver_sve_z7,
1971   debugserver_sve_z8,
1972   debugserver_sve_z9,
1973   debugserver_sve_z10,
1974   debugserver_sve_z11,
1975   debugserver_sve_z12,
1976   debugserver_sve_z13,
1977   debugserver_sve_z14,
1978   debugserver_sve_z15,
1979   debugserver_sve_z16,
1980   debugserver_sve_z17,
1981   debugserver_sve_z18,
1982   debugserver_sve_z19,
1983   debugserver_sve_z20,
1984   debugserver_sve_z21,
1985   debugserver_sve_z22,
1986   debugserver_sve_z23,
1987   debugserver_sve_z24,
1988   debugserver_sve_z25,
1989   debugserver_sve_z26,
1990   debugserver_sve_z27,
1991   debugserver_sve_z28,
1992   debugserver_sve_z29,
1993   debugserver_sve_z30,
1994   debugserver_sve_z31,
1995   debugserver_sve_p0,
1996   debugserver_sve_p1,
1997   debugserver_sve_p2,
1998   debugserver_sve_p3,
1999   debugserver_sve_p4,
2000   debugserver_sve_p5,
2001   debugserver_sve_p6,
2002   debugserver_sve_p7,
2003   debugserver_sve_p8,
2004   debugserver_sve_p9,
2005   debugserver_sve_p10,
2006   debugserver_sve_p11,
2007   debugserver_sve_p12,
2008   debugserver_sve_p13,
2009   debugserver_sve_p14,
2010   debugserver_sve_p15,
2011   debugserver_sme_svcr,
2012   debugserver_sme_tpidr2,
2013   debugserver_sme_svl_b,
2014   debugserver_sme_za,
2015   debugserver_sme_zt0
2016 };
2017 
2018 const char *g_contained_x0[]{"x0", NULL};
2019 const char *g_contained_x1[]{"x1", NULL};
2020 const char *g_contained_x2[]{"x2", NULL};
2021 const char *g_contained_x3[]{"x3", NULL};
2022 const char *g_contained_x4[]{"x4", NULL};
2023 const char *g_contained_x5[]{"x5", NULL};
2024 const char *g_contained_x6[]{"x6", NULL};
2025 const char *g_contained_x7[]{"x7", NULL};
2026 const char *g_contained_x8[]{"x8", NULL};
2027 const char *g_contained_x9[]{"x9", NULL};
2028 const char *g_contained_x10[]{"x10", NULL};
2029 const char *g_contained_x11[]{"x11", NULL};
2030 const char *g_contained_x12[]{"x12", NULL};
2031 const char *g_contained_x13[]{"x13", NULL};
2032 const char *g_contained_x14[]{"x14", NULL};
2033 const char *g_contained_x15[]{"x15", NULL};
2034 const char *g_contained_x16[]{"x16", NULL};
2035 const char *g_contained_x17[]{"x17", NULL};
2036 const char *g_contained_x18[]{"x18", NULL};
2037 const char *g_contained_x19[]{"x19", NULL};
2038 const char *g_contained_x20[]{"x20", NULL};
2039 const char *g_contained_x21[]{"x21", NULL};
2040 const char *g_contained_x22[]{"x22", NULL};
2041 const char *g_contained_x23[]{"x23", NULL};
2042 const char *g_contained_x24[]{"x24", NULL};
2043 const char *g_contained_x25[]{"x25", NULL};
2044 const char *g_contained_x26[]{"x26", NULL};
2045 const char *g_contained_x27[]{"x27", NULL};
2046 const char *g_contained_x28[]{"x28", NULL};
2047 
2048 const char *g_invalidate_x0[]{"x0", "w0", NULL};
2049 const char *g_invalidate_x1[]{"x1", "w1", NULL};
2050 const char *g_invalidate_x2[]{"x2", "w2", NULL};
2051 const char *g_invalidate_x3[]{"x3", "w3", NULL};
2052 const char *g_invalidate_x4[]{"x4", "w4", NULL};
2053 const char *g_invalidate_x5[]{"x5", "w5", NULL};
2054 const char *g_invalidate_x6[]{"x6", "w6", NULL};
2055 const char *g_invalidate_x7[]{"x7", "w7", NULL};
2056 const char *g_invalidate_x8[]{"x8", "w8", NULL};
2057 const char *g_invalidate_x9[]{"x9", "w9", NULL};
2058 const char *g_invalidate_x10[]{"x10", "w10", NULL};
2059 const char *g_invalidate_x11[]{"x11", "w11", NULL};
2060 const char *g_invalidate_x12[]{"x12", "w12", NULL};
2061 const char *g_invalidate_x13[]{"x13", "w13", NULL};
2062 const char *g_invalidate_x14[]{"x14", "w14", NULL};
2063 const char *g_invalidate_x15[]{"x15", "w15", NULL};
2064 const char *g_invalidate_x16[]{"x16", "w16", NULL};
2065 const char *g_invalidate_x17[]{"x17", "w17", NULL};
2066 const char *g_invalidate_x18[]{"x18", "w18", NULL};
2067 const char *g_invalidate_x19[]{"x19", "w19", NULL};
2068 const char *g_invalidate_x20[]{"x20", "w20", NULL};
2069 const char *g_invalidate_x21[]{"x21", "w21", NULL};
2070 const char *g_invalidate_x22[]{"x22", "w22", NULL};
2071 const char *g_invalidate_x23[]{"x23", "w23", NULL};
2072 const char *g_invalidate_x24[]{"x24", "w24", NULL};
2073 const char *g_invalidate_x25[]{"x25", "w25", NULL};
2074 const char *g_invalidate_x26[]{"x26", "w26", NULL};
2075 const char *g_invalidate_x27[]{"x27", "w27", NULL};
2076 const char *g_invalidate_x28[]{"x28", "w28", NULL};
2077 
2078 #define GPR_OFFSET_IDX(idx) (offsetof(DNBArchMachARM64::GPR, __x[idx]))
2079 
2080 #define GPR_OFFSET_NAME(reg) (offsetof(DNBArchMachARM64::GPR, __##reg))
2081 
2082 // These macros will auto define the register name, alt name, register size,
2083 // register offset, encoding, format and native register. This ensures that
2084 // the register state structures are defined correctly and have the correct
2085 // sizes and offsets.
2086 #define DEFINE_GPR_IDX(idx, reg, alt, gen)                                     \
2087   {                                                                            \
2088     e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx),      \
2089         dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL,            \
2090         g_invalidate_x##idx                                                    \
2091   }
2092 #define DEFINE_GPR_NAME(reg, alt, gen)                                         \
2093   {                                                                            \
2094     e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg),     \
2095         dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL       \
2096   }
2097 #define DEFINE_PSEUDO_GPR_IDX(idx, reg)                                        \
2098   {                                                                            \
2099     e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM,   \
2100         INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
2101         g_contained_x##idx, g_invalidate_x##idx                                \
2102   }
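     // For example, DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1) expands
     // to { e_regSetGPR, gpr_x0, "x0", "arg1", Uint, Hex, 8, GPR_OFFSET_IDX(0),
     //      dwarf_x0, dwarf_x0, GENERIC_REGNUM_ARG1, debugserver_gpr_x0, NULL,
     //      g_invalidate_x0 }.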
2103 
2104 //_STRUCT_ARM_THREAD_STATE64
2105 //{
2106 //	uint64_t    x[29];	/* General purpose registers x0-x28 */
2107 //	uint64_t    fp;		/* Frame pointer x29 */
2108 //	uint64_t    lr;		/* Link register x30 */
2109 //	uint64_t    sp;		/* Stack pointer x31 */
2110 //	uint64_t    pc;		/* Program counter */
2111 //	uint32_t    cpsr;	/* Current program status register */
2112 //};
2113 
2114 // General purpose registers
2115 const DNBRegisterInfo DNBArchMachARM64::g_gpr_registers[] = {
2116     DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1),
2117     DEFINE_GPR_IDX(1, x1, "arg2", GENERIC_REGNUM_ARG2),
2118     DEFINE_GPR_IDX(2, x2, "arg3", GENERIC_REGNUM_ARG3),
2119     DEFINE_GPR_IDX(3, x3, "arg4", GENERIC_REGNUM_ARG4),
2120     DEFINE_GPR_IDX(4, x4, "arg5", GENERIC_REGNUM_ARG5),
2121     DEFINE_GPR_IDX(5, x5, "arg6", GENERIC_REGNUM_ARG6),
2122     DEFINE_GPR_IDX(6, x6, "arg7", GENERIC_REGNUM_ARG7),
2123     DEFINE_GPR_IDX(7, x7, "arg8", GENERIC_REGNUM_ARG8),
2124     DEFINE_GPR_IDX(8, x8, NULL, INVALID_NUB_REGNUM),
2125     DEFINE_GPR_IDX(9, x9, NULL, INVALID_NUB_REGNUM),
2126     DEFINE_GPR_IDX(10, x10, NULL, INVALID_NUB_REGNUM),
2127     DEFINE_GPR_IDX(11, x11, NULL, INVALID_NUB_REGNUM),
2128     DEFINE_GPR_IDX(12, x12, NULL, INVALID_NUB_REGNUM),
2129     DEFINE_GPR_IDX(13, x13, NULL, INVALID_NUB_REGNUM),
2130     DEFINE_GPR_IDX(14, x14, NULL, INVALID_NUB_REGNUM),
2131     DEFINE_GPR_IDX(15, x15, NULL, INVALID_NUB_REGNUM),
2132     DEFINE_GPR_IDX(16, x16, NULL, INVALID_NUB_REGNUM),
2133     DEFINE_GPR_IDX(17, x17, NULL, INVALID_NUB_REGNUM),
2134     DEFINE_GPR_IDX(18, x18, NULL, INVALID_NUB_REGNUM),
2135     DEFINE_GPR_IDX(19, x19, NULL, INVALID_NUB_REGNUM),
2136     DEFINE_GPR_IDX(20, x20, NULL, INVALID_NUB_REGNUM),
2137     DEFINE_GPR_IDX(21, x21, NULL, INVALID_NUB_REGNUM),
2138     DEFINE_GPR_IDX(22, x22, NULL, INVALID_NUB_REGNUM),
2139     DEFINE_GPR_IDX(23, x23, NULL, INVALID_NUB_REGNUM),
2140     DEFINE_GPR_IDX(24, x24, NULL, INVALID_NUB_REGNUM),
2141     DEFINE_GPR_IDX(25, x25, NULL, INVALID_NUB_REGNUM),
2142     DEFINE_GPR_IDX(26, x26, NULL, INVALID_NUB_REGNUM),
2143     DEFINE_GPR_IDX(27, x27, NULL, INVALID_NUB_REGNUM),
2144     DEFINE_GPR_IDX(28, x28, NULL, INVALID_NUB_REGNUM),
2145     // For the G/g packet we want to show where the offset into the regctx
2146     // is for fp/lr/sp/pc, but we cannot directly access them on arm64e
2147     // devices (and therefore can't offsetof() them) - add the offset based
2148     // on the last accessible register by hand for advertising the location
2149     // in the regctx to lldb.  We'll go through the accessor functions when
2150     // we read/write them here.
2151     {
2152        e_regSetGPR, gpr_fp, "fp", "x29", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 8,
2153        dwarf_fp, dwarf_fp, GENERIC_REGNUM_FP, debugserver_gpr_fp, NULL, NULL
2154     },
2155     {
2156        e_regSetGPR, gpr_lr, "lr", "x30", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 16,
2157        dwarf_lr, dwarf_lr, GENERIC_REGNUM_RA, debugserver_gpr_lr, NULL, NULL
2158     },
2159     {
2160        e_regSetGPR, gpr_sp, "sp", "xsp", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 24,
2161        dwarf_sp, dwarf_sp, GENERIC_REGNUM_SP, debugserver_gpr_sp, NULL, NULL
2162     },
2163     {
2164        e_regSetGPR, gpr_pc, "pc", NULL, Uint, Hex, 8, GPR_OFFSET_IDX(28) + 32,
2165        dwarf_pc, dwarf_pc, GENERIC_REGNUM_PC, debugserver_gpr_pc, NULL, NULL
2166     },
2167 
2168     // In armv7 we specify that writing to the CPSR should invalidate
2169     // r8-12, sp, and lr.
2170     // This should be specified for arm64 too,
2171     // even though debugserver is only used
2172     // for userland debugging.
2173     {e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4,
2174      GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, GENERIC_REGNUM_FLAGS,
2175      debugserver_gpr_cpsr, NULL, NULL},
2176 
2177     DEFINE_PSEUDO_GPR_IDX(0, w0),
2178     DEFINE_PSEUDO_GPR_IDX(1, w1),
2179     DEFINE_PSEUDO_GPR_IDX(2, w2),
2180     DEFINE_PSEUDO_GPR_IDX(3, w3),
2181     DEFINE_PSEUDO_GPR_IDX(4, w4),
2182     DEFINE_PSEUDO_GPR_IDX(5, w5),
2183     DEFINE_PSEUDO_GPR_IDX(6, w6),
2184     DEFINE_PSEUDO_GPR_IDX(7, w7),
2185     DEFINE_PSEUDO_GPR_IDX(8, w8),
2186     DEFINE_PSEUDO_GPR_IDX(9, w9),
2187     DEFINE_PSEUDO_GPR_IDX(10, w10),
2188     DEFINE_PSEUDO_GPR_IDX(11, w11),
2189     DEFINE_PSEUDO_GPR_IDX(12, w12),
2190     DEFINE_PSEUDO_GPR_IDX(13, w13),
2191     DEFINE_PSEUDO_GPR_IDX(14, w14),
2192     DEFINE_PSEUDO_GPR_IDX(15, w15),
2193     DEFINE_PSEUDO_GPR_IDX(16, w16),
2194     DEFINE_PSEUDO_GPR_IDX(17, w17),
2195     DEFINE_PSEUDO_GPR_IDX(18, w18),
2196     DEFINE_PSEUDO_GPR_IDX(19, w19),
2197     DEFINE_PSEUDO_GPR_IDX(20, w20),
2198     DEFINE_PSEUDO_GPR_IDX(21, w21),
2199     DEFINE_PSEUDO_GPR_IDX(22, w22),
2200     DEFINE_PSEUDO_GPR_IDX(23, w23),
2201     DEFINE_PSEUDO_GPR_IDX(24, w24),
2202     DEFINE_PSEUDO_GPR_IDX(25, w25),
2203     DEFINE_PSEUDO_GPR_IDX(26, w26),
2204     DEFINE_PSEUDO_GPR_IDX(27, w27),
2205     DEFINE_PSEUDO_GPR_IDX(28, w28)};
2206 
2207 const char *g_contained_v0[]{"v0", NULL};
2208 const char *g_contained_v1[]{"v1", NULL};
2209 const char *g_contained_v2[]{"v2", NULL};
2210 const char *g_contained_v3[]{"v3", NULL};
2211 const char *g_contained_v4[]{"v4", NULL};
2212 const char *g_contained_v5[]{"v5", NULL};
2213 const char *g_contained_v6[]{"v6", NULL};
2214 const char *g_contained_v7[]{"v7", NULL};
2215 const char *g_contained_v8[]{"v8", NULL};
2216 const char *g_contained_v9[]{"v9", NULL};
2217 const char *g_contained_v10[]{"v10", NULL};
2218 const char *g_contained_v11[]{"v11", NULL};
2219 const char *g_contained_v12[]{"v12", NULL};
2220 const char *g_contained_v13[]{"v13", NULL};
2221 const char *g_contained_v14[]{"v14", NULL};
2222 const char *g_contained_v15[]{"v15", NULL};
2223 const char *g_contained_v16[]{"v16", NULL};
2224 const char *g_contained_v17[]{"v17", NULL};
2225 const char *g_contained_v18[]{"v18", NULL};
2226 const char *g_contained_v19[]{"v19", NULL};
2227 const char *g_contained_v20[]{"v20", NULL};
2228 const char *g_contained_v21[]{"v21", NULL};
2229 const char *g_contained_v22[]{"v22", NULL};
2230 const char *g_contained_v23[]{"v23", NULL};
2231 const char *g_contained_v24[]{"v24", NULL};
2232 const char *g_contained_v25[]{"v25", NULL};
2233 const char *g_contained_v26[]{"v26", NULL};
2234 const char *g_contained_v27[]{"v27", NULL};
2235 const char *g_contained_v28[]{"v28", NULL};
2236 const char *g_contained_v29[]{"v29", NULL};
2237 const char *g_contained_v30[]{"v30", NULL};
2238 const char *g_contained_v31[]{"v31", NULL};
2239 
2240 const char *g_invalidate_v[32][4]{
2241     {"v0", "d0", "s0", NULL},    {"v1", "d1", "s1", NULL},
2242     {"v2", "d2", "s2", NULL},    {"v3", "d3", "s3", NULL},
2243     {"v4", "d4", "s4", NULL},    {"v5", "d5", "s5", NULL},
2244     {"v6", "d6", "s6", NULL},    {"v7", "d7", "s7", NULL},
2245     {"v8", "d8", "s8", NULL},    {"v9", "d9", "s9", NULL},
2246     {"v10", "d10", "s10", NULL}, {"v11", "d11", "s11", NULL},
2247     {"v12", "d12", "s12", NULL}, {"v13", "d13", "s13", NULL},
2248     {"v14", "d14", "s14", NULL}, {"v15", "d15", "s15", NULL},
2249     {"v16", "d16", "s16", NULL}, {"v17", "d17", "s17", NULL},
2250     {"v18", "d18", "s18", NULL}, {"v19", "d19", "s19", NULL},
2251     {"v20", "d20", "s20", NULL}, {"v21", "d21", "s21", NULL},
2252     {"v22", "d22", "s22", NULL}, {"v23", "d23", "s23", NULL},
2253     {"v24", "d24", "s24", NULL}, {"v25", "d25", "s25", NULL},
2254     {"v26", "d26", "s26", NULL}, {"v27", "d27", "s27", NULL},
2255     {"v28", "d28", "s28", NULL}, {"v29", "d29", "s29", NULL},
2256     {"v30", "d30", "s30", NULL}, {"v31", "d31", "s31", NULL}};
2257 
2258 const char *g_invalidate_z[32][5]{
2259     {"z0", "v0", "d0", "s0", NULL},     {"z1", "v1", "d1", "s1", NULL},
2260     {"z2", "v2", "d2", "s2", NULL},     {"z3", "v3", "d3", "s3", NULL},
2261     {"z4", "v4", "d4", "s4", NULL},     {"z5", "v5", "d5", "s5", NULL},
2262     {"z6", "v6", "d6", "s6", NULL},     {"z7", "v7", "d7", "s7", NULL},
2263     {"z8", "v8", "d8", "s8", NULL},     {"z9", "v9", "d9", "s9", NULL},
2264     {"z10", "v10", "d10", "s10", NULL}, {"z11", "v11", "d11", "s11", NULL},
2265     {"z12", "v12", "d12", "s12", NULL}, {"z13", "v13", "d13", "s13", NULL},
2266     {"z14", "v14", "d14", "s14", NULL}, {"z15", "v15", "d15", "s15", NULL},
2267     {"z16", "v16", "d16", "s16", NULL}, {"z17", "v17", "d17", "s17", NULL},
2268     {"z18", "v18", "d18", "s18", NULL}, {"z19", "v19", "d19", "s19", NULL},
2269     {"z20", "v20", "d20", "s20", NULL}, {"z21", "v21", "d21", "s21", NULL},
2270     {"z22", "v22", "d22", "s22", NULL}, {"z23", "v23", "d23", "s23", NULL},
2271     {"z24", "v24", "d24", "s24", NULL}, {"z25", "v25", "d25", "s25", NULL},
2272     {"z26", "v26", "d26", "s26", NULL}, {"z27", "v27", "d27", "s27", NULL},
2273     {"z28", "v28", "d28", "s28", NULL}, {"z29", "v29", "d29", "s29", NULL},
2274     {"z30", "v30", "d30", "s30", NULL}, {"z31", "v31", "d31", "s31", NULL}};
2275 
2276 const char *g_contained_z0[]{"z0", NULL};
2277 const char *g_contained_z1[]{"z1", NULL};
2278 const char *g_contained_z2[]{"z2", NULL};
2279 const char *g_contained_z3[]{"z3", NULL};
2280 const char *g_contained_z4[]{"z4", NULL};
2281 const char *g_contained_z5[]{"z5", NULL};
2282 const char *g_contained_z6[]{"z6", NULL};
2283 const char *g_contained_z7[]{"z7", NULL};
2284 const char *g_contained_z8[]{"z8", NULL};
2285 const char *g_contained_z9[]{"z9", NULL};
2286 const char *g_contained_z10[]{"z10", NULL};
2287 const char *g_contained_z11[]{"z11", NULL};
2288 const char *g_contained_z12[]{"z12", NULL};
2289 const char *g_contained_z13[]{"z13", NULL};
2290 const char *g_contained_z14[]{"z14", NULL};
2291 const char *g_contained_z15[]{"z15", NULL};
2292 const char *g_contained_z16[]{"z16", NULL};
2293 const char *g_contained_z17[]{"z17", NULL};
2294 const char *g_contained_z18[]{"z18", NULL};
2295 const char *g_contained_z19[]{"z19", NULL};
2296 const char *g_contained_z20[]{"z20", NULL};
2297 const char *g_contained_z21[]{"z21", NULL};
2298 const char *g_contained_z22[]{"z22", NULL};
2299 const char *g_contained_z23[]{"z23", NULL};
2300 const char *g_contained_z24[]{"z24", NULL};
2301 const char *g_contained_z25[]{"z25", NULL};
2302 const char *g_contained_z26[]{"z26", NULL};
2303 const char *g_contained_z27[]{"z27", NULL};
2304 const char *g_contained_z28[]{"z28", NULL};
2305 const char *g_contained_z29[]{"z29", NULL};
2306 const char *g_contained_z30[]{"z30", NULL};
2307 const char *g_contained_z31[]{"z31", NULL};
2308 
2309 #if defined(__arm64__) || defined(__aarch64__)
2310 #define VFP_V_OFFSET_IDX(idx)                                                  \
2311   (offsetof(DNBArchMachARM64::FPU, __v) + (idx * 16) +                         \
2312    offsetof(DNBArchMachARM64::Context, vfp))
2313 #else
2314 #define VFP_V_OFFSET_IDX(idx)                                                  \
2315   (offsetof(DNBArchMachARM64::FPU, opaque) + (idx * 16) +                      \
2316    offsetof(DNBArchMachARM64::Context, vfp))
2317 #endif
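     // VFP_V_OFFSET_IDX(idx) is the byte offset of v<idx> within the whole
     // Context: the offset of the vfp state inside Context plus 16 bytes for
     // each preceding 128-bit v register.  Index 32 (used for fpsr/fpcr below)
     // addresses the bytes immediately following v31.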
2318 #define EXC_OFFSET(reg)                                                        \
2319   (offsetof(DNBArchMachARM64::EXC, reg) +                                      \
2320    offsetof(DNBArchMachARM64::Context, exc))
2321 #define SVE_OFFSET_Z_IDX(idx)                                                  \
2322   (offsetof(DNBArchMachARM64::SVE, z[idx]) +                                   \
2323    offsetof(DNBArchMachARM64::Context, sve))
2324 #define SVE_OFFSET_P_IDX(idx)                                                  \
2325   (offsetof(DNBArchMachARM64::SVE, p[idx]) +                                   \
2326    offsetof(DNBArchMachARM64::Context, sve))
2327 #define SME_OFFSET(reg)                                                        \
2328   (offsetof(DNBArchMachARM64::SME, reg) +                                      \
2329    offsetof(DNBArchMachARM64::Context, sme))
2330 
2331 //_STRUCT_ARM_EXCEPTION_STATE64
2332 //{
2333 //	uint64_t	far; /* Virtual Fault Address */
2334 //	uint32_t	esr; /* Exception syndrome */
2335 //	uint32_t	exception; /* number of arm exception taken */
2336 //};
2337 
2338 // Exception registers
2339 const DNBRegisterInfo DNBArchMachARM64::g_exc_registers[] = {
2340     {e_regSetEXC, exc_far, "far", NULL, Uint, Hex, 8, EXC_OFFSET(__far),
2341      INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2342      INVALID_NUB_REGNUM, NULL, NULL},
2343     {e_regSetEXC, exc_esr, "esr", NULL, Uint, Hex, 4, EXC_OFFSET(__esr),
2344      INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2345      INVALID_NUB_REGNUM, NULL, NULL},
2346     {e_regSetEXC, exc_exception, "exception", NULL, Uint, Hex, 4,
2347      EXC_OFFSET(__exception), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2348      INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};
2349 
2350 // Number of registers in each register set
2351 const size_t DNBArchMachARM64::k_num_gpr_registers =
2352     sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
2353 const size_t DNBArchMachARM64::k_num_exc_registers =
2354     sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
2355 
2356 static std::vector<DNBRegisterInfo> g_sve_registers;
2357 static void initialize_sve_registers() {
2358   static const char *g_z_regnames[32] = {
2359       "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",
2360       "z8",  "z9",  "z10", "z11", "z12", "z13", "z14", "z15",
2361       "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23",
2362       "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"};
2363   static const char *g_p_regnames[16] = {
2364       "p0", "p1", "p2",  "p3",  "p4",  "p5",  "p6",  "p7",
2365       "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"};
2366 
2367   if (DNBArchMachARM64::CPUHasSME()) {
2368     uint32_t svl_bytes = DNBArchMachARM64::GetSMEMaxSVL();
2369     for (uint32_t i = 0; i < 32; i++) {
2370       g_sve_registers.push_back(
2371           {DNBArchMachARM64::e_regSetSVE, (uint32_t)sve_z0 + i, g_z_regnames[i],
2372            NULL, Vector, VectorOfUInt8, svl_bytes,
2373            static_cast<uint32_t>(SVE_OFFSET_Z_IDX(i)), INVALID_NUB_REGNUM,
2374            INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2375            (uint32_t)debugserver_sve_z0 + i, NULL, g_invalidate_z[i]});
2376     }
2377     for (uint32_t i = 0; i < 16; i++) {
2378       g_sve_registers.push_back(
2379           {DNBArchMachARM64::e_regSetSVE, (uint32_t)sve_p0 + i, g_p_regnames[i],
2380            NULL, Vector, VectorOfUInt8, svl_bytes / 8,
2381            (uint32_t)SVE_OFFSET_P_IDX(i), INVALID_NUB_REGNUM,
2382            INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2383            (uint32_t)debugserver_sve_p0 + i, NULL, NULL});
2384     }
2385   }
2386 }
2387 
2388 static std::vector<DNBRegisterInfo> g_vfp_registers;
2389 static void initialize_vfp_registers() {
2390   static const char *g_v_regnames[32] = {
2391       "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
2392       "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
2393       "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
2394       "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
2395   static const char *g_q_regnames[32] = {
2396       "q0",  "q1",  "q2",  "q3",  "q4",  "q5",  "q6",  "q7",
2397       "q8",  "q9",  "q10", "q11", "q12", "q13", "q14", "q15",
2398       "q16", "q17", "q18", "q19", "q20", "q21", "q22", "q23",
2399       "q24", "q25", "q26", "q27", "q28", "q29", "q30", "q31"};
2400 
2401   static const char *g_d_regnames[32] = {
2402       "d0",  "d1",  "d2",  "d3",  "d4",  "d5",  "d6",  "d7",
2403       "d8",  "d9",  "d10", "d11", "d12", "d13", "d14", "d15",
2404       "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
2405       "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
2406 
2407   static const char *g_s_regnames[32] = {
2408       "s0",  "s1",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",
2409       "s8",  "s9",  "s10", "s11", "s12", "s13", "s14", "s15",
2410       "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
2411       "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
2412 
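       // On SME/SVE-capable CPUs each v<i> is the low 128 bits of z<i>, so a
       // write to a v/d/s register must also invalidate any cached z register;
       // hence the invalidate list switches to g_invalidate_z below.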
2413   for (uint32_t i = 0; i < 32; i++)
2414     if (DNBArchMachARM64::CPUHasSME())
2415       g_vfp_registers.push_back(
2416           {DNBArchMachARM64::e_regSetVFP, (uint32_t)vfp_v0 + i, g_v_regnames[i],
2417            g_q_regnames[i], Vector, VectorOfUInt8, 16,
2418            static_cast<uint32_t>(VFP_V_OFFSET_IDX(i)), INVALID_NUB_REGNUM,
2419            (uint32_t)dwarf_v0 + i, INVALID_NUB_REGNUM,
2420            (uint32_t)debugserver_vfp_v0 + i, NULL, g_invalidate_z[i]});
2421     else
2422       g_vfp_registers.push_back(
2423           {DNBArchMachARM64::e_regSetVFP, (uint32_t)vfp_v0 + i, g_v_regnames[i],
2424            g_q_regnames[i], Vector, VectorOfUInt8, 16,
2425            static_cast<uint32_t>(VFP_V_OFFSET_IDX(i)), INVALID_NUB_REGNUM,
2426            (uint32_t)dwarf_v0 + i, INVALID_NUB_REGNUM,
2427            (uint32_t)debugserver_vfp_v0 + i, NULL, g_invalidate_v[i]});
2428 
2429   g_vfp_registers.push_back(
2430       {DNBArchMachARM64::e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4,
2431        VFP_V_OFFSET_IDX(32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2432        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL});
2433   g_vfp_registers.push_back(
2434       {DNBArchMachARM64::e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4,
2435        VFP_V_OFFSET_IDX(32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2436        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL});
2437 
2438   for (uint32_t i = 0; i < 32; i++)
2439     if (DNBArchMachARM64::CPUHasSME())
2440       g_vfp_registers.push_back(
2441           {DNBArchMachARM64::e_regSetVFP, (uint32_t)vfp_d0 + i, g_d_regnames[i],
2442            NULL, IEEE754, Float, 8, 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2443            INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, g_invalidate_z[i]});
2444     else
2445       g_vfp_registers.push_back(
2446           {DNBArchMachARM64::e_regSetVFP, (uint32_t)vfp_d0 + i, g_d_regnames[i],
2447            NULL, IEEE754, Float, 8, 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2448            INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, g_invalidate_v[i]});
2449 
2450   for (uint32_t i = 0; i < 32; i++)
2451     if (DNBArchMachARM64::CPUHasSME())
2452       g_vfp_registers.push_back(
2453           {DNBArchMachARM64::e_regSetVFP, (uint32_t)vfp_s0 + i, g_s_regnames[i],
2454            NULL, IEEE754, Float, 4, 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2455            INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, g_invalidate_z[i]});
2456     else
2457       g_vfp_registers.push_back(
2458           {DNBArchMachARM64::e_regSetVFP, (uint32_t)vfp_s0 + i, g_s_regnames[i],
2459            NULL, IEEE754, Float, 4, 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2460            INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, g_invalidate_v[i]});
2461 }
2462 
2463 static std::once_flag g_vfp_once;
2464 DNBRegisterInfo *
2465 DNBArchMachARM64::get_vfp_registerinfo(size_t &num_vfp_registers) {
2466   std::call_once(g_vfp_once, []() { initialize_vfp_registers(); });
2467   num_vfp_registers = g_vfp_registers.size();
2468   if (num_vfp_registers > 0)
2469     return g_vfp_registers.data();
2470   else
2471     return nullptr;
2472 }
2473 
2474 static std::once_flag g_sve_once;
2475 DNBRegisterInfo *
2476 DNBArchMachARM64::get_sve_registerinfo(size_t &num_sve_registers) {
2477   std::call_once(g_sve_once, []() { initialize_sve_registers(); });
2478   num_sve_registers = g_sve_registers.size();
2479   if (num_sve_registers > 0)
2480     return g_sve_registers.data();
2481   else
2482     return nullptr;
2483 }
2484 
2485 static std::vector<DNBRegisterInfo> g_sme_registers;
2486 static void initialize_sme_registers() {
2487   if (DNBArchMachARM64::CPUHasSME()) {
2488     uint32_t svl_bytes = DNBArchMachARM64::GetSMEMaxSVL();
2489     g_sme_registers.push_back(
2490         {DNBArchMachARM64::e_regSetSME, sme_svcr, "svcr", NULL, Uint, Hex, 8,
2491          SME_OFFSET(svcr), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2492          INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL});
2493     g_sme_registers.push_back(
2494         {DNBArchMachARM64::e_regSetSME, sme_tpidr2, "tpidr2", NULL, Uint, Hex,
2495          8, SME_OFFSET(tpidr2), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2496          INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL});
2497     g_sme_registers.push_back(
2498         {DNBArchMachARM64::e_regSetSME, sme_svl_b, "svl", NULL, Uint, Hex, 2,
2499          SME_OFFSET(svl_b), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2500          INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL});
2501     uint32_t za_max_size = svl_bytes * svl_bytes;
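         // ZA is a square byte array of at most SVL_B x SVL_B bytes, so
         // advertise svl_bytes * svl_bytes as its size.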
2502     g_sme_registers.push_back({DNBArchMachARM64::e_regSetSME, sme_za, "za",
2503                                NULL, Vector, VectorOfUInt8, za_max_size,
2504                                SME_OFFSET(za), INVALID_NUB_REGNUM,
2505                                INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2506                                INVALID_NUB_REGNUM, NULL, NULL});
2507   }
2508   if (DNBArchMachARM64::CPUHasSME2()) {
2509     g_sme_registers.push_back({DNBArchMachARM64::e_regSetSME, sme_zt0, "zt0",
2510                                NULL, Vector, VectorOfUInt8, 64, SME_OFFSET(zt0),
2511                                INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
2512                                INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL,
2513                                NULL});
2514   }
2515 }
2516 
2517 static std::once_flag g_sme_once;
2518 DNBRegisterInfo *
2519 DNBArchMachARM64::get_sme_registerinfo(size_t &num_sme_registers) {
2520   std::call_once(g_sme_once, []() { initialize_sme_registers(); });
2521   num_sme_registers = g_sme_registers.size();
2522   if (num_sme_registers > 0)
2523     return g_sme_registers.data();
2524   else
2525     return nullptr;
2526 }
2527 
2528 static std::vector<DNBRegisterSetInfo> g_reg_sets;
2529 void DNBArchMachARM64::initialize_reg_sets() {
2530   nub_size_t num_all_registers = DNBArchMachARM64::k_num_gpr_registers +
2531                                  DNBArchMachARM64::k_num_exc_registers;
2532   size_t num_vfp_registers = 0;
2533   DNBRegisterInfo *vfp_reginfos =
2534       DNBArchMachARM64::get_vfp_registerinfo(num_vfp_registers);
2535   size_t num_sve_registers = 0;
2536   DNBRegisterInfo *sve_reginfos =
2537       DNBArchMachARM64::get_sve_registerinfo(num_sve_registers);
2538   size_t num_sme_registers = 0;
2539   DNBRegisterInfo *sme_reginfos =
2540       DNBArchMachARM64::get_sme_registerinfo(num_sme_registers);
2541   num_all_registers +=
2542       num_vfp_registers + num_sve_registers + num_sme_registers;
2543   g_reg_sets.push_back({"ARM64 Registers", NULL, num_all_registers});
2544   g_reg_sets.push_back({"General Purpose Registers",
2545                         DNBArchMachARM64::g_gpr_registers,
2546                         DNBArchMachARM64::k_num_gpr_registers});
2547   g_reg_sets.push_back(
2548       {"Floating Point Registers", vfp_reginfos, num_vfp_registers});
2549   g_reg_sets.push_back({"Exception State Registers",
2550                         DNBArchMachARM64::g_exc_registers,
2551                         DNBArchMachARM64::k_num_exc_registers});
2552   if (DNBArchMachARM64::CPUHasSME()) {
2553     g_reg_sets.push_back({"Scalable Vector Extension Registers", sve_reginfos,
2554                           num_sve_registers});
2555     g_reg_sets.push_back({"Scalable Matrix Extension Registers", sme_reginfos,
2556                           num_sme_registers});
2557   }
2558 }
2559 
2560 static std::once_flag g_initialize_register_set_info;
2561 const DNBRegisterSetInfo *
2562 DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
2563   std::call_once(g_initialize_register_set_info,
2564                  []() { initialize_reg_sets(); });
2565   *num_reg_sets = g_reg_sets.size();
2566   return g_reg_sets.data();
2567 }
2568 
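     // If 'set' is REGISTER_SET_GENERIC, rewrite (set, reg) in place to the
     // concrete arm64 GPR it refers to (e.g. GENERIC_REGNUM_PC becomes
     // e_regSetGPR / gpr_pc); returns false only for generic registers that
     // have no arm64 mapping.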
2569 bool DNBArchMachARM64::FixGenericRegisterNumber(uint32_t &set, uint32_t &reg) {
2570   if (set == REGISTER_SET_GENERIC) {
2571     switch (reg) {
2572     case GENERIC_REGNUM_PC: // Program Counter
2573       set = e_regSetGPR;
2574       reg = gpr_pc;
2575       break;
2576 
2577     case GENERIC_REGNUM_SP: // Stack Pointer
2578       set = e_regSetGPR;
2579       reg = gpr_sp;
2580       break;
2581 
2582     case GENERIC_REGNUM_FP: // Frame Pointer
2583       set = e_regSetGPR;
2584       reg = gpr_fp;
2585       break;
2586 
2587     case GENERIC_REGNUM_RA: // Return Address
2588       set = e_regSetGPR;
2589       reg = gpr_lr;
2590       break;
2591 
2592     case GENERIC_REGNUM_FLAGS: // Processor flags register
2593       set = e_regSetGPR;
2594       reg = gpr_cpsr;
2595       break;
2596 
2597     case GENERIC_REGNUM_ARG1:
2598     case GENERIC_REGNUM_ARG2:
2599     case GENERIC_REGNUM_ARG3:
2600     case GENERIC_REGNUM_ARG4:
2601     case GENERIC_REGNUM_ARG5:
2602     case GENERIC_REGNUM_ARG6:
2603       set = e_regSetGPR;
2604       reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
2605       break;
2606 
2607     default:
2608       return false;
2609     }
2610   }
2611   return true;
2612 }
2613 bool DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg,
2614                                         DNBRegisterValue *value) {
2615   if (!FixGenericRegisterNumber(set, reg))
2616     return false;
2617 
2618   if (GetRegisterState(set, false) != KERN_SUCCESS)
2619     return false;
2620 
2621   const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
2622   if (regInfo) {
2623     uint16_t max_svl_bytes = GetSMEMaxSVL();
2624     value->info = *regInfo;
2625     switch (set) {
2626     case e_regSetGPR:
2627       if (reg <= gpr_pc) {
2628         switch (reg) {
2629 #if defined(DEBUGSERVER_IS_ARM64E)
2630         case gpr_pc:
2631           value->value.uint64 = clear_pac_bits(
2632               reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
2633           break;
2634         case gpr_lr:
2635           value->value.uint64 = arm_thread_state64_get_lr(m_state.context.gpr);
2636           break;
2637         case gpr_sp:
2638           value->value.uint64 = clear_pac_bits(
2639               reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
2640           break;
2641         case gpr_fp:
2642           value->value.uint64 = clear_pac_bits(
2643               reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_fp));
2644           break;
2645 #else
2646         case gpr_pc:
2647           value->value.uint64 = clear_pac_bits(m_state.context.gpr.__pc);
2648           break;
2649         case gpr_lr:
2650           value->value.uint64 = clear_pac_bits(m_state.context.gpr.__lr);
2651           break;
2652         case gpr_sp:
2653           value->value.uint64 = clear_pac_bits(m_state.context.gpr.__sp);
2654           break;
2655         case gpr_fp:
2656           value->value.uint64 = clear_pac_bits(m_state.context.gpr.__fp);
2657           break;
2658 #endif
2659         default:
2660           value->value.uint64 = m_state.context.gpr.__x[reg];
2661         }
2662         return true;
2663       } else if (reg == gpr_cpsr) {
2664         value->value.uint32 = m_state.context.gpr.__cpsr;
2665         return true;
2666       }
2667       break;
2668 
2669     case e_regSetVFP:
2670 
2671       if (reg >= vfp_v0 && reg <= vfp_v31) {
2672 #if defined(__arm64__) || defined(__aarch64__)
2673         memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0],
2674                16);
2675 #else
2676         memcpy(&value->value.v_uint8,
2677                ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
2678                16);
2679 #endif
2680         return true;
2681       } else if (reg == vfp_fpsr) {
2682 #if defined(__arm64__) || defined(__aarch64__)
2683         memcpy(&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
2684 #else
2685         memcpy(&value->value.uint32,
2686                ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
2687 #endif
2688         return true;
2689       } else if (reg == vfp_fpcr) {
2690 #if defined(__arm64__) || defined(__aarch64__)
2691         memcpy(&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
2692 #else
2693         memcpy(&value->value.uint32,
2694                ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
2695 #endif
2696         return true;
2697       } else if (reg >= vfp_s0 && reg <= vfp_s31) {
2698 #if defined(__arm64__) || defined(__aarch64__)
2699         memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0],
2700                4);
2701 #else
2702         memcpy(&value->value.v_uint8,
2703                ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
2704                4);
2705 #endif
2706         return true;
2707       } else if (reg >= vfp_d0 && reg <= vfp_d31) {
2708 #if defined(__arm64__) || defined(__aarch64__)
2709         memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0],
2710                8);
2711 #else
2712         memcpy(&value->value.v_uint8,
2713                ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
2714                8);
2715 #endif
2716         return true;
2717       }
2718       break;
2719 
2720     case e_regSetSVE:
2721       if (GetRegisterState(e_regSetSVE, false) != KERN_SUCCESS)
2722         return false;
2723 
2724       if (reg >= sve_z0 && reg <= sve_z31) {
2725         memset(&value->value.v_uint8, 0, max_svl_bytes);
2726         memcpy(&value->value.v_uint8, &m_state.context.sve.z[reg - sve_z0],
2727                max_svl_bytes);
2728         return true;
2729       } else if (reg >= sve_p0 && reg <= sve_p15) {
2730         memset(&value->value.v_uint8, 0, max_svl_bytes / 8);
2731         memcpy(&value->value.v_uint8, &m_state.context.sve.p[reg - sve_p0],
2732                max_svl_bytes / 8);
2733         return true;
2734       }
2735       break;
2736 
2737     case e_regSetSME:
2738       if (GetRegisterState(e_regSetSME, false) != KERN_SUCCESS)
2739         return false;
2740 
2741       if (reg == sme_svcr) {
2742         value->value.uint64 = m_state.context.sme.svcr;
2743         return true;
2744       } else if (reg == sme_tpidr2) {
2745         value->value.uint64 = m_state.context.sme.tpidr2;
2746         return true;
2747       } else if (reg == sme_svl_b) {
2748         value->value.uint64 = m_state.context.sme.svl_b;
2749         return true;
2750       } else if (reg == sme_za) {
2751         memcpy(&value->value.v_uint8, m_state.context.sme.za.data(),
2752                max_svl_bytes * max_svl_bytes);
2753         return true;
2754       } else if (reg == sme_zt0) {
2755         memcpy(&value->value.v_uint8, &m_state.context.sme.zt0, 64);
2756         return true;
2757       }
2758       break;
2759 
2760     case e_regSetEXC:
2761       if (reg == exc_far) {
2762         value->value.uint64 = m_state.context.exc.__far;
2763         return true;
2764       } else if (reg == exc_esr) {
2765         value->value.uint32 = m_state.context.exc.__esr;
2766         return true;
2767       } else if (reg == exc_exception) {
2768         value->value.uint32 = m_state.context.exc.__exception;
2769         return true;
2770       }
2771       break;
2772     }
2773   }
2774   return false;
2775 }
2776 
2777 bool DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg,
2778                                         const DNBRegisterValue *value) {
2779   if (!FixGenericRegisterNumber(set, reg))
2780     return false;
2781 
2782   if (GetRegisterState(set, false) != KERN_SUCCESS)
2783     return false;
2784 
2785   bool success = false;
2786   const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
2787   if (regInfo) {
2788     switch (set) {
2789     case e_regSetGPR:
2790       if (reg <= gpr_pc) {
2791 #if defined(__LP64__)
2792         uint64_t signed_value = value->value.uint64;
2793 #if __has_feature(ptrauth_calls)
2794         // The incoming value could be garbage.  Strip it to avoid
2795         // trapping when it gets resigned in the thread state.
2796         signed_value = (uint64_t) ptrauth_strip((void*) signed_value, ptrauth_key_function_pointer);
2797         signed_value = (uint64_t) ptrauth_sign_unauthenticated((void*) signed_value, ptrauth_key_function_pointer, 0);
2798 #endif
2799         if (reg == gpr_pc)
2800           arm_thread_state64_set_pc_fptr (m_state.context.gpr, (void*) signed_value);
2801         else if (reg == gpr_lr)
2802           arm_thread_state64_set_lr_fptr (m_state.context.gpr, (void*) signed_value);
2803         else if (reg == gpr_sp)
2804           arm_thread_state64_set_sp (m_state.context.gpr, value->value.uint64);
2805         else if (reg == gpr_fp)
2806           arm_thread_state64_set_fp (m_state.context.gpr, value->value.uint64);
2807         else
2808           m_state.context.gpr.__x[reg] = value->value.uint64;
2809 #else
2810         m_state.context.gpr.__x[reg] = value->value.uint64;
2811 #endif
2812         success = true;
2813       } else if (reg == gpr_cpsr) {
2814         m_state.context.gpr.__cpsr = value->value.uint32;
2815         success = true;
2816       }
2817       break;
2818 
2819     case e_regSetVFP:
2820       if (reg >= vfp_v0 && reg <= vfp_v31) {
2821 #if defined(__arm64__) || defined(__aarch64__)
2822         memcpy(&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8,
2823                16);
2824 #else
2825         memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
2826                &value->value.v_uint8, 16);
2827 #endif
2828         success = true;
2829       } else if (reg == vfp_fpsr) {
2830 #if defined(__arm64__) || defined(__aarch64__)
2831         memcpy(&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
2832 #else
2833         memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0,
2834                &value->value.uint32, 4);
2835 #endif
2836         success = true;
2837       } else if (reg == vfp_fpcr) {
2838 #if defined(__arm64__) || defined(__aarch64__)
2839         memcpy(&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
2840 #else
2841         memcpy(((uint8_t *)m_state.context.vfp.opaque) + (32 * 16) + 4,
2842                &value->value.uint32, 4);
2843 #endif
2844         success = true;
2845       } else if (reg >= vfp_s0 && reg <= vfp_s31) {
2846 #if defined(__arm64__) || defined(__aarch64__)
2847         memcpy(&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8,
2848                4);
2849 #else
2850         memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
2851                &value->value.v_uint8, 4);
2852 #endif
2853         success = true;
2854       } else if (reg >= vfp_d0 && reg <= vfp_d31) {
2855 #if defined(__arm64__) || defined(__aarch64__)
2856         memcpy(&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8,
2857                8);
2858 #else
2859         memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
2860                &value->value.v_uint8, 8);
2861 #endif
2862         success = true;
2863       }
2864       break;
2865 
2866     case e_regSetSVE:
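           // Each Z register buffer is sized for the maximum streaming vector
           // length (svl_b bytes); each P predicate register holds one bit per
           // vector byte, i.e. svl_b / 8 bytes.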
2867       if (reg >= sve_z0 && reg <= sve_z31) {
2868         uint16_t max_svl_bytes = GetSMEMaxSVL();
2869         memcpy(&m_state.context.sve.z[reg - sve_z0], &value->value.v_uint8,
2870                max_svl_bytes);
2871         success = true;
2872       }
2873       if (reg >= sve_p0 && reg <= sve_p15) {
2874         uint16_t max_svl_bytes = GetSMEMaxSVL();
2875         memcpy(&m_state.context.sve.p[reg - sve_p0], &value->value.v_uint8,
2876                max_svl_bytes / 8);
2877         success = true;
2878       }
2879       break;
2880 
2881     case e_regSetSME:
2882       // Cannot change ARM_SME_STATE registers with thread_set_state
2883       if (reg == sme_svcr || reg == sme_tpidr2 || reg == sme_svl_b)
2884         return false;
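           // ZA is a two-dimensional svl_b x svl_b byte array; its backing
           // store here is sized for the maximum streaming vector length.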
2885       if (reg == sme_za) {
2886         uint16_t max_svl_bytes = GetSMEMaxSVL();
2887         memcpy(m_state.context.sme.za.data(), &value->value.v_uint8,
2888                max_svl_bytes * max_svl_bytes);
2889         success = true;
2890       }
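           // ZT0 is a fixed 512-bit (64-byte) register introduced with SME2.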
2891       if (reg == sme_zt0) {
2892         memcpy(&m_state.context.sme.zt0, &value->value.v_uint8, 64);
2893         success = true;
2894       }
2895       break;
2896 
2897     case e_regSetEXC:
2898       if (reg == exc_far) {
2899         m_state.context.exc.__far = value->value.uint64;
2900         success = true;
2901       } else if (reg == exc_esr) {
2902         m_state.context.exc.__esr = value->value.uint32;
2903         success = true;
2904       } else if (reg == exc_exception) {
2905         m_state.context.exc.__exception = value->value.uint32;
2906         success = true;
2907       }
2908       break;
2909     }
2910   }
2911   if (success)
2912     return SetRegisterState(set) == KERN_SUCCESS;
2913   return false;
2914 }
2915 
2916 kern_return_t DNBArchMachARM64::GetRegisterState(int set, bool force) {
2917   switch (set) {
2918   case e_regSetALL: {
2919     kern_return_t retval = GetGPRState(force) | GetVFPState(force) |
2920                            GetEXCState(force) | GetDBGState(force);
2921     // If the processor is not currently in Streaming SVE Mode, the SVE and
2922     // SME reads will fail.  Don't report that as an error; it is the most
2923     // common case.
2924     if (CPUHasSME()) {
2925       GetSVEState(force);
2926       GetSMEState(force);
2927     }
2928     return retval;
2929   }
2930   case e_regSetGPR:
2931     return GetGPRState(force);
2932   case e_regSetVFP:
2933     return GetVFPState(force);
2934   case e_regSetSVE:
2935     return GetSVEState(force);
2936   case e_regSetSME:
2937     return GetSMEState(force);
2938   case e_regSetEXC:
2939     return GetEXCState(force);
2940   case e_regSetDBG:
2941     return GetDBGState(force);
2942   default:
2943     break;
2944   }
2945   return KERN_INVALID_ARGUMENT;
2946 }
2947 
2948 kern_return_t DNBArchMachARM64::SetRegisterState(int set) {
2949   // Make sure we have a valid context to set.
2950   kern_return_t err = GetRegisterState(set, false);
2951   if (err != KERN_SUCCESS)
2952     return err;
2953 
2954   switch (set) {
2955   case e_regSetALL:
2956     return SetGPRState() | SetVFPState() | SetEXCState() | SetDBGState(false);
2957   case e_regSetGPR:
2958     return SetGPRState();
2959   case e_regSetVFP:
2960     return SetVFPState();
2961   case e_regSetSVE:
2962     return SetSVEState();
2963   case e_regSetSME:
2964     return SetSMEState();
2965   case e_regSetEXC:
2966     return SetEXCState();
2967   case e_regSetDBG:
2968     return SetDBGState(false);
2969   default:
2970     break;
2971   }
2972   return KERN_INVALID_ARGUMENT;
2973 }
2974 
2975 bool DNBArchMachARM64::RegisterSetStateIsValid(int set) const {
2976   return m_state.RegsAreValid(set);
2977 }
2978 
2979 nub_size_t DNBArchMachARM64::GetRegisterContext(void *buf, nub_size_t buf_len) {
2980   nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
2981                     sizeof(m_state.context.exc);
2982   const bool cpu_has_sme = CPUHasSME();
2983   if (cpu_has_sme) {
2984     size += sizeof(m_state.context.sve);
2985     // The ZA register is stored in a std::vector<uint8_t>, so the sizes of
2986     // the SME register sets have to be added up manually.
2987     size += ARM_SME_STATE_COUNT * sizeof(uint32_t);
2988     size += m_state.context.sme.za.size();
2989     size += ARM_SME2_STATE_COUNT * sizeof(uint32_t);
2990   }
2991 
2992   if (buf && buf_len) {
2993     if (size > buf_len)
2994       size = buf_len;
2995 
2996     bool force = false;
2997     if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
2998       return 0;
2999     // Don't error out if SME/SVE fail to read. These can only be read
3000     // when the process is in Streaming SVE Mode, so the failure to read
3001     // them will be common.
3002     if (cpu_has_sme) {
3003       GetSVEState(force);
3004       GetSMEState(force);
3005     }
3006 
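         // The buffer is laid out as: GPR, VFP, then (when SME is available)
         // SVE, the ARM_SME_STATE words (svcr, tpidr2, svl_b), ZA, and ZT0
         // (when SME2 is available), and finally EXC.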
3007     // Copy each struct individually to avoid any padding that might be between
3008     // the structs in m_state.context
3009     uint8_t *p = (uint8_t *)buf;
3010     ::memcpy(p, &m_state.context.gpr, sizeof(m_state.context.gpr));
3011     p += sizeof(m_state.context.gpr);
3012     ::memcpy(p, &m_state.context.vfp, sizeof(m_state.context.vfp));
3013     p += sizeof(m_state.context.vfp);
3014     if (cpu_has_sme) {
3015       ::memcpy(p, &m_state.context.sve, sizeof(m_state.context.sve));
3016       p += sizeof(m_state.context.sve);
3017 
3018       memcpy(p, &m_state.context.sme.svcr,
3019              ARM_SME_STATE_COUNT * sizeof(uint32_t));
3020       p += ARM_SME_STATE_COUNT * sizeof(uint32_t);
3021       memcpy(p, m_state.context.sme.za.data(), m_state.context.sme.za.size());
3022       p += m_state.context.sme.za.size();
3023       if (CPUHasSME2()) {
3024         memcpy(p, &m_state.context.sme.zt0,
3025                ARM_SME2_STATE_COUNT * sizeof(uint32_t));
3026         p += ARM_SME2_STATE_COUNT * sizeof(uint32_t);
3027       }
3028     }
3029     ::memcpy(p, &m_state.context.exc, sizeof(m_state.context.exc));
3030     p += sizeof(m_state.context.exc);
3031 
3032     size_t bytes_written = p - (uint8_t *)buf;
3033     UNUSED_IF_ASSERT_DISABLED(bytes_written);
3034     assert(bytes_written == size);
3035   }
3036   DNBLogThreadedIf(
3037       LOG_THREAD,
3038       "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf,
3039       buf_len, size);
3040   // Return the size of the register context even if NULL was passed in
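       // so a caller can query the needed buffer size with
       // GetRegisterContext(nullptr, 0) and then call again with a buffer of
       // at least that size.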
3041   return size;
3042 }
3043 
3044 nub_size_t DNBArchMachARM64::SetRegisterContext(const void *buf,
3045                                                 nub_size_t buf_len) {
3046   nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
3047                     sizeof(m_state.context.exc);
3048   if (CPUHasSME()) {
3049     // m_state.context.sme is three status registers, then a std::vector<uint8_t>
3050     // for ZA, then zt0, so the size of the data is not statically knowable.
3051     nub_size_t sme_size = ARM_SME_STATE_COUNT * sizeof(uint32_t);
3052     sme_size += m_state.context.sme.za.size();
3053     sme_size += ARM_SME2_STATE_COUNT * sizeof(uint32_t);
3054 
3055     size += sizeof(m_state.context.sve) + sme_size;
3056   }
3057 
3058   if (buf == NULL || buf_len == 0)
3059     size = 0;
3060 
3061   if (size) {
3062     if (size > buf_len)
3063       size = buf_len;
3064 
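         // The incoming buffer must use the same layout that GetRegisterContext
         // produces: GPR, VFP, optional SVE/SME state, then EXC.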
3065     // Copy each struct individually to avoid any padding that might be between
3066     // the structs in m_state.context
3067     const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
3068     ::memcpy(&m_state.context.gpr, p, sizeof(m_state.context.gpr));
3069     p += sizeof(m_state.context.gpr);
3070     ::memcpy(&m_state.context.vfp, p, sizeof(m_state.context.vfp));
3071     p += sizeof(m_state.context.vfp);
3072     if (CPUHasSME()) {
3073       memcpy(&m_state.context.sve, p, sizeof(m_state.context.sve));
3074       p += sizeof(m_state.context.sve);
3075       memcpy(&m_state.context.sme.svcr, p,
3076              ARM_SME_STATE_COUNT * sizeof(uint32_t));
3077       p += ARM_SME_STATE_COUNT * sizeof(uint32_t);
3078       memcpy(m_state.context.sme.za.data(), p, m_state.context.sme.za.size());
3079       p += m_state.context.sme.za.size();
3080       if (CPUHasSME2()) {
3081         memcpy(&m_state.context.sme.zt0, p,
3082                ARM_SME2_STATE_COUNT * sizeof(uint32_t));
3083         p += ARM_SME2_STATE_COUNT * sizeof(uint32_t);
3084       }
3085     }
3086     ::memcpy(&m_state.context.exc, p, sizeof(m_state.context.exc));
3087     p += sizeof(m_state.context.exc);
3088 
3089     size_t bytes_written = p - reinterpret_cast<const uint8_t *>(buf);
3090     UNUSED_IF_ASSERT_DISABLED(bytes_written);
3091     assert(bytes_written == size);
3092     SetGPRState();
3093     SetVFPState();
3094     if (CPUHasSME()) {
3095       SetSVEState();
3096       SetSMEState();
3097     }
3098     SetEXCState();
3099   }
3100   DNBLogThreadedIf(
3101       LOG_THREAD,
3102       "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf,
3103       buf_len, size);
3104   return size;
3105 }
3106 
3107 uint32_t DNBArchMachARM64::SaveRegisterState() {
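       // Abort any outstanding kernel activity on the thread so that the
       // register state captured below (and restored later) reflects a
       // consistent user-level state.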
3108   kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
3109   DNBLogThreadedIf(
3110       LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
3111                   "(SetGPRState() for stop_count = %u)",
3112       m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
3113 
3114   // Always re-read the registers because we called thread_abort_safely() above.
3115   bool force = true;
3116 
3117   if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
3118     DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
3119                                  "error: GPR regs failed to read: %u ",
3120                      kret);
3121   } else if ((kret = GetVFPState(force)) != KERN_SUCCESS) {
3122     DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
3123                                  "error: %s regs failed to read: %u",
3124                      "VFP", kret);
3125   } else {
3126     const uint32_t save_id = GetNextRegisterStateSaveID();
3127     m_saved_register_states[save_id] = m_state.context;
3128     return save_id;
3129   }
3130   return UINT32_MAX;
3131 }
3132 
3133 bool DNBArchMachARM64::RestoreRegisterState(uint32_t save_id) {
3134   SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
3135   if (pos != m_saved_register_states.end()) {
3136     m_state.context.gpr = pos->second.gpr;
3137     m_state.context.vfp = pos->second.vfp;
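         // SaveRegisterState only guarantees fresh GPR and VFP state, so only
         // those two register sets are written back here.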
3138     kern_return_t kret;
3139     bool success = true;
3140     if ((kret = SetGPRState()) != KERN_SUCCESS) {
3141       DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
3142                                    "(save_id = %u) error: GPR regs failed to "
3143                                    "write: %u",
3144                        save_id, kret);
3145       success = false;
3146     } else if ((kret = SetVFPState()) != KERN_SUCCESS) {
3147       DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
3148                                    "(save_id = %u) error: %s regs failed to "
3149                                    "write: %u",
3150                        save_id, "VFP", kret);
3151       success = false;
3152     }
3153     m_saved_register_states.erase(pos);
3154     return success;
3155   }
3156   return false;
3157 }
3158 
3159 #endif // #if defined (ARM_THREAD_STATE64_COUNT)
3160 #endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
3161