xref: /netbsd-src/external/gpl3/gdb/dist/gdb/aarch64-tdep.c (revision 8ecbf5f02b752fcb7debe1a8fab1dc82602bc760)
1 /* Common target dependent code for GDB on AArch64 systems.
2 
3    Copyright (C) 2009-2019 Free Software Foundation, Inc.
4    Contributed by ARM Ltd.
5 
6    This file is part of GDB.
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12 
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20 
21 #include "defs.h"
22 
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2-frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "language.h"
43 #include "infcall.h"
44 #include "ax.h"
45 #include "ax-gdb.h"
46 #include "common/selftest.h"
47 
48 #include "aarch64-tdep.h"
49 #include "aarch64-ravenscar-thread.h"
50 
51 #include "elf-bfd.h"
52 #include "elf/aarch64.h"
53 
54 #include "common/vec.h"
55 
56 #include "record.h"
57 #include "record-full.h"
58 #include "arch/aarch64-insn.h"
59 
60 #include "opcode/aarch64.h"
61 #include <algorithm>
62 
63 #define submask(x) ((1L << ((x) + 1)) - 1)
64 #define bit(obj,st) (((obj) >> (st)) & 1)
65 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
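/* As a worked example of the macros above: bits (insn, 5, 9) shifts
   INSN right by 5 and masks with submask (9 - 5) == 0x1f, extracting
   the five-bit field in bits 5..9 -- the position of the Rn field in
   many AArch64 instruction encodings.  */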
66 
67 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
68    four members.  */
69 #define HA_MAX_NUM_FLDS		4
70 
71 /* All possible aarch64 target descriptors.  */
72 struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];
73 
74 /* The standard register names, and all the valid aliases for them.  */
75 static const struct
76 {
77   const char *const name;
78   int regnum;
79 } aarch64_register_aliases[] =
80 {
81   /* 64-bit register names.  */
82   {"fp", AARCH64_FP_REGNUM},
83   {"lr", AARCH64_LR_REGNUM},
84   {"sp", AARCH64_SP_REGNUM},
85 
86   /* 32-bit register names.  */
87   {"w0", AARCH64_X0_REGNUM + 0},
88   {"w1", AARCH64_X0_REGNUM + 1},
89   {"w2", AARCH64_X0_REGNUM + 2},
90   {"w3", AARCH64_X0_REGNUM + 3},
91   {"w4", AARCH64_X0_REGNUM + 4},
92   {"w5", AARCH64_X0_REGNUM + 5},
93   {"w6", AARCH64_X0_REGNUM + 6},
94   {"w7", AARCH64_X0_REGNUM + 7},
95   {"w8", AARCH64_X0_REGNUM + 8},
96   {"w9", AARCH64_X0_REGNUM + 9},
97   {"w10", AARCH64_X0_REGNUM + 10},
98   {"w11", AARCH64_X0_REGNUM + 11},
99   {"w12", AARCH64_X0_REGNUM + 12},
100   {"w13", AARCH64_X0_REGNUM + 13},
101   {"w14", AARCH64_X0_REGNUM + 14},
102   {"w15", AARCH64_X0_REGNUM + 15},
103   {"w16", AARCH64_X0_REGNUM + 16},
104   {"w17", AARCH64_X0_REGNUM + 17},
105   {"w18", AARCH64_X0_REGNUM + 18},
106   {"w19", AARCH64_X0_REGNUM + 19},
107   {"w20", AARCH64_X0_REGNUM + 20},
108   {"w21", AARCH64_X0_REGNUM + 21},
109   {"w22", AARCH64_X0_REGNUM + 22},
110   {"w23", AARCH64_X0_REGNUM + 23},
111   {"w24", AARCH64_X0_REGNUM + 24},
112   {"w25", AARCH64_X0_REGNUM + 25},
113   {"w26", AARCH64_X0_REGNUM + 26},
114   {"w27", AARCH64_X0_REGNUM + 27},
115   {"w28", AARCH64_X0_REGNUM + 28},
116   {"w29", AARCH64_X0_REGNUM + 29},
117   {"w30", AARCH64_X0_REGNUM + 30},
118 
119   /* Specials.  */
120   {"ip0", AARCH64_X0_REGNUM + 16},
121   {"ip1", AARCH64_X0_REGNUM + 17}
122 };
123 
124 /* The required core 'R' registers.  */
125 static const char *const aarch64_r_register_names[] =
126 {
127   /* These registers must appear in consecutive RAW register number
128      order and they must begin with AARCH64_X0_REGNUM! */
129   "x0", "x1", "x2", "x3",
130   "x4", "x5", "x6", "x7",
131   "x8", "x9", "x10", "x11",
132   "x12", "x13", "x14", "x15",
133   "x16", "x17", "x18", "x19",
134   "x20", "x21", "x22", "x23",
135   "x24", "x25", "x26", "x27",
136   "x28", "x29", "x30", "sp",
137   "pc", "cpsr"
138 };
139 
140 /* The FP/SIMD 'V' registers.  */
141 static const char *const aarch64_v_register_names[] =
142 {
143   /* These registers must appear in consecutive RAW register number
144      order and they must begin with AARCH64_V0_REGNUM! */
145   "v0", "v1", "v2", "v3",
146   "v4", "v5", "v6", "v7",
147   "v8", "v9", "v10", "v11",
148   "v12", "v13", "v14", "v15",
149   "v16", "v17", "v18", "v19",
150   "v20", "v21", "v22", "v23",
151   "v24", "v25", "v26", "v27",
152   "v28", "v29", "v30", "v31",
153   "fpsr",
154   "fpcr"
155 };
156 
157 /* The SVE 'Z' and 'P' registers.  */
158 static const char *const aarch64_sve_register_names[] =
159 {
160   /* These registers must appear in consecutive RAW register number
161      order and they must begin with AARCH64_SVE_Z0_REGNUM! */
162   "z0", "z1", "z2", "z3",
163   "z4", "z5", "z6", "z7",
164   "z8", "z9", "z10", "z11",
165   "z12", "z13", "z14", "z15",
166   "z16", "z17", "z18", "z19",
167   "z20", "z21", "z22", "z23",
168   "z24", "z25", "z26", "z27",
169   "z28", "z29", "z30", "z31",
170   "fpsr", "fpcr",
171   "p0", "p1", "p2", "p3",
172   "p4", "p5", "p6", "p7",
173   "p8", "p9", "p10", "p11",
174   "p12", "p13", "p14", "p15",
175   "ffr", "vg"
176 };
177 
178 /* AArch64 prologue cache structure.  */
179 struct aarch64_prologue_cache
180 {
181   /* The program counter at the start of the function.  It is used to
182      identify this frame as a prologue frame.  */
183   CORE_ADDR func;
184 
185   /* The program counter at the time this frame was created; i.e. where
186      this function was called from.  It is used to identify this frame as a
187      stub frame.  */
188   CORE_ADDR prev_pc;
189 
190   /* The stack pointer at the time this frame was created; i.e. the
191      caller's stack pointer when this function was called.  It is used
192      to identify this frame.  */
193   CORE_ADDR prev_sp;
194 
195   /* Is the target available to read from?  */
196   int available_p;
197 
198   /* The frame base for this frame is just prev_sp - frame size.
199      FRAMESIZE is the distance from the frame pointer to the
200      initial stack pointer.  */
201   int framesize;
202 
203   /* The register used to hold the frame pointer for this frame.  */
204   int framereg;
205 
206   /* Saved register offsets.  */
207   struct trad_frame_saved_reg *saved_regs;
208 };
209 
210 static void
211 show_aarch64_debug (struct ui_file *file, int from_tty,
212                     struct cmd_list_element *c, const char *value)
213 {
214   fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
215 }
216 
217 namespace {
218 
219 /* Abstract instruction reader.  */
220 
221 class abstract_instruction_reader
222 {
223 public:
224   /* Read in one instruction.  */
225   virtual ULONGEST read (CORE_ADDR memaddr, int len,
226 			 enum bfd_endian byte_order) = 0;
227 };
228 
229 /* Instruction reader from real target.  */
230 
231 class instruction_reader : public abstract_instruction_reader
232 {
233  public:
234   ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
235     override
236   {
237     return read_code_unsigned_integer (memaddr, len, byte_order);
238   }
239 };
240 
241 } // namespace
242 
243 /* Analyze a prologue, looking for a recognizable stack frame
244    and frame pointer.  Scan until we encounter a store that could
245    clobber the stack frame unexpectedly, or an unknown instruction.  */
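/* As an illustration (mirroring the selftests further below), a typical
   compiler-generated prologue that this scanner can follow is:

	stp	x29, x30, [sp, #-32]!	// push the frame record
	mov	x29, sp			// establish the frame pointer
	str	x19, [sp, #16]		// save a callee-saved register

   Analysis stops at the first branch or unrecognized instruction.  */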
246 
247 static CORE_ADDR
248 aarch64_analyze_prologue (struct gdbarch *gdbarch,
249 			  CORE_ADDR start, CORE_ADDR limit,
250 			  struct aarch64_prologue_cache *cache,
251 			  abstract_instruction_reader& reader)
252 {
253   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
254   int i;
255   /* Track X registers and D registers in prologue.  */
256   pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
257 
258   for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
259     regs[i] = pv_register (i, 0);
260   pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
261 
262   for (; start < limit; start += 4)
263     {
264       uint32_t insn;
265       aarch64_inst inst;
266 
267       insn = reader.read (start, 4, byte_order_for_code);
268 
269       if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
270 	break;
271 
272       if (inst.opcode->iclass == addsub_imm
273 	  && (inst.opcode->op == OP_ADD
274 	      || strcmp ("sub", inst.opcode->name) == 0))
275 	{
276 	  unsigned rd = inst.operands[0].reg.regno;
277 	  unsigned rn = inst.operands[1].reg.regno;
278 
279 	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
280 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
281 	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
282 	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
283 
284 	  if (inst.opcode->op == OP_ADD)
285 	    {
286 	      regs[rd] = pv_add_constant (regs[rn],
287 					  inst.operands[2].imm.value);
288 	    }
289 	  else
290 	    {
291 	      regs[rd] = pv_add_constant (regs[rn],
292 					  -inst.operands[2].imm.value);
293 	    }
294 	}
295       else if (inst.opcode->iclass == pcreladdr
296 	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
297 	{
298 	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
299 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
300 
301 	  regs[inst.operands[0].reg.regno] = pv_unknown ();
302 	}
303       else if (inst.opcode->iclass == branch_imm)
304 	{
305 	  /* Stop analysis on branch.  */
306 	  break;
307 	}
308       else if (inst.opcode->iclass == condbranch)
309 	{
310 	  /* Stop analysis on branch.  */
311 	  break;
312 	}
313       else if (inst.opcode->iclass == branch_reg)
314 	{
315 	  /* Stop analysis on branch.  */
316 	  break;
317 	}
318       else if (inst.opcode->iclass == compbranch)
319 	{
320 	  /* Stop analysis on branch.  */
321 	  break;
322 	}
323       else if (inst.opcode->op == OP_MOVZ)
324 	{
325 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
326 	  regs[inst.operands[0].reg.regno] = pv_unknown ();
327 	}
328       else if (inst.opcode->iclass == log_shift
329 	       && strcmp (inst.opcode->name, "orr") == 0)
330 	{
331 	  unsigned rd = inst.operands[0].reg.regno;
332 	  unsigned rn = inst.operands[1].reg.regno;
333 	  unsigned rm = inst.operands[2].reg.regno;
334 
335 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
336 	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
337 	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
338 
339 	  if (inst.operands[2].shifter.amount == 0
340 	      && rn == AARCH64_SP_REGNUM)
341 	    regs[rd] = regs[rm];
342 	  else
343 	    {
344 	      if (aarch64_debug)
345 		{
346 		  debug_printf ("aarch64: prologue analysis gave up "
347 				"addr=%s opcode=0x%x (orr x register)\n",
348 				core_addr_to_string_nz (start), insn);
349 		}
350 	      break;
351 	    }
352 	}
353       else if (inst.opcode->op == OP_STUR)
354 	{
355 	  unsigned rt = inst.operands[0].reg.regno;
356 	  unsigned rn = inst.operands[1].addr.base_regno;
357 	  int is64
358 	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
359 
360 	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
361 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
362 	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
363 	  gdb_assert (!inst.operands[1].addr.offset.is_reg);
364 
365 	  stack.store (pv_add_constant (regs[rn],
366 					inst.operands[1].addr.offset.imm),
367 		       is64 ? 8 : 4, regs[rt]);
368 	}
369       else if ((inst.opcode->iclass == ldstpair_off
370 		|| (inst.opcode->iclass == ldstpair_indexed
371 		    && inst.operands[2].addr.preind))
372 	       && strcmp ("stp", inst.opcode->name) == 0)
373 	{
374 	  /* STP with signed-offset or pre-indexed addressing mode.  */
375 	  unsigned rt1;
376 	  unsigned rt2;
377 	  unsigned rn = inst.operands[2].addr.base_regno;
378 	  int32_t imm = inst.operands[2].addr.offset.imm;
379 
380 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
381 		      || inst.operands[0].type == AARCH64_OPND_Ft);
382 	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
383 		      || inst.operands[1].type == AARCH64_OPND_Ft2);
384 	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
385 	  gdb_assert (!inst.operands[2].addr.offset.is_reg);
386 
387 	  /* If recording this store would invalidate the store area
388 	     (perhaps because rn is not known) then we should abandon
389 	     further prologue analysis.  */
390 	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
391 	    break;
392 
393 	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
394 	    break;
395 
396 	  rt1 = inst.operands[0].reg.regno;
397 	  rt2 = inst.operands[1].reg.regno;
398 	  if (inst.operands[0].type == AARCH64_OPND_Ft)
399 	    {
400 	      /* Only the bottom 64 bits of each V register (the D
401 		 register) need to be preserved.  */
402 	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
403 	      rt1 += AARCH64_X_REGISTER_COUNT;
404 	      rt2 += AARCH64_X_REGISTER_COUNT;
405 	    }
406 
407 	  stack.store (pv_add_constant (regs[rn], imm), 8,
408 		       regs[rt1]);
409 	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,
410 		       regs[rt2]);
411 
412 	  if (inst.operands[2].addr.writeback)
413 	    regs[rn] = pv_add_constant (regs[rn], imm);
414 
415 	}
416       else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
417 		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
418 		    && (inst.opcode->op == OP_STR_POS
419 			|| inst.opcode->op == OP_STRF_POS)))
420 	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
421 	       && strcmp ("str", inst.opcode->name) == 0)
422 	{
423 	  /* STR (immediate) */
424 	  unsigned int rt = inst.operands[0].reg.regno;
425 	  int32_t imm = inst.operands[1].addr.offset.imm;
426 	  unsigned int rn = inst.operands[1].addr.base_regno;
427 	  bool is64
428 	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
429 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
430 		      || inst.operands[0].type == AARCH64_OPND_Ft);
431 
432 	  if (inst.operands[0].type == AARCH64_OPND_Ft)
433 	    {
434 	      /* Only the bottom 64 bits of each V register (the D
435 		 register) need to be preserved.  */
436 	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
437 	      rt += AARCH64_X_REGISTER_COUNT;
438 	    }
439 
440 	  stack.store (pv_add_constant (regs[rn], imm),
441 		       is64 ? 8 : 4, regs[rt]);
442 	  if (inst.operands[1].addr.writeback)
443 	    regs[rn] = pv_add_constant (regs[rn], imm);
444 	}
445       else if (inst.opcode->iclass == testbranch)
446 	{
447 	  /* Stop analysis on branch.  */
448 	  break;
449 	}
450       else
451 	{
452 	  if (aarch64_debug)
453 	    {
454 	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
455 			    " opcode=0x%x\n",
456 			    core_addr_to_string_nz (start), insn);
457 	    }
458 	  break;
459 	}
460     }
461 
462   if (cache == NULL)
463     return start;
464 
465   if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
466     {
467       /* Frame pointer is fp.  Frame size is constant.  */
468       cache->framereg = AARCH64_FP_REGNUM;
469       cache->framesize = -regs[AARCH64_FP_REGNUM].k;
470     }
471   else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
472     {
473       /* Try the stack pointer.  */
474       cache->framesize = -regs[AARCH64_SP_REGNUM].k;
475       cache->framereg = AARCH64_SP_REGNUM;
476     }
477   else
478     {
479       /* We're just out of luck.  We don't know where the frame is.  */
480       cache->framereg = -1;
481       cache->framesize = 0;
482     }
483 
484   for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
485     {
486       CORE_ADDR offset;
487 
488       if (stack.find_reg (gdbarch, i, &offset))
489 	cache->saved_regs[i].addr = offset;
490     }
491 
492   for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
493     {
494       int regnum = gdbarch_num_regs (gdbarch);
495       CORE_ADDR offset;
496 
497       if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
498 			  &offset))
499 	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
500     }
501 
502   return start;
503 }
504 
505 static CORE_ADDR
506 aarch64_analyze_prologue (struct gdbarch *gdbarch,
507 			  CORE_ADDR start, CORE_ADDR limit,
508 			  struct aarch64_prologue_cache *cache)
509 {
510   instruction_reader reader;
511 
512   return aarch64_analyze_prologue (gdbarch, start, limit, cache,
513 				   reader);
514 }
515 
516 #if GDB_SELF_TEST
517 
518 namespace selftests {
519 
520 /* Instruction reader from manually cooked instruction sequences.  */
521 
522 class instruction_reader_test : public abstract_instruction_reader
523 {
524 public:
525   template<size_t SIZE>
526   explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
527   : m_insns (insns), m_insns_size (SIZE)
528   {}
529 
530   ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
531     override
532   {
533     SELF_CHECK (len == 4);
534     SELF_CHECK (memaddr % 4 == 0);
535     SELF_CHECK (memaddr / 4 < m_insns_size);
536 
537     return m_insns[memaddr / 4];
538   }
539 
540 private:
541   const uint32_t *m_insns;
542   size_t m_insns_size;
543 };
544 
545 static void
546 aarch64_analyze_prologue_test (void)
547 {
548   struct gdbarch_info info;
549 
550   gdbarch_info_init (&info);
551   info.bfd_arch_info = bfd_scan_arch ("aarch64");
552 
553   struct gdbarch *gdbarch = gdbarch_find_by_info (info);
554   SELF_CHECK (gdbarch != NULL);
555 
556   /* Test a simple prologue in which the frame pointer is used.  */
557   {
558     struct aarch64_prologue_cache cache;
559     cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
560 
561     static const uint32_t insns[] = {
562       0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
563       0x910003fd, /* mov     x29, sp */
564       0x97ffffe6, /* bl      0x400580 */
565     };
566     instruction_reader_test reader (insns);
567 
568     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
569     SELF_CHECK (end == 4 * 2);
570 
571     SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
572     SELF_CHECK (cache.framesize == 272);
573 
574     for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
575       {
576 	if (i == AARCH64_FP_REGNUM)
577 	  SELF_CHECK (cache.saved_regs[i].addr == -272);
578 	else if (i == AARCH64_LR_REGNUM)
579 	  SELF_CHECK (cache.saved_regs[i].addr == -264);
580 	else
581 	  SELF_CHECK (cache.saved_regs[i].addr == -1);
582       }
583 
584     for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
585       {
586 	int regnum = gdbarch_num_regs (gdbarch);
587 
588 	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
589 		    == -1);
590       }
591   }
592 
593   /* Test a prologue in which STR is used and the frame pointer is not
594      used.  */
595   {
596     struct aarch64_prologue_cache cache;
597     cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
598 
599     static const uint32_t insns[] = {
600       0xf81d0ff3, /* str	x19, [sp, #-48]! */
601       0xb9002fe0, /* str	w0, [sp, #44] */
602       0xf90013e1, /* str	x1, [sp, #32]*/
603       0xfd000fe0, /* str	d0, [sp, #24] */
604       0xaa0203f3, /* mov	x19, x2 */
605       0xf94013e0, /* ldr	x0, [sp, #32] */
606     };
607     instruction_reader_test reader (insns);
608 
609     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
610 
611     SELF_CHECK (end == 4 * 5);
612 
613     SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
614     SELF_CHECK (cache.framesize == 48);
615 
616     for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
617       {
618 	if (i == 1)
619 	  SELF_CHECK (cache.saved_regs[i].addr == -16);
620 	else if (i == 19)
621 	  SELF_CHECK (cache.saved_regs[i].addr == -48);
622 	else
623 	  SELF_CHECK (cache.saved_regs[i].addr == -1);
624       }
625 
626     for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
627       {
628 	int regnum = gdbarch_num_regs (gdbarch);
629 
630 	if (i == 0)
631 	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
632 		      == -24);
633 	else
634 	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
635 		      == -1);
636       }
637   }
638 }
639 } // namespace selftests
640 #endif /* GDB_SELF_TEST */
641 
642 /* Implement the "skip_prologue" gdbarch method.  */
643 
644 static CORE_ADDR
645 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
646 {
647   CORE_ADDR func_addr, limit_pc;
648 
649   /* See if we can determine the end of the prologue via the symbol
650      table.  If so, then return either PC, or the PC after the
651      prologue, whichever is greater.  */
652   if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
653     {
654       CORE_ADDR post_prologue_pc
655 	= skip_prologue_using_sal (gdbarch, func_addr);
656 
657       if (post_prologue_pc != 0)
658 	return std::max (pc, post_prologue_pc);
659     }
660 
661   /* Can't determine prologue from the symbol table, need to examine
662      instructions.  */
663 
664   /* Find an upper limit on the function prologue using the debug
665      information.  If the debug information could not be used to
666      provide that bound, then use an arbitrary large number as the
667      upper bound.  */
668   limit_pc = skip_prologue_using_sal (gdbarch, pc);
669   if (limit_pc == 0)
670     limit_pc = pc + 128;	/* Magic.  */
671 
672   /* Try disassembling prologue.  */
673   return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
674 }
675 
676 /* Scan the function prologue for THIS_FRAME and populate the prologue
677    cache CACHE.  */
678 
679 static void
680 aarch64_scan_prologue (struct frame_info *this_frame,
681 		       struct aarch64_prologue_cache *cache)
682 {
683   CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
684   CORE_ADDR prologue_start;
685   CORE_ADDR prologue_end;
686   CORE_ADDR prev_pc = get_frame_pc (this_frame);
687   struct gdbarch *gdbarch = get_frame_arch (this_frame);
688 
689   cache->prev_pc = prev_pc;
690 
691   /* Assume we do not find a frame.  */
692   cache->framereg = -1;
693   cache->framesize = 0;
694 
695   if (find_pc_partial_function (block_addr, NULL, &prologue_start,
696 				&prologue_end))
697     {
698       struct symtab_and_line sal = find_pc_line (prologue_start, 0);
699 
700       if (sal.line == 0)
701 	{
702 	  /* No line info so use the current PC.  */
703 	  prologue_end = prev_pc;
704 	}
705       else if (sal.end < prologue_end)
706 	{
707 	  /* The next line begins after the function end.  */
708 	  prologue_end = sal.end;
709 	}
710 
711       prologue_end = std::min (prologue_end, prev_pc);
712       aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
713     }
714   else
715     {
716       CORE_ADDR frame_loc;
717 
718       frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
719       if (frame_loc == 0)
720 	return;
721 
722       cache->framereg = AARCH64_FP_REGNUM;
723       cache->framesize = 16;
724       cache->saved_regs[29].addr = 0;
725       cache->saved_regs[30].addr = 8;
726     }
727 }
728 
729 /* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
730    function may throw an exception if the inferior's registers or memory is
731    not available.  */
732 
733 static void
734 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
735 			       struct aarch64_prologue_cache *cache)
736 {
737   CORE_ADDR unwound_fp;
738   int reg;
739 
740   aarch64_scan_prologue (this_frame, cache);
741 
742   if (cache->framereg == -1)
743     return;
744 
745   unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
746   if (unwound_fp == 0)
747     return;
748 
749   cache->prev_sp = unwound_fp + cache->framesize;
750 
751   /* Calculate actual addresses of saved registers using offsets
752      determined by aarch64_analyze_prologue.  */
753   for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
754     if (trad_frame_addr_p (cache->saved_regs, reg))
755       cache->saved_regs[reg].addr += cache->prev_sp;
756 
757   cache->func = get_frame_func (this_frame);
758 
759   cache->available_p = 1;
760 }
761 
762 /* Allocate and fill in *THIS_CACHE with information about the prologue of
763    *THIS_FRAME.  Do not do this if *THIS_CACHE is already allocated.
764    Return a pointer to the current aarch64_prologue_cache in
765    *THIS_CACHE.  */
766 
767 static struct aarch64_prologue_cache *
768 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
769 {
770   struct aarch64_prologue_cache *cache;
771 
772   if (*this_cache != NULL)
773     return (struct aarch64_prologue_cache *) *this_cache;
774 
775   cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
776   cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
777   *this_cache = cache;
778 
779   TRY
780     {
781       aarch64_make_prologue_cache_1 (this_frame, cache);
782     }
783   CATCH (ex, RETURN_MASK_ERROR)
784     {
785       if (ex.error != NOT_AVAILABLE_ERROR)
786 	throw_exception (ex);
787     }
788   END_CATCH
789 
790   return cache;
791 }
792 
793 /* Implement the "stop_reason" frame_unwind method.  */
794 
795 static enum unwind_stop_reason
796 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
797 					   void **this_cache)
798 {
799   struct aarch64_prologue_cache *cache
800     = aarch64_make_prologue_cache (this_frame, this_cache);
801 
802   if (!cache->available_p)
803     return UNWIND_UNAVAILABLE;
804 
805   /* Halt the backtrace at "_start".  */
806   if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
807     return UNWIND_OUTERMOST;
808 
809   /* We've hit a wall, stop.  */
810   if (cache->prev_sp == 0)
811     return UNWIND_OUTERMOST;
812 
813   return UNWIND_NO_REASON;
814 }
815 
816 /* Our frame ID for a normal frame is the current function's starting
817    PC and the caller's SP when we were called.  */
818 
819 static void
820 aarch64_prologue_this_id (struct frame_info *this_frame,
821 			  void **this_cache, struct frame_id *this_id)
822 {
823   struct aarch64_prologue_cache *cache
824     = aarch64_make_prologue_cache (this_frame, this_cache);
825 
826   if (!cache->available_p)
827     *this_id = frame_id_build_unavailable_stack (cache->func);
828   else
829     *this_id = frame_id_build (cache->prev_sp, cache->func);
830 }
831 
832 /* Implement the "prev_register" frame_unwind method.  */
833 
834 static struct value *
835 aarch64_prologue_prev_register (struct frame_info *this_frame,
836 				void **this_cache, int prev_regnum)
837 {
838   struct aarch64_prologue_cache *cache
839     = aarch64_make_prologue_cache (this_frame, this_cache);
840 
841   /* If we are asked to unwind the PC, then we need to return the LR
842      instead.  The prologue may save PC, but it will point into this
843      frame's prologue, not the next frame's resume location.  */
844   if (prev_regnum == AARCH64_PC_REGNUM)
845     {
846       CORE_ADDR lr;
847 
848       lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
849       return frame_unwind_got_constant (this_frame, prev_regnum, lr);
850     }
851 
852   /* SP is generally not saved to the stack, but this frame is
853      identified by the next frame's stack pointer at the time of the
854      call.  The value was already reconstructed into PREV_SP.  */
855   /*
856          +----------+  ^
857          | saved lr |  |
858       +->| saved fp |--+
859       |  |          |
860       |  |          |     <- Previous SP
861       |  +----------+
862       |  | saved lr |
863       +--| saved fp |<- FP
864          |          |
865          |          |<- SP
866          +----------+  */
867   if (prev_regnum == AARCH64_SP_REGNUM)
868     return frame_unwind_got_constant (this_frame, prev_regnum,
869 				      cache->prev_sp);
870 
871   return trad_frame_get_prev_register (this_frame, cache->saved_regs,
872 				       prev_regnum);
873 }
874 
875 /* AArch64 prologue unwinder.  */
876 struct frame_unwind aarch64_prologue_unwind =
877 {
878   NORMAL_FRAME,
879   aarch64_prologue_frame_unwind_stop_reason,
880   aarch64_prologue_this_id,
881   aarch64_prologue_prev_register,
882   NULL,
883   default_frame_sniffer
884 };
885 
886 /* Allocate and fill in *THIS_CACHE with information about the prologue of
887    *THIS_FRAME.  Do not do this if *THIS_CACHE is already allocated.
888    Return a pointer to the current aarch64_prologue_cache in
889    *THIS_CACHE.  */
890 
891 static struct aarch64_prologue_cache *
892 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
893 {
894   struct aarch64_prologue_cache *cache;
895 
896   if (*this_cache != NULL)
897     return (struct aarch64_prologue_cache *) *this_cache;
898 
899   cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
900   cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
901   *this_cache = cache;
902 
903   TRY
904     {
905       cache->prev_sp = get_frame_register_unsigned (this_frame,
906 						    AARCH64_SP_REGNUM);
907       cache->prev_pc = get_frame_pc (this_frame);
908       cache->available_p = 1;
909     }
910   CATCH (ex, RETURN_MASK_ERROR)
911     {
912       if (ex.error != NOT_AVAILABLE_ERROR)
913 	throw_exception (ex);
914     }
915   END_CATCH
916 
917   return cache;
918 }
919 
920 /* Implement the "stop_reason" frame_unwind method.  */
921 
922 static enum unwind_stop_reason
923 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
924 				       void **this_cache)
925 {
926   struct aarch64_prologue_cache *cache
927     = aarch64_make_stub_cache (this_frame, this_cache);
928 
929   if (!cache->available_p)
930     return UNWIND_UNAVAILABLE;
931 
932   return UNWIND_NO_REASON;
933 }
934 
935 /* Our frame ID for a stub frame is the current SP and LR.  */
936 
937 static void
938 aarch64_stub_this_id (struct frame_info *this_frame,
939 		      void **this_cache, struct frame_id *this_id)
940 {
941   struct aarch64_prologue_cache *cache
942     = aarch64_make_stub_cache (this_frame, this_cache);
943 
944   if (cache->available_p)
945     *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
946   else
947     *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
948 }
949 
950 /* Implement the "sniffer" frame_unwind method.  */
951 
952 static int
953 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
954 			     struct frame_info *this_frame,
955 			     void **this_prologue_cache)
956 {
957   CORE_ADDR addr_in_block;
958   gdb_byte dummy[4];
959 
960   addr_in_block = get_frame_address_in_block (this_frame);
961   if (in_plt_section (addr_in_block)
962       /* We also use the stub unwinder if the target memory is unreadable
963 	 to avoid having the prologue unwinder try to read it.  */
964       || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
965     return 1;
966 
967   return 0;
968 }
969 
970 /* AArch64 stub unwinder.  */
971 struct frame_unwind aarch64_stub_unwind =
972 {
973   NORMAL_FRAME,
974   aarch64_stub_frame_unwind_stop_reason,
975   aarch64_stub_this_id,
976   aarch64_prologue_prev_register,
977   NULL,
978   aarch64_stub_unwind_sniffer
979 };
980 
981 /* Return the frame base address of *THIS_FRAME.  */
982 
983 static CORE_ADDR
984 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
985 {
986   struct aarch64_prologue_cache *cache
987     = aarch64_make_prologue_cache (this_frame, this_cache);
988 
989   return cache->prev_sp - cache->framesize;
990 }
991 
992 /* AArch64 default frame base information.  */
993 struct frame_base aarch64_normal_base =
994 {
995   &aarch64_prologue_unwind,
996   aarch64_normal_frame_base,
997   aarch64_normal_frame_base,
998   aarch64_normal_frame_base
999 };
1000 
1001 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1002    dummy frame.  The frame ID's base needs to match the TOS value
1003    saved by save_dummy_frame_tos () and returned from
1004    aarch64_push_dummy_call, and the PC needs to match the dummy
1005    frame's breakpoint.  */
1006 
1007 static struct frame_id
1008 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1009 {
1010   return frame_id_build (get_frame_register_unsigned (this_frame,
1011 						      AARCH64_SP_REGNUM),
1012 			 get_frame_pc (this_frame));
1013 }
1014 
1015 /* Implement the "unwind_pc" gdbarch method.  */
1016 
1017 static CORE_ADDR
1018 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1019 {
1020   CORE_ADDR pc
1021     = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1022 
1023   return pc;
1024 }
1025 
1026 /* Implement the "unwind_sp" gdbarch method.  */
1027 
1028 static CORE_ADDR
1029 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1030 {
1031   return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1032 }
1033 
1034 /* Return the value of the REGNUM register in the previous frame of
1035    *THIS_FRAME.  */
1036 
1037 static struct value *
1038 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1039 			      void **this_cache, int regnum)
1040 {
1041   CORE_ADDR lr;
1042 
1043   switch (regnum)
1044     {
1045     case AARCH64_PC_REGNUM:
1046       lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1047       return frame_unwind_got_constant (this_frame, regnum, lr);
1048 
1049     default:
1050       internal_error (__FILE__, __LINE__,
1051 		      _("Unexpected register %d"), regnum);
1052     }
1053 }
1054 
1055 /* Implement the "init_reg" dwarf2_frame_ops method.  */
1056 
1057 static void
1058 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1059 			       struct dwarf2_frame_state_reg *reg,
1060 			       struct frame_info *this_frame)
1061 {
1062   switch (regnum)
1063     {
1064     case AARCH64_PC_REGNUM:
1065       reg->how = DWARF2_FRAME_REG_FN;
1066       reg->loc.fn = aarch64_dwarf2_prev_register;
1067       break;
1068     case AARCH64_SP_REGNUM:
1069       reg->how = DWARF2_FRAME_REG_CFA;
1070       break;
1071     }
1072 }
1073 
1074 /* When arguments must be pushed onto the stack, they go on in reverse
1075    order.  The code below implements a FILO (stack) to do this.  */
1076 
1077 typedef struct
1078 {
1079   /* Value to pass on stack.  It can be NULL if this item is for stack
1080      padding.  */
1081   const gdb_byte *data;
1082 
1083   /* Size in bytes of value to pass on stack.  */
1084   int len;
1085 } stack_item_t;
1086 
1087 DEF_VEC_O (stack_item_t);
1088 
1089 /* Return the alignment (in bytes) of the given type.  */
1090 
1091 static int
1092 aarch64_type_align (struct type *t)
1093 {
1094   int n;
1095   int align;
1096   int falign;
1097 
1098   t = check_typedef (t);
1099   switch (TYPE_CODE (t))
1100     {
1101     default:
1102       /* Should never happen.  */
1103       internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1104       return 4;
1105 
1106     case TYPE_CODE_PTR:
1107     case TYPE_CODE_ENUM:
1108     case TYPE_CODE_INT:
1109     case TYPE_CODE_FLT:
1110     case TYPE_CODE_SET:
1111     case TYPE_CODE_RANGE:
1112     case TYPE_CODE_BITSTRING:
1113     case TYPE_CODE_REF:
1114     case TYPE_CODE_RVALUE_REF:
1115     case TYPE_CODE_CHAR:
1116     case TYPE_CODE_BOOL:
1117       return TYPE_LENGTH (t);
1118 
1119     case TYPE_CODE_ARRAY:
1120       if (TYPE_VECTOR (t))
1121 	{
1122 	  /* Use the natural alignment for vector types (the same as for a
1123 	     scalar type of that length), capped at 128 bits.  */
1124 	  if (TYPE_LENGTH (t) > 16)
1125 	    return 16;
1126 	  else
1127 	    return TYPE_LENGTH (t);
1128 	}
1129       else
1130 	return aarch64_type_align (TYPE_TARGET_TYPE (t));
1131     case TYPE_CODE_COMPLEX:
1132       return aarch64_type_align (TYPE_TARGET_TYPE (t));
1133 
1134     case TYPE_CODE_STRUCT:
1135     case TYPE_CODE_UNION:
1136       align = 1;
1137       for (n = 0; n < TYPE_NFIELDS (t); n++)
1138 	{
1139 	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1140 	  if (falign > align)
1141 	    align = falign;
1142 	}
1143       return align;
1144     }
1145 }
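/* For example, by the rules above, a struct { char c; double d; } is
   aligned to 8 bytes (its largest field alignment), while a 32-byte
   vector type is capped at the 16-byte maximum.  */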
1146 
1147 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1148 
1149    Return the number of registers required, or -1 on failure.
1150 
1151    When a base element is encountered, set *FUNDAMENTAL_TYPE to it if not
1152    already set; otherwise fail if this element's type does not match the
1153    existing value.  */
1154 
1155 static int
1156 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1157 					 struct type **fundamental_type)
1158 {
1159   if (type == nullptr)
1160     return -1;
1161 
1162   switch (TYPE_CODE (type))
1163     {
1164     case TYPE_CODE_FLT:
1165       if (TYPE_LENGTH (type) > 16)
1166 	return -1;
1167 
1168       if (*fundamental_type == nullptr)
1169 	*fundamental_type = type;
1170       else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1171 	       || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1172 	return -1;
1173 
1174       return 1;
1175 
1176     case TYPE_CODE_COMPLEX:
1177       {
1178 	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1179 	if (TYPE_LENGTH (target_type) > 16)
1180 	  return -1;
1181 
1182 	if (*fundamental_type == nullptr)
1183 	  *fundamental_type = target_type;
1184 	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1185 		 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
1186 	  return -1;
1187 
1188 	return 2;
1189       }
1190 
1191     case TYPE_CODE_ARRAY:
1192       {
1193 	if (TYPE_VECTOR (type))
1194 	  {
1195 	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1196 	      return -1;
1197 
1198 	    if (*fundamental_type == nullptr)
1199 	      *fundamental_type = type;
1200 	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1201 		     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1202 	      return -1;
1203 
1204 	    return 1;
1205 	  }
1206 	else
1207 	  {
1208 	    struct type *target_type = TYPE_TARGET_TYPE (type);
1209 	    int count = aapcs_is_vfp_call_or_return_candidate_1
1210 			  (target_type, fundamental_type);
1211 
1212 	    if (count == -1)
1213 	      return count;
1214 
1215 	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1216 	    return count;
1217 	  }
1218       }
1219 
1220     case TYPE_CODE_STRUCT:
1221     case TYPE_CODE_UNION:
1222       {
1223 	int count = 0;
1224 
1225 	for (int i = 0; i < TYPE_NFIELDS (type); i++)
1226 	  {
1227 	    /* Ignore any static fields.  */
1228 	    if (field_is_static (&TYPE_FIELD (type, i)))
1229 	      continue;
1230 
1231 	    struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1232 
1233 	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1234 			      (member, fundamental_type);
1235 	    if (sub_count == -1)
1236 	      return -1;
1237 	    count += sub_count;
1238 	  }
1239 
1240 	/* Ensure there is no padding between the fields (allowing for empty
1241 	   zero-length structs).  */
1242 	int ftype_length = (*fundamental_type == nullptr)
1243 			   ? 0 : TYPE_LENGTH (*fundamental_type);
1244 	if (count * ftype_length != TYPE_LENGTH (type))
1245 	  return -1;
1246 
1247 	return count;
1248       }
1249 
1250     default:
1251       break;
1252     }
1253 
1254   return -1;
1255 }
1256 
1257 /* Return true if an argument, whose type is described by TYPE, can be passed or
1258    returned in SIMD/FP registers, provided enough parameter-passing registers
1259    are available.  This is as described in the AAPCS64.
1260 
1261    Upon successful return, *COUNT is set to the number of registers needed
1262    and *FUNDAMENTAL_TYPE to the type of those registers.
1263 
1264    A candidate, as per AAPCS64 5.4.2.C, is one of:
1265    - a float.
1266    - a short vector.
1267    - an HFA (Homogeneous Floating-point Aggregate, 4.3.5.1): a composite
1268      type whose members are all floats, with at most 4 members.
1269    - an HVA (Homogeneous Short-vector Aggregate, 4.3.5.2): a composite
1270      type whose members are all short vectors, with at most 4 members.
1271    - a complex type (7.1.1).
1272 
1273    Note that HFAs and HVAs can include nested structures and arrays; for
       example, struct { float x, y, z; } is an HFA of fundamental type
       float with count 3.  */
1274 
1275 static bool
1276 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1277 				       struct type **fundamental_type)
1278 {
1279   if (type == nullptr)
1280     return false;
1281 
1282   *fundamental_type = nullptr;
1283 
1284   int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1285 							  fundamental_type);
1286 
1287   if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1288     {
1289       *count = ag_count;
1290       return true;
1291     }
1292   else
1293     return false;
1294 }
1295 
1296 /* AArch64 function call information structure.  */
1297 struct aarch64_call_info
1298 {
1299   /* The current argument number.  */
1300   unsigned argnum;
1301 
1302   /* The next general purpose register number, equivalent to NGRN as
1303      described in the AArch64 Procedure Call Standard.  */
1304   unsigned ngrn;
1305 
1306   /* The next SIMD and floating point register number, equivalent to
1307      NSRN as described in the AArch64 Procedure Call Standard.  */
1308   unsigned nsrn;
1309 
1310   /* The next stacked argument address, equivalent to NSAA as
1311      described in the AArch64 Procedure Call Standard.  */
1312   unsigned nsaa;
1313 
1314   /* Stack item vector.  */
1315   VEC(stack_item_t) *si;
1316 };
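/* As a hypothetical illustration of how these counters evolve: for a
   call f (int a, double b, struct { uint64_t x, y; } c), A is passed in
   X0 (NGRN becomes 1), B in V0 (NSRN becomes 1), and the 16-byte struct
   C in X1-X2 (NGRN becomes 3), leaving NSAA at 0 because nothing spills
   to the stack.  */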
1317 
1318 /* Pass a value in a sequence of consecutive X registers.  The caller
1319    is responsible for ensuring sufficient registers are available.  */
1320 
1321 static void
1322 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1323 	   struct aarch64_call_info *info, struct type *type,
1324 	   struct value *arg)
1325 {
1326   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1327   int len = TYPE_LENGTH (type);
1328   enum type_code typecode = TYPE_CODE (type);
1329   int regnum = AARCH64_X0_REGNUM + info->ngrn;
1330   const bfd_byte *buf = value_contents (arg);
1331 
1332   info->argnum++;
1333 
1334   while (len > 0)
1335     {
1336       int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1337       CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1338 						   byte_order);
1339 
1340 
1341       /* Adjust sub-word struct/union args when big-endian.  */
1342       if (byte_order == BFD_ENDIAN_BIG
1343 	  && partial_len < X_REGISTER_SIZE
1344 	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1345 	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1346 
1347       if (aarch64_debug)
1348 	{
1349 	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1350 			gdbarch_register_name (gdbarch, regnum),
1351 			phex (regval, X_REGISTER_SIZE));
1352 	}
1353       regcache_cooked_write_unsigned (regcache, regnum, regval);
1354       len -= partial_len;
1355       buf += partial_len;
1356       regnum++;
1357     }
1358 }
1359 
1360 /* Attempt to marshall a value in a V register.  Return 1 if
1361    successful, or 0 if insufficient registers are available.  This
1362    function, unlike the equivalent pass_in_x() function does not
1363    handle arguments spread across multiple registers.  */
1364 
1365 static int
1366 pass_in_v (struct gdbarch *gdbarch,
1367 	   struct regcache *regcache,
1368 	   struct aarch64_call_info *info,
1369 	   int len, const bfd_byte *buf)
1370 {
1371   if (info->nsrn < 8)
1372     {
1373       int regnum = AARCH64_V0_REGNUM + info->nsrn;
1374       /* Enough space for a full vector register.  */
1375       gdb_byte reg[register_size (gdbarch, regnum)];
1376       gdb_assert (len <= sizeof (reg));
1377 
1378       info->argnum++;
1379       info->nsrn++;
1380 
1381       memset (reg, 0, sizeof (reg));
1382       /* PCS C.1, the argument is allocated to the least significant
1383 	 bits of the V register.  */
1384       memcpy (reg, buf, len);
1385       regcache->cooked_write (regnum, reg);
1386 
1387       if (aarch64_debug)
1388 	{
1389 	  debug_printf ("arg %d in %s\n", info->argnum,
1390 			gdbarch_register_name (gdbarch, regnum));
1391 	}
1392       return 1;
1393     }
1394   info->nsrn = 8;
1395   return 0;
1396 }
1397 
1398 /* Marshall an argument onto the stack.  */
1399 
1400 static void
1401 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1402 	       struct value *arg)
1403 {
1404   const bfd_byte *buf = value_contents (arg);
1405   int len = TYPE_LENGTH (type);
1406   int align;
1407   stack_item_t item;
1408 
1409   info->argnum++;
1410 
1411   align = aarch64_type_align (type);
1412 
1413   /* PCS C.17: the stack should be aligned to the larger of 8 bytes or
1414      the natural alignment of the argument's type.  */
1415   align = align_up (align, 8);
1416 
1417   /* The AArch64 PCS requires at most doubleword alignment.  */
1418   if (align > 16)
1419     align = 16;
1420 
1421   if (aarch64_debug)
1422     {
1423       debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1424 		    info->nsaa);
1425     }
1426 
1427   item.len = len;
1428   item.data = buf;
1429   VEC_safe_push (stack_item_t, info->si, &item);
1430 
1431   info->nsaa += len;
1432   if (info->nsaa & (align - 1))
1433     {
1434       /* Push stack alignment padding.  */
1435       int pad = align - (info->nsaa & (align - 1));
1436 
1437       item.len = pad;
1438       item.data = NULL;
1439 
1440       VEC_safe_push (stack_item_t, info->si, &item);
1441       info->nsaa += pad;
1442     }
1443 }
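/* For example, starting from NSAA == 0, pushing a 12-byte argument with
   8-byte natural alignment advances NSAA to 12 and then queues a 4-byte
   padding item, leaving NSAA at 16 so the next slot stays aligned.  */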
1444 
1445 /* Marshall an argument into a sequence of one or more consecutive X
1446    registers or, if insufficient X registers are available then onto
1447    the stack.  */
1448 
1449 static void
1450 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1451 		    struct aarch64_call_info *info, struct type *type,
1452 		    struct value *arg)
1453 {
1454   int len = TYPE_LENGTH (type);
1455   int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1456 
1457   /* PCS C.13 - Pass in registers if we have enough spare.  */
1458   if (info->ngrn + nregs <= 8)
1459     {
1460       pass_in_x (gdbarch, regcache, info, type, arg);
1461       info->ngrn += nregs;
1462     }
1463   else
1464     {
1465       info->ngrn = 8;
1466       pass_on_stack (info, type, arg);
1467     }
1468 }
1469 
1470 /* Pass a value of type ARG_TYPE in a V register.  Assumes the value is an
1471    aapcs_is_vfp_call_or_return_candidate and that there are enough spare V
1472    registers.  A return value of false is an error state, as the value will
1473    have been partially passed to the stack.  */
1474 static bool
1475 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1476 			 struct aarch64_call_info *info, struct type *arg_type,
1477 			 struct value *arg)
1478 {
1479   switch (TYPE_CODE (arg_type))
1480     {
1481     case TYPE_CODE_FLT:
1482       return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1483 			value_contents (arg));
1484       break;
1485 
1486     case TYPE_CODE_COMPLEX:
1487       {
1488 	const bfd_byte *buf = value_contents (arg);
1489 	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1490 
1491 	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1492 			buf))
1493 	  return false;
1494 
1495 	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1496 			  buf + TYPE_LENGTH (target_type));
1497       }
1498 
1499     case TYPE_CODE_ARRAY:
1500       if (TYPE_VECTOR (arg_type))
1501 	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1502 			  value_contents (arg));
1503       /* Fall through.  */
1504 
1505     case TYPE_CODE_STRUCT:
1506     case TYPE_CODE_UNION:
1507       for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1508 	{
1509 	  /* Don't include static fields.  */
1510 	  if (field_is_static (&TYPE_FIELD (arg_type, i)))
1511 	    continue;
1512 
1513 	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
1514 	  struct type *field_type = check_typedef (value_type (field));
1515 
1516 	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1517 					field))
1518 	    return false;
1519 	}
1520       return true;
1521 
1522     default:
1523       return false;
1524     }
1525 }
1526 
1527 /* Implement the "push_dummy_call" gdbarch method.  */
1528 
1529 static CORE_ADDR
1530 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1531 			 struct regcache *regcache, CORE_ADDR bp_addr,
1532 			 int nargs,
1533 			 struct value **args, CORE_ADDR sp,
1534 			 function_call_return_method return_method,
1535 			 CORE_ADDR struct_addr)
1536 {
1537   int argnum;
1538   struct aarch64_call_info info;
1539 
1540   memset (&info, 0, sizeof (info));
1541 
1542   /* We need to know what the type of the called function is in order
1543      to determine the number of named/anonymous arguments for the
1544      actual argument placement, and the return type in order to handle
1545      return value correctly.
1546 
1547      The generic code above us views the decision of return in memory
1548      or return in registers as a two-stage process.  The language
1549      handler is consulted first and may decide to return in memory (e.g.
1550      a class with a copy constructor returned by value); this will cause
1551      the generic code to allocate space AND insert an initial leading
1552      argument.
1553 
1554      If the language code does not decide to pass in memory then the
1555      target code is consulted.
1556 
1557      If the language code decides to pass in memory we want to move
1558      the pointer inserted as the initial argument from the argument
1559      list and into X8, the conventional AArch64 struct return pointer
1560      register.  */
1561 
1562   /* Set the return address.  For the AArch64, the return breakpoint
1563      is always at BP_ADDR.  */
1564   regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1565 
1566   /* If we were given an initial argument for the return slot, lose it.  */
1567   if (return_method == return_method_hidden_param)
1568     {
1569       args++;
1570       nargs--;
1571     }
1572 
1573   /* The struct_return pointer occupies X8.  */
1574   if (return_method != return_method_normal)
1575     {
1576       if (aarch64_debug)
1577 	{
1578 	  debug_printf ("struct return in %s = 0x%s\n",
1579 			gdbarch_register_name (gdbarch,
1580 					       AARCH64_STRUCT_RETURN_REGNUM),
1581 			paddress (gdbarch, struct_addr));
1582 	}
1583       regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1584 				      struct_addr);
1585     }
1586 
1587   for (argnum = 0; argnum < nargs; argnum++)
1588     {
1589       struct value *arg = args[argnum];
1590       struct type *arg_type, *fundamental_type;
1591       int len, elements;
1592 
1593       arg_type = check_typedef (value_type (arg));
1594       len = TYPE_LENGTH (arg_type);
1595 
1596       /* If the arg can be passed in V registers as per the AAPCS64, do so
1597 	 if there are enough spare registers.  */
1598       if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1599 						 &fundamental_type))
1600 	{
1601 	  if (info.nsrn + elements <= 8)
1602 	    {
1603 	      /* We know that we have sufficient registers available, so this
1604 		 will never need to fall back to the stack.  */
1605 	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1606 					    arg))
1607 		gdb_assert_not_reached ("Failed to push args");
1608 	    }
1609 	  else
1610 	    {
1611 	      info.nsrn = 8;
1612 	      pass_on_stack (&info, arg_type, arg);
1613 	    }
1614 	  continue;
1615 	}
1616 
1617       switch (TYPE_CODE (arg_type))
1618 	{
1619 	case TYPE_CODE_INT:
1620 	case TYPE_CODE_BOOL:
1621 	case TYPE_CODE_CHAR:
1622 	case TYPE_CODE_RANGE:
1623 	case TYPE_CODE_ENUM:
1624 	  if (len < 4)
1625 	    {
1626 	      /* Promote to 32 bit integer.  */
1627 	      if (TYPE_UNSIGNED (arg_type))
1628 		arg_type = builtin_type (gdbarch)->builtin_uint32;
1629 	      else
1630 		arg_type = builtin_type (gdbarch)->builtin_int32;
1631 	      arg = value_cast (arg_type, arg);
1632 	    }
1633 	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1634 	  break;
1635 
1636 	case TYPE_CODE_STRUCT:
1637 	case TYPE_CODE_ARRAY:
1638 	case TYPE_CODE_UNION:
1639 	  if (len > 16)
1640 	    {
1641 	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
1642 		 invisible reference.  */
1643 
1644 	      /* Allocate aligned storage.  */
1645 	      sp = align_down (sp - len, 16);
1646 
1647 	      /* Write the real data into the stack.  */
1648 	      write_memory (sp, value_contents (arg), len);
1649 
1650 	      /* Construct the indirection.  */
1651 	      arg_type = lookup_pointer_type (arg_type);
1652 	      arg = value_from_pointer (arg_type, sp);
1653 	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1654 	    }
1655 	  else
1656 	    /* PCS C.15 / C.18 - aggregates of at most 16 bytes are passed
	       in registers, or on the stack if none remain.  */
1657 	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1658 	  break;
1659 
1660 	default:
1661 	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1662 	  break;
1663 	}
1664     }
1665 
1666   /* Make sure the stack retains 16-byte alignment.  */
1667   if (info.nsaa & 15)
1668     sp -= 16 - (info.nsaa & 15);
1669 
1670   while (!VEC_empty (stack_item_t, info.si))
1671     {
1672       stack_item_t *si = VEC_last (stack_item_t, info.si);
1673 
1674       sp -= si->len;
1675       if (si->data != NULL)
1676 	write_memory (sp, si->data, si->len);
1677       VEC_pop (stack_item_t, info.si);
1678     }
1679 
1680   VEC_free (stack_item_t, info.si);
1681 
1682   /* Finally, update the SP register.  */
1683   regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1684 
1685   return sp;
1686 }
1687 
1688 /* Implement the "frame_align" gdbarch method.  */
1689 
1690 static CORE_ADDR
1691 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1692 {
1693   /* Align the stack to sixteen bytes.  */
1694   return sp & ~(CORE_ADDR) 15;
1695 }
1696 
1697 /* Return the type for an AdvSIMD Q register.  */
1698 
1699 static struct type *
1700 aarch64_vnq_type (struct gdbarch *gdbarch)
1701 {
1702   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1703 
1704   if (tdep->vnq_type == NULL)
1705     {
1706       struct type *t;
1707       struct type *elem;
1708 
1709       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1710 			       TYPE_CODE_UNION);
1711 
1712       elem = builtin_type (gdbarch)->builtin_uint128;
1713       append_composite_type_field (t, "u", elem);
1714 
1715       elem = builtin_type (gdbarch)->builtin_int128;
1716       append_composite_type_field (t, "s", elem);
1717 
1718       tdep->vnq_type = t;
1719     }
1720 
1721   return tdep->vnq_type;
1722 }
1723 
1724 /* Return the type for an AdvSIMD D register.  */
1725 
1726 static struct type *
1727 aarch64_vnd_type (struct gdbarch *gdbarch)
1728 {
1729   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1730 
1731   if (tdep->vnd_type == NULL)
1732     {
1733       struct type *t;
1734       struct type *elem;
1735 
1736       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1737 			       TYPE_CODE_UNION);
1738 
1739       elem = builtin_type (gdbarch)->builtin_double;
1740       append_composite_type_field (t, "f", elem);
1741 
1742       elem = builtin_type (gdbarch)->builtin_uint64;
1743       append_composite_type_field (t, "u", elem);
1744 
1745       elem = builtin_type (gdbarch)->builtin_int64;
1746       append_composite_type_field (t, "s", elem);
1747 
1748       tdep->vnd_type = t;
1749     }
1750 
1751   return tdep->vnd_type;
1752 }
1753 
1754 /* Return the type for an AdvSISD S register.  */
1755 
1756 static struct type *
1757 aarch64_vns_type (struct gdbarch *gdbarch)
1758 {
1759   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1760 
1761   if (tdep->vns_type == NULL)
1762     {
1763       struct type *t;
1764       struct type *elem;
1765 
1766       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1767 			       TYPE_CODE_UNION);
1768 
1769       elem = builtin_type (gdbarch)->builtin_float;
1770       append_composite_type_field (t, "f", elem);
1771 
1772       elem = builtin_type (gdbarch)->builtin_uint32;
1773       append_composite_type_field (t, "u", elem);
1774 
1775       elem = builtin_type (gdbarch)->builtin_int32;
1776       append_composite_type_field (t, "s", elem);
1777 
1778       tdep->vns_type = t;
1779     }
1780 
1781   return tdep->vns_type;
1782 }
1783 
1784 /* Return the type for an AdvSISD H register.  */
1785 
1786 static struct type *
1787 aarch64_vnh_type (struct gdbarch *gdbarch)
1788 {
1789   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1790 
1791   if (tdep->vnh_type == NULL)
1792     {
1793       struct type *t;
1794       struct type *elem;
1795 
1796       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1797 			       TYPE_CODE_UNION);
1798 
1799       elem = builtin_type (gdbarch)->builtin_uint16;
1800       append_composite_type_field (t, "u", elem);
1801 
1802       elem = builtin_type (gdbarch)->builtin_int16;
1803       append_composite_type_field (t, "s", elem);
1804 
1805       tdep->vnh_type = t;
1806     }
1807 
1808   return tdep->vnh_type;
1809 }
1810 
1811 /* Return the type for an AdvSISD B register.  */
1812 
1813 static struct type *
1814 aarch64_vnb_type (struct gdbarch *gdbarch)
1815 {
1816   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1817 
1818   if (tdep->vnb_type == NULL)
1819     {
1820       struct type *t;
1821       struct type *elem;
1822 
1823       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1824 			       TYPE_CODE_UNION);
1825 
1826       elem = builtin_type (gdbarch)->builtin_uint8;
1827       append_composite_type_field (t, "u", elem);
1828 
1829       elem = builtin_type (gdbarch)->builtin_int8;
1830       append_composite_type_field (t, "s", elem);
1831 
1832       tdep->vnb_type = t;
1833     }
1834 
1835   return tdep->vnb_type;
1836 }
1837 
1838 /* Return the type for an AdvSISD V register.  */
1839 
1840 static struct type *
1841 aarch64_vnv_type (struct gdbarch *gdbarch)
1842 {
1843   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1844 
1845   if (tdep->vnv_type == NULL)
1846     {
1847       struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1848 					    TYPE_CODE_UNION);
1849 
1850       append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1851       append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1852       append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1853       append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1854       append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1855 
1856       tdep->vnv_type = t;
1857     }
1858 
1859   return tdep->vnv_type;
1860 }
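
/* The V type above nests the narrower views, so an expression such
   as "print $v0.d.f" (illustrative) selects the double-precision
   view of the low 64 bits of the register.  */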
1861 
1862 /* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */
1863 
1864 static int
1865 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1866 {
1867   if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1868     return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1869 
1870   if (reg == AARCH64_DWARF_SP)
1871     return AARCH64_SP_REGNUM;
1872 
1873   if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1874     return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1875 
1876   if (reg == AARCH64_DWARF_SVE_VG)
1877     return AARCH64_SVE_VG_REGNUM;
1878 
1879   if (reg == AARCH64_DWARF_SVE_FFR)
1880     return AARCH64_SVE_FFR_REGNUM;
1881 
1882   if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1883     return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1884 
1885   if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1886     return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1887 
1888   return -1;
1889 }
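
/* The ranges above follow the "DWARF for the ARM 64-bit Architecture"
   numbering: 0-30 map to X0-X30, 31 to SP and 64-95 to V0-V31, with
   dedicated numbers for the SVE VG, FFR, P and Z registers; any other
   DWARF register is reported as unmapped (-1).  */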
1890 
1891 /* Implement the "print_insn" gdbarch method.  */
1892 
1893 static int
1894 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1895 {
1896   info->symbols = NULL;
1897   return default_print_insn (memaddr, info);
1898 }
1899 
1900 /* AArch64 BRK software debug mode instruction.
1901    Note that AArch64 code is always little-endian.
1902    1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
1903 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1904 
1905 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1906 
1907 /* Extract from REGS, containing the (raw) register state, a
1908    function return value of type TYPE, and copy that, in virtual
1909    format, into VALBUF.  */
1910 
1911 static void
1912 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1913 			      gdb_byte *valbuf)
1914 {
1915   struct gdbarch *gdbarch = regs->arch ();
1916   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1917   int elements;
1918   struct type *fundamental_type;
1919 
1920   if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1921 					     &fundamental_type))
1922     {
1923       int len = TYPE_LENGTH (fundamental_type);
1924 
1925       for (int i = 0; i < elements; i++)
1926 	{
1927 	  int regno = AARCH64_V0_REGNUM + i;
1928 	  /* Enough space for a full vector register.  */
1929 	  gdb_byte buf[register_size (gdbarch, regno)];
1930 	  gdb_assert (len <= sizeof (buf));
1931 
1932 	  if (aarch64_debug)
1933 	    {
1934 	      debug_printf ("read HFA or HVA return value element %d from %s\n",
1935 			    i + 1,
1936 			    gdbarch_register_name (gdbarch, regno));
1937 	    }
1938 	  regs->cooked_read (regno, buf);
1939 
1940 	  memcpy (valbuf, buf, len);
1941 	  valbuf += len;
1942 	}
1943     }
1944   else if (TYPE_CODE (type) == TYPE_CODE_INT
1945 	   || TYPE_CODE (type) == TYPE_CODE_CHAR
1946 	   || TYPE_CODE (type) == TYPE_CODE_BOOL
1947 	   || TYPE_CODE (type) == TYPE_CODE_PTR
1948 	   || TYPE_IS_REFERENCE (type)
1949 	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
1950     {
1951       /* If the type is a plain integer, then the access is
1952 	 straightforward.  Otherwise we have to play around a bit
1953 	 more.  */
1954       int len = TYPE_LENGTH (type);
1955       int regno = AARCH64_X0_REGNUM;
1956       ULONGEST tmp;
1957 
1958       while (len > 0)
1959 	{
1960 	  /* By using store_unsigned_integer we avoid having to do
1961 	     anything special for small big-endian values.  */
1962 	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
1963 	  store_unsigned_integer (valbuf,
1964 				  (len > X_REGISTER_SIZE
1965 				   ? X_REGISTER_SIZE : len), byte_order, tmp);
1966 	  len -= X_REGISTER_SIZE;
1967 	  valbuf += X_REGISTER_SIZE;
1968 	}
1969     }
1970   else
1971     {
1972       /* For a structure or union the behaviour is as if the value had
1973          been stored to word-aligned memory and then loaded into
1974          registers with 64-bit load instruction(s).  */
1975       int len = TYPE_LENGTH (type);
1976       int regno = AARCH64_X0_REGNUM;
1977       bfd_byte buf[X_REGISTER_SIZE];
1978 
1979       while (len > 0)
1980 	{
1981 	  regs->cooked_read (regno++, buf);
1982 	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1983 	  len -= X_REGISTER_SIZE;
1984 	  valbuf += X_REGISTER_SIZE;
1985 	}
1986     }
1987 }
1988 
1989 
1990 /* Will a function return an aggregate type in memory or in a
1991    register?  Return 0 if an aggregate type can be returned in a
1992    register, 1 if it must be returned in memory.  */
1993 
1994 static int
1995 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1996 {
1997   type = check_typedef (type);
1998   int elements;
1999   struct type *fundamental_type;
2000 
2001   if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2002 					     &fundamental_type))
2003     {
2004       /* V0-V7 are used to return values, and one register is allocated
2005 	 per member.  However, an HFA or HVA has at most four members.  */
2006       return 0;
2007     }
2008 
2009   if (TYPE_LENGTH (type) > 16)
2010     {
2011       /* PCS B.6 Aggregates larger than 16 bytes are passed by
2012          invisible reference.  */
2013 
2014       return 1;
2015     }
2016 
2017   return 0;
2018 }
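
/* As an illustration of the rules above (AAPCS64): a struct of four
   floats is an HFA and is returned in the first four SIMD/FP
   registers, while a 24-byte struct of integers exceeds 16 bytes and
   is returned in memory, through the address the caller passes in
   X8.  */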
2019 
2020 /* Write into appropriate registers a function return value of type
2021    TYPE, given in virtual format.  */
2022 
2023 static void
2024 aarch64_store_return_value (struct type *type, struct regcache *regs,
2025 			    const gdb_byte *valbuf)
2026 {
2027   struct gdbarch *gdbarch = regs->arch ();
2028   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2029   int elements;
2030   struct type *fundamental_type;
2031 
2032   if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2033 					     &fundamental_type))
2034     {
2035       int len = TYPE_LENGTH (fundamental_type);
2036 
2037       for (int i = 0; i < elements; i++)
2038 	{
2039 	  int regno = AARCH64_V0_REGNUM + i;
2040 	  /* Enough space for a full vector register.  */
2041 	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
2042 	  gdb_assert (len <= sizeof (tmpbuf));
2043 
2044 	  if (aarch64_debug)
2045 	    {
2046 	      debug_printf ("write HFA or HVA return value element %d to %s\n",
2047 			    i + 1,
2048 			    gdbarch_register_name (gdbarch, regno));
2049 	    }
2050 
2051 	  memcpy (tmpbuf, valbuf,
2052 		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2053 	  regs->cooked_write (regno, tmpbuf);
2054 	  valbuf += len;
2055 	}
2056     }
2057   else if (TYPE_CODE (type) == TYPE_CODE_INT
2058 	   || TYPE_CODE (type) == TYPE_CODE_CHAR
2059 	   || TYPE_CODE (type) == TYPE_CODE_BOOL
2060 	   || TYPE_CODE (type) == TYPE_CODE_PTR
2061 	   || TYPE_IS_REFERENCE (type)
2062 	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
2063     {
2064       if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2065 	{
2066 	  /* Values of one word or less are zero/sign-extended and
2067 	     returned in X0.  */
2068 	  bfd_byte tmpbuf[X_REGISTER_SIZE];
2069 	  LONGEST val = unpack_long (type, valbuf);
2070 
2071 	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2072 	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2073 	}
2074       else
2075 	{
2076 	  /* Integral values greater than one word are stored in
2077 	     consecutive registers starting with X0.  This will always
2078 	     be a multiple of the register size.  */
2079 	  int len = TYPE_LENGTH (type);
2080 	  int regno = AARCH64_X0_REGNUM;
2081 
2082 	  while (len > 0)
2083 	    {
2084 	      regs->cooked_write (regno++, valbuf);
2085 	      len -= X_REGISTER_SIZE;
2086 	      valbuf += X_REGISTER_SIZE;
2087 	    }
2088 	}
2089     }
2090   else
2091     {
2092       /* For a structure or union the behaviour is as if the value had
2093 	 been stored to word-aligned memory and then loaded into
2094 	 registers with 64-bit load instruction(s).  */
2095       int len = TYPE_LENGTH (type);
2096       int regno = AARCH64_X0_REGNUM;
2097       bfd_byte tmpbuf[X_REGISTER_SIZE];
2098 
2099       while (len > 0)
2100 	{
2101 	  memcpy (tmpbuf, valbuf,
2102 		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2103 	  regs->cooked_write (regno++, tmpbuf);
2104 	  len -= X_REGISTER_SIZE;
2105 	  valbuf += X_REGISTER_SIZE;
2106 	}
2107     }
2108 }
2109 
2110 /* Implement the "return_value" gdbarch method.  */
2111 
2112 static enum return_value_convention
2113 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2114 		      struct type *valtype, struct regcache *regcache,
2115 		      gdb_byte *readbuf, const gdb_byte *writebuf)
2116 {
2117 
2118   if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2119       || TYPE_CODE (valtype) == TYPE_CODE_UNION
2120       || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2121     {
2122       if (aarch64_return_in_memory (gdbarch, valtype))
2123 	{
2124 	  if (aarch64_debug)
2125 	    debug_printf ("return value in memory\n");
2126 	  return RETURN_VALUE_STRUCT_CONVENTION;
2127 	}
2128     }
2129 
2130   if (writebuf)
2131     aarch64_store_return_value (valtype, regcache, writebuf);
2132 
2133   if (readbuf)
2134     aarch64_extract_return_value (valtype, regcache, readbuf);
2135 
2136   if (aarch64_debug)
2137     debug_printf ("return value in registers\n");
2138 
2139   return RETURN_VALUE_REGISTER_CONVENTION;
2140 }
2141 
2142 /* Implement the "get_longjmp_target" gdbarch method.  */
2143 
2144 static int
2145 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2146 {
2147   CORE_ADDR jb_addr;
2148   gdb_byte buf[X_REGISTER_SIZE];
2149   struct gdbarch *gdbarch = get_frame_arch (frame);
2150   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2151   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2152 
2153   jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2154 
2155   if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2156 			  X_REGISTER_SIZE))
2157     return 0;
2158 
2159   *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2160   return 1;
2161 }
2162 
2163 /* Implement the "gen_return_address" gdbarch method.  */
2164 
2165 static void
2166 aarch64_gen_return_address (struct gdbarch *gdbarch,
2167 			    struct agent_expr *ax, struct axs_value *value,
2168 			    CORE_ADDR scope)
2169 {
2170   value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2171   value->kind = axs_lvalue_register;
2172   value->u.reg = AARCH64_LR_REGNUM;
2173 }
2174 
2175 
2176 /* Return the pseudo register name corresponding to register regnum.  */
2177 
2178 static const char *
2179 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2180 {
2181   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2182 
2183   static const char *const q_name[] =
2184     {
2185       "q0", "q1", "q2", "q3",
2186       "q4", "q5", "q6", "q7",
2187       "q8", "q9", "q10", "q11",
2188       "q12", "q13", "q14", "q15",
2189       "q16", "q17", "q18", "q19",
2190       "q20", "q21", "q22", "q23",
2191       "q24", "q25", "q26", "q27",
2192       "q28", "q29", "q30", "q31",
2193     };
2194 
2195   static const char *const d_name[] =
2196     {
2197       "d0", "d1", "d2", "d3",
2198       "d4", "d5", "d6", "d7",
2199       "d8", "d9", "d10", "d11",
2200       "d12", "d13", "d14", "d15",
2201       "d16", "d17", "d18", "d19",
2202       "d20", "d21", "d22", "d23",
2203       "d24", "d25", "d26", "d27",
2204       "d28", "d29", "d30", "d31",
2205     };
2206 
2207   static const char *const s_name[] =
2208     {
2209       "s0", "s1", "s2", "s3",
2210       "s4", "s5", "s6", "s7",
2211       "s8", "s9", "s10", "s11",
2212       "s12", "s13", "s14", "s15",
2213       "s16", "s17", "s18", "s19",
2214       "s20", "s21", "s22", "s23",
2215       "s24", "s25", "s26", "s27",
2216       "s28", "s29", "s30", "s31",
2217     };
2218 
2219   static const char *const h_name[] =
2220     {
2221       "h0", "h1", "h2", "h3",
2222       "h4", "h5", "h6", "h7",
2223       "h8", "h9", "h10", "h11",
2224       "h12", "h13", "h14", "h15",
2225       "h16", "h17", "h18", "h19",
2226       "h20", "h21", "h22", "h23",
2227       "h24", "h25", "h26", "h27",
2228       "h28", "h29", "h30", "h31",
2229     };
2230 
2231   static const char *const b_name[] =
2232     {
2233       "b0", "b1", "b2", "b3",
2234       "b4", "b5", "b6", "b7",
2235       "b8", "b9", "b10", "b11",
2236       "b12", "b13", "b14", "b15",
2237       "b16", "b17", "b18", "b19",
2238       "b20", "b21", "b22", "b23",
2239       "b24", "b25", "b26", "b27",
2240       "b28", "b29", "b30", "b31",
2241     };
2242 
2243   regnum -= gdbarch_num_regs (gdbarch);
2244 
2245   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2246     return q_name[regnum - AARCH64_Q0_REGNUM];
2247 
2248   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2249     return d_name[regnum - AARCH64_D0_REGNUM];
2250 
2251   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2252     return s_name[regnum - AARCH64_S0_REGNUM];
2253 
2254   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2255     return h_name[regnum - AARCH64_H0_REGNUM];
2256 
2257   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2258     return b_name[regnum - AARCH64_B0_REGNUM];
2259 
2260   if (tdep->has_sve ())
2261     {
2262       static const char *const sve_v_name[] =
2263 	{
2264 	  "v0", "v1", "v2", "v3",
2265 	  "v4", "v5", "v6", "v7",
2266 	  "v8", "v9", "v10", "v11",
2267 	  "v12", "v13", "v14", "v15",
2268 	  "v16", "v17", "v18", "v19",
2269 	  "v20", "v21", "v22", "v23",
2270 	  "v24", "v25", "v26", "v27",
2271 	  "v28", "v29", "v30", "v31",
2272 	};
2273 
2274       if (regnum >= AARCH64_SVE_V0_REGNUM
2275 	  && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2276 	return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2277     }
2278 
2279   internal_error (__FILE__, __LINE__,
2280 		  _("aarch64_pseudo_register_name: bad register number %d"),
2281 		  regnum);
2282 }
2283 
2284 /* Implement the "pseudo_register_type" tdesc_arch_data method.  */
2285 
2286 static struct type *
2287 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2288 {
2289   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2290 
2291   regnum -= gdbarch_num_regs (gdbarch);
2292 
2293   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2294     return aarch64_vnq_type (gdbarch);
2295 
2296   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2297     return aarch64_vnd_type (gdbarch);
2298 
2299   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2300     return aarch64_vns_type (gdbarch);
2301 
2302   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2303     return aarch64_vnh_type (gdbarch);
2304 
2305   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2306     return aarch64_vnb_type (gdbarch);
2307 
2308   if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2309       && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2310     return aarch64_vnv_type (gdbarch);
2311 
2312   internal_error (__FILE__, __LINE__,
2313 		  _("aarch64_pseudo_register_type: bad register number %d"),
2314 		  regnum);
2315 }
2316 
2317 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */
2318 
2319 static int
2320 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2321 				    struct reggroup *group)
2322 {
2323   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2324 
2325   regnum -= gdbarch_num_regs (gdbarch);
2326 
2327   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2328     return group == all_reggroup || group == vector_reggroup;
2329   else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2330     return (group == all_reggroup || group == vector_reggroup
2331 	    || group == float_reggroup);
2332   else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2333     return (group == all_reggroup || group == vector_reggroup
2334 	    || group == float_reggroup);
2335   else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2336     return group == all_reggroup || group == vector_reggroup;
2337   else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2338     return group == all_reggroup || group == vector_reggroup;
2339   else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2340 	   && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2341     return group == all_reggroup || group == vector_reggroup;
2342 
2343   return group == all_reggroup;
2344 }
2345 
2346 /* Helper for aarch64_pseudo_read_value.  */
2347 
2348 static struct value *
2349 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2350 			     readable_regcache *regcache, int regnum_offset,
2351 			     int regsize, struct value *result_value)
2352 {
2353   unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2354 
2355   /* Enough space for a full vector register.  */
2356   gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2357   gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2358 
2359   if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2360     mark_value_bytes_unavailable (result_value, 0,
2361 				  TYPE_LENGTH (value_type (result_value)));
2362   else
2363     memcpy (value_contents_raw (result_value), reg_buf, regsize);
2364 
2365   return result_value;
2366 }
2367 
2368 /* Implement the "pseudo_register_read_value" gdbarch method.  */
2369 
2370 static struct value *
2371 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2372 			   int regnum)
2373 {
2374   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2375   struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2376 
2377   VALUE_LVAL (result_value) = lval_register;
2378   VALUE_REGNUM (result_value) = regnum;
2379 
2380   regnum -= gdbarch_num_regs (gdbarch);
2381 
2382   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2383     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2384 					regnum - AARCH64_Q0_REGNUM,
2385 					Q_REGISTER_SIZE, result_value);
2386 
2387   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2388     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2389 					regnum - AARCH64_D0_REGNUM,
2390 					D_REGISTER_SIZE, result_value);
2391 
2392   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2393     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2394 					regnum - AARCH64_S0_REGNUM,
2395 					S_REGISTER_SIZE, result_value);
2396 
2397   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2398     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2399 					regnum - AARCH64_H0_REGNUM,
2400 					H_REGISTER_SIZE, result_value);
2401 
2402   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2403     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2404 					regnum - AARCH64_B0_REGNUM,
2405 					B_REGISTER_SIZE, result_value);
2406 
2407   if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2408       && regnum < AARCH64_SVE_V0_REGNUM + 32)
2409     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2410 					regnum - AARCH64_SVE_V0_REGNUM,
2411 					V_REGISTER_SIZE, result_value);
2412 
2413   gdb_assert_not_reached ("regnum out of bounds");
2414 }
2415 
2416 /* Helper for aarch64_pseudo_write.  */
2417 
2418 static void
2419 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2420 			int regnum_offset, int regsize, const gdb_byte *buf)
2421 {
2422   unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2423 
2424   /* Enough space for a full vector register.  */
2425   gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2426   gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2427 
2428   /* Ensure the register buffer is zero.  We want gdb writes of the
2429      various 'scalar' pseudo registers to behave like architectural
2430      writes: register width bytes are written, the remainder is set
2431      to zero.  */
2432   memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2433 
2434   memcpy (reg_buf, buf, regsize);
2435   regcache->raw_write (v_regnum, reg_buf);
2436 }
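
/* As a consequence, a user write such as "set $s0 = 1.5f" (an
   illustrative session command) stores four bytes and clears the
   remaining bytes of V0, mirroring the architectural behaviour where
   a write to S0 zeroes the upper part of the vector register.  */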
2437 
2438 /* Implement the "pseudo_register_write" gdbarch method.  */
2439 
2440 static void
2441 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2442 		      int regnum, const gdb_byte *buf)
2443 {
2444   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2445   regnum -= gdbarch_num_regs (gdbarch);
2446 
2447   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2448     return aarch64_pseudo_write_1 (gdbarch, regcache,
2449 				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2450 				   buf);
2451 
2452   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2453     return aarch64_pseudo_write_1 (gdbarch, regcache,
2454 				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2455 				   buf);
2456 
2457   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2458     return aarch64_pseudo_write_1 (gdbarch, regcache,
2459 				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2460 				   buf);
2461 
2462   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2463     return aarch64_pseudo_write_1 (gdbarch, regcache,
2464 				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2465 				   buf);
2466 
2467   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2468     return aarch64_pseudo_write_1 (gdbarch, regcache,
2469 				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2470 				   buf);
2471 
2472   if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2473       && regnum < AARCH64_SVE_V0_REGNUM + 32)
2474     return aarch64_pseudo_write_1 (gdbarch, regcache,
2475 				   regnum - AARCH64_SVE_V0_REGNUM,
2476 				   V_REGISTER_SIZE, buf);
2477 
2478   gdb_assert_not_reached ("regnum out of bounds");
2479 }
2480 
2481 /* Callback function for user_reg_add.  */
2482 
2483 static struct value *
2484 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2485 {
2486   const int *reg_p = (const int *) baton;
2487 
2488   return value_of_register (*reg_p, frame);
2489 }
2490 
2491 
2492 /* Implement the "software_single_step" gdbarch method, needed to
2493    single step through atomic sequences on AArch64.  */
2494 
2495 static std::vector<CORE_ADDR>
2496 aarch64_software_single_step (struct regcache *regcache)
2497 {
2498   struct gdbarch *gdbarch = regcache->arch ();
2499   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2500   const int insn_size = 4;
2501   const int atomic_sequence_length = 16; /* Instruction sequence length.  */
2502   CORE_ADDR pc = regcache_read_pc (regcache);
2503   CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2504   CORE_ADDR loc = pc;
2505   CORE_ADDR closing_insn = 0;
2506   uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2507 						byte_order_for_code);
2508   int index;
2509   int insn_count;
2510   int bc_insn_count = 0; /* Conditional branch instruction count.  */
2511   int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
2512   aarch64_inst inst;
2513 
2514   if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2515     return {};
2516 
2517   /* Look for a Load Exclusive instruction which begins the sequence.  */
2518   if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2519     return {};
2520 
2521   for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2522     {
2523       loc += insn_size;
2524       insn = read_memory_unsigned_integer (loc, insn_size,
2525 					   byte_order_for_code);
2526 
2527       if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2528 	return {};
2529       /* Check if the instruction is a conditional branch.  */
2530       if (inst.opcode->iclass == condbranch)
2531 	{
2532 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2533 
2534 	  if (bc_insn_count >= 1)
2535 	    return {};
2536 
2537 	  /* It is, so we'll try to set a breakpoint at the destination.  */
2538 	  breaks[1] = loc + inst.operands[0].imm.value;
2539 
2540 	  bc_insn_count++;
2541 	  last_breakpoint++;
2542 	}
2543 
2544       /* Look for the Store Exclusive which closes the atomic sequence.  */
2545       if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2546 	{
2547 	  closing_insn = loc;
2548 	  break;
2549 	}
2550     }
2551 
2552   /* We didn't find a closing Store Exclusive instruction; fall back.  */
2553   if (!closing_insn)
2554     return {};
2555 
2556   /* Insert breakpoint after the end of the atomic sequence.  */
2557   breaks[0] = loc + insn_size;
2558 
2559   /* Check for duplicated breakpoints, and also check that the second
2560      breakpoint is not within the atomic sequence.  */
2561   if (last_breakpoint
2562       && (breaks[1] == breaks[0]
2563 	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2564     last_breakpoint = 0;
2565 
2566   std::vector<CORE_ADDR> next_pcs;
2567 
2568   /* Insert the breakpoint at the end of the sequence, and one at the
2569      destination of the conditional branch, if it exists.  */
2570   for (index = 0; index <= last_breakpoint; index++)
2571     next_pcs.push_back (breaks[index]);
2572 
2573   return next_pcs;
2574 }
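
/* For illustration, a sequence this recognizes typically looks like
   (a minimal sketch; real compiler output varies):

     again:
       ldaxr  w1, [x0]        ; Load Exclusive opens the sequence
       add    w1, w1, #1
       stlxr  w2, w1, [x0]    ; Store Exclusive closes it
       cbnz   w2, again

   The breakpoint goes on the instruction following the STLXR rather
   than inside the sequence, because trapping between the exclusives
   can clear the exclusive monitor and make the store fail forever.  */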
2575 
2576 struct aarch64_displaced_step_closure : public displaced_step_closure
2577 {
2578   /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2579      is being displaced stepped.  */
2580   int cond = 0;
2581 
2582   /* PC adjustment offset after displaced stepping.  */
2583   int32_t pc_adjust = 0;
2584 };
2585 
2586 /* Data when visiting instructions for displaced stepping.  */
2587 
2588 struct aarch64_displaced_step_data
2589 {
2590   struct aarch64_insn_data base;
2591 
2592   /* The address at which the instruction will be executed.  */
2593   CORE_ADDR new_addr;
2594   /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
2595   uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2596   /* Number of instructions in INSN_BUF.  */
2597   unsigned insn_count;
2598   /* Registers when doing displaced stepping.  */
2599   struct regcache *regs;
2600 
2601   aarch64_displaced_step_closure *dsc;
2602 };
2603 
2604 /* Implementation of aarch64_insn_visitor method "b".  */
2605 
2606 static void
2607 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2608 			  struct aarch64_insn_data *data)
2609 {
2610   struct aarch64_displaced_step_data *dsd
2611     = (struct aarch64_displaced_step_data *) data;
2612   int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2613 
2614   if (can_encode_int32 (new_offset, 28))
2615     {
2616       /* Emit B rather than BL, because executing BL on a new address
2617 	 will get the wrong address into LR.  In order to avoid this,
2618 	 we emit B, and update LR if the instruction is BL.  */
2619       emit_b (dsd->insn_buf, 0, new_offset);
2620       dsd->insn_count++;
2621     }
2622   else
2623     {
2624       /* Write NOP.  */
2625       emit_nop (dsd->insn_buf);
2626       dsd->insn_count++;
2627       dsd->dsc->pc_adjust = offset;
2628     }
2629 
2630   if (is_bl)
2631     {
2632       /* Update LR.  */
2633       regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2634 				      data->insn_addr + 4);
2635     }
2636 }
2637 
2638 /* Implementation of aarch64_insn_visitor method "b_cond".  */
2639 
2640 static void
2641 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2642 			       struct aarch64_insn_data *data)
2643 {
2644   struct aarch64_displaced_step_data *dsd
2645     = (struct aarch64_displaced_step_data *) data;
2646 
2647   /* GDB has to fix up the PC after displaced stepping this instruction
2648      differently according to whether the condition is true or false.
2649      Instead of checking COND against the condition flags, we can emit
2650      the following instructions, and GDB can tell how to fix up the PC
2651      from the resulting PC value.
2652 
2653      B.COND TAKEN    ; If cond is true, then jump to TAKEN.
2654      INSN1     ;
2655      TAKEN:
2656      INSN2
2657   */
2658 
2659   emit_bcond (dsd->insn_buf, cond, 8);
2660   dsd->dsc->cond = 1;
2661   dsd->dsc->pc_adjust = offset;
2662   dsd->insn_count = 1;
2663 }
2664 /* Construct an aarch64_register operand for register number NUM.
2665    IS64 is nonzero to select the 64-bit (X) view of the register
2666    rather than the 32-bit (W) view.  */
2667    helper function.  */
2668 
2669 static struct aarch64_register
2670 aarch64_register (unsigned num, int is64)
2671 {
2672   return (struct aarch64_register) { num, is64 };
2673 }
2674 
2675 /* Implementation of aarch64_insn_visitor method "cb".  */
2676 
2677 static void
2678 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2679 			   const unsigned rn, int is64,
2680 			   struct aarch64_insn_data *data)
2681 {
2682   struct aarch64_displaced_step_data *dsd
2683     = (struct aarch64_displaced_step_data *) data;
2684 
2685   /* The offset is out of range for a compare and branch
2686      instruction.  We can use the following instructions instead:
2687 
2688 	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
2689 	 INSN1     ;
2690 	 TAKEN:
2691 	 INSN2
2692   */
2693   emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2694   dsd->insn_count = 1;
2695   dsd->dsc->cond = 1;
2696   dsd->dsc->pc_adjust = offset;
2697 }
2698 
2699 /* Implementation of aarch64_insn_visitor method "tb".  */
2700 
2701 static void
2702 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2703 			   const unsigned rt, unsigned bit,
2704 			   struct aarch64_insn_data *data)
2705 {
2706   struct aarch64_displaced_step_data *dsd
2707     = (struct aarch64_displaced_step_data *) data;
2708 
2709   /* The offset is out of range for a test bit and branch
2710      instruction.  We can use the following instructions instead:
2711 
2712      TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2713      INSN1         ;
2714      TAKEN:
2715      INSN2
2716 
2717   */
2718   emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2719   dsd->insn_count = 1;
2720   dsd->dsc->cond = 1;
2721   dsd->dsc->pc_adjust = offset;
2722 }
2723 
2724 /* Implementation of aarch64_insn_visitor method "adr".  */
2725 
2726 static void
2727 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2728 			    const int is_adrp, struct aarch64_insn_data *data)
2729 {
2730   struct aarch64_displaced_step_data *dsd
2731     = (struct aarch64_displaced_step_data *) data;
2732   /* We know exactly the address the ADR{P,} instruction will compute.
2733      We can just write it to the destination register.  */
2734   CORE_ADDR address = data->insn_addr + offset;
2735 
2736   if (is_adrp)
2737     {
2738       /* Clear the lower 12 bits of the address to get the 4K page.  */
2739       regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2740 				      address & ~0xfff);
2741     }
2742   else
2743       regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2744 				      address);
2745 
2746   dsd->dsc->pc_adjust = 4;
2747   emit_nop (dsd->insn_buf);
2748   dsd->insn_count = 1;
2749 }
2750 
2751 /* Implementation of aarch64_insn_visitor method "ldr_literal".  */
2752 
2753 static void
2754 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2755 				    const unsigned rt, const int is64,
2756 				    struct aarch64_insn_data *data)
2757 {
2758   struct aarch64_displaced_step_data *dsd
2759     = (struct aarch64_displaced_step_data *) data;
2760   CORE_ADDR address = data->insn_addr + offset;
2761   struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2762 
2763   regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2764 				  address);
2765 
2766   if (is_sw)
2767     dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2768 				  aarch64_register (rt, 1), zero);
2769   else
2770     dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2771 				aarch64_register (rt, 1), zero);
2772 
2773   dsd->dsc->pc_adjust = 4;
2774 }
2775 
2776 /* Implementation of aarch64_insn_visitor method "others".  */
2777 
2778 static void
2779 aarch64_displaced_step_others (const uint32_t insn,
2780 			       struct aarch64_insn_data *data)
2781 {
2782   struct aarch64_displaced_step_data *dsd
2783     = (struct aarch64_displaced_step_data *) data;
2784 
2785   aarch64_emit_insn (dsd->insn_buf, insn);
2786   dsd->insn_count = 1;
2787 
2788   if ((insn & 0xfffffc1f) == 0xd65f0000)
2789     {
2790       /* RET */
2791       dsd->dsc->pc_adjust = 0;
2792     }
2793   else
2794     dsd->dsc->pc_adjust = 4;
2795 }
2796 
2797 static const struct aarch64_insn_visitor visitor =
2798 {
2799   aarch64_displaced_step_b,
2800   aarch64_displaced_step_b_cond,
2801   aarch64_displaced_step_cb,
2802   aarch64_displaced_step_tb,
2803   aarch64_displaced_step_adr,
2804   aarch64_displaced_step_ldr_literal,
2805   aarch64_displaced_step_others,
2806 };
2807 
2808 /* Implement the "displaced_step_copy_insn" gdbarch method.  */
2809 
2810 struct displaced_step_closure *
2811 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2812 				  CORE_ADDR from, CORE_ADDR to,
2813 				  struct regcache *regs)
2814 {
2815   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2816   uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2817   struct aarch64_displaced_step_data dsd;
2818   aarch64_inst inst;
2819 
2820   if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2821     return NULL;
2822 
2823   /* Look for a Load Exclusive instruction which begins the sequence.  */
2824   if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2825     {
2826       /* We can't displaced step atomic sequences.  */
2827       return NULL;
2828     }
2829 
2830   std::unique_ptr<aarch64_displaced_step_closure> dsc
2831     (new aarch64_displaced_step_closure);
2832   dsd.base.insn_addr = from;
2833   dsd.new_addr = to;
2834   dsd.regs = regs;
2835   dsd.dsc = dsc.get ();
2836   dsd.insn_count = 0;
2837   aarch64_relocate_instruction (insn, &visitor,
2838 				(struct aarch64_insn_data *) &dsd);
2839   gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2840 
2841   if (dsd.insn_count != 0)
2842     {
2843       int i;
2844 
2845       /* The instruction can be relocated to the scratch pad.  Copy the
2846 	 relocated instruction(s) there.  */
2847       for (i = 0; i < dsd.insn_count; i++)
2848 	{
2849 	  if (debug_displaced)
2850 	    {
2851 	      debug_printf ("displaced: writing insn ");
2852 	      debug_printf ("%.8x", dsd.insn_buf[i]);
2853 	      debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2854 	    }
2855 	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2856 					 (ULONGEST) dsd.insn_buf[i]);
2857 	}
2858     }
2859   else
2860     {
2861       dsc = NULL;
2862     }
2863 
2864   return dsc.release ();
2865 }
2866 
2867 /* Implement the "displaced_step_fixup" gdbarch method.  */
2868 
2869 void
2870 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2871 			      struct displaced_step_closure *dsc_,
2872 			      CORE_ADDR from, CORE_ADDR to,
2873 			      struct regcache *regs)
2874 {
2875   aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2876 
2877   if (dsc->cond)
2878     {
2879       ULONGEST pc;
2880 
2881       regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2882       if (pc - to == 8)
2883 	{
2884 	  /* Condition is true.  */
2885 	}
2886       else if (pc - to == 4)
2887 	{
2888 	  /* Condition is false.  */
2889 	  dsc->pc_adjust = 4;
2890 	}
2891       else
2892 	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2893     }
2894 
2895   if (dsc->pc_adjust != 0)
2896     {
2897       if (debug_displaced)
2898 	{
2899 	  debug_printf ("displaced: fixup: set PC to %s:%d\n",
2900 			paddress (gdbarch, from), dsc->pc_adjust);
2901 	}
2902       regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2903 				      from + dsc->pc_adjust);
2904     }
2905 }
2906 
2907 /* Implement the "displaced_step_hw_singlestep" gdbarch method.  */
2908 
2909 int
2910 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2911 				      struct displaced_step_closure *closure)
2912 {
2913   return 1;
2914 }
2915 
2916 /* Get the correct target description for the given VQ value.
2917    If VQ is zero then it is assumed SVE is not supported.
2918    (It is not possible to set VQ to zero on an SVE system).  */
2919 
2920 const target_desc *
2921 aarch64_read_description (uint64_t vq)
2922 {
2923   if (vq > AARCH64_MAX_SVE_VQ)
2924     error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2925 	   AARCH64_MAX_SVE_VQ);
2926 
2927   struct target_desc *tdesc = tdesc_aarch64_list[vq];
2928 
2929   if (tdesc == NULL)
2930     {
2931       tdesc = aarch64_create_target_description (vq);
2932       tdesc_aarch64_list[vq] = tdesc;
2933     }
2934 
2935   return tdesc;
2936 }
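
/* VQ counts the 128-bit quadwords in an SVE Z register, so for
   example a 512-bit vector length corresponds to VQ = 4, while
   VQ = 0 selects the base description without SVE.  */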
2937 
2938 /* Return the VQ used when creating the target description TDESC.  */
2939 
2940 static uint64_t
2941 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2942 {
2943   const struct tdesc_feature *feature_sve;
2944 
2945   if (!tdesc_has_registers (tdesc))
2946     return 0;
2947 
2948   feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2949 
2950   if (feature_sve == nullptr)
2951     return 0;
2952 
2953   uint64_t vl = tdesc_register_bitsize (feature_sve,
2954 					aarch64_sve_register_names[0]) / 8;
2955   return sve_vq_from_vl (vl);
2956 }
2957 
2958 /* Add all the expected register sets into GDBARCH.  */
2959 
2960 static void
2961 aarch64_add_reggroups (struct gdbarch *gdbarch)
2962 {
2963   reggroup_add (gdbarch, general_reggroup);
2964   reggroup_add (gdbarch, float_reggroup);
2965   reggroup_add (gdbarch, system_reggroup);
2966   reggroup_add (gdbarch, vector_reggroup);
2967   reggroup_add (gdbarch, all_reggroup);
2968   reggroup_add (gdbarch, save_reggroup);
2969   reggroup_add (gdbarch, restore_reggroup);
2970 }
2971 
2972 /* Initialize the current architecture based on INFO.  If possible,
2973    re-use an architecture from ARCHES, which is a list of
2974    architectures already created during this debugging session.
2975 
2976    Called e.g. at program startup, when reading a core file, and when
2977    reading a binary file.  */
2978 
2979 static struct gdbarch *
2980 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2981 {
2982   struct gdbarch_tdep *tdep;
2983   struct gdbarch *gdbarch;
2984   struct gdbarch_list *best_arch;
2985   struct tdesc_arch_data *tdesc_data = NULL;
2986   const struct target_desc *tdesc = info.target_desc;
2987   int i;
2988   int valid_p = 1;
2989   const struct tdesc_feature *feature_core;
2990   const struct tdesc_feature *feature_fpu;
2991   const struct tdesc_feature *feature_sve;
2992   int num_regs = 0;
2993   int num_pseudo_regs = 0;
2994 
2995   /* Ensure we always have a target description.  */
2996   if (!tdesc_has_registers (tdesc))
2997     tdesc = aarch64_read_description (0);
2998   gdb_assert (tdesc);
2999 
3000   feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3001   feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3002   feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3003 
3004   if (feature_core == NULL)
3005     return NULL;
3006 
3007   tdesc_data = tdesc_data_alloc ();
3008 
3009   /* Validate the description provides the mandatory core R registers
3010      and allocate their numbers.  */
3011   for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3012     valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3013 					AARCH64_X0_REGNUM + i,
3014 					aarch64_r_register_names[i]);
3015 
3016   num_regs = AARCH64_X0_REGNUM + i;
3017 
3018   /* Add the V registers.  */
3019   if (feature_fpu != NULL)
3020     {
3021       if (feature_sve != NULL)
3022 	error (_("Program contains both fpu and SVE features."));
3023 
3024       /* Validate the description provides the mandatory V registers
3025 	 and allocate their numbers.  */
3026       for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3027 	valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3028 					    AARCH64_V0_REGNUM + i,
3029 					    aarch64_v_register_names[i]);
3030 
3031       num_regs = AARCH64_V0_REGNUM + i;
3032     }
3033 
3034   /* Add the SVE registers.  */
3035   if (feature_sve != NULL)
3036     {
3037       /* Validate the description provides the mandatory SVE registers
3038 	 and allocate their numbers.  */
3039       for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3040 	valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3041 					    AARCH64_SVE_Z0_REGNUM + i,
3042 					    aarch64_sve_register_names[i]);
3043 
3044       num_regs = AARCH64_SVE_Z0_REGNUM + i;
3045       num_pseudo_regs += 32;	/* add the Vn register pseudos.  */
3046     }
3047 
3048   if (feature_fpu != NULL || feature_sve != NULL)
3049     {
3050       num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
3051       num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
3052       num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
3053       num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
3054       num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
3055     }
3056 
3057   if (!valid_p)
3058     {
3059       tdesc_data_cleanup (tdesc_data);
3060       return NULL;
3061     }
3062 
3063   /* AArch64 code is always little-endian.  */
3064   info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3065 
3066   /* If there is already a candidate, use it.  */
3067   for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3068        best_arch != NULL;
3069        best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3070     {
3071       /* Found a match.  */
3072       break;
3073     }
3074 
3075   if (best_arch != NULL)
3076     {
3077       if (tdesc_data != NULL)
3078 	tdesc_data_cleanup (tdesc_data);
3079       return best_arch->gdbarch;
3080     }
3081 
3082   tdep = XCNEW (struct gdbarch_tdep);
3083   gdbarch = gdbarch_alloc (&info, tdep);
3084 
3085   /* This should be low enough for everything.  */
3086   tdep->lowest_pc = 0x20;
3087   tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
3088   tdep->jb_elt_size = 8;
3089   tdep->vq = aarch64_get_tdesc_vq (tdesc);
3090 
3091   set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3092   set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3093 
3094   /* Frame handling.  */
3095   set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3096   set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3097   set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3098 
3099   /* Advance PC across function entry code.  */
3100   set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3101 
3102   /* The stack grows downward.  */
3103   set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3104 
3105   /* Breakpoint manipulation.  */
3106   set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3107 				       aarch64_breakpoint::kind_from_pc);
3108   set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3109 				       aarch64_breakpoint::bp_from_kind);
3110   set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3111   set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3112 
3113   /* Information about registers, etc.  */
3114   set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3115   set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3116   set_gdbarch_num_regs (gdbarch, num_regs);
3117 
3118   set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3119   set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3120   set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3121   set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3122   set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3123   set_tdesc_pseudo_register_reggroup_p (gdbarch,
3124 					aarch64_pseudo_register_reggroup_p);
3125 
3126   /* ABI */
3127   set_gdbarch_short_bit (gdbarch, 16);
3128   set_gdbarch_int_bit (gdbarch, 32);
3129   set_gdbarch_float_bit (gdbarch, 32);
3130   set_gdbarch_double_bit (gdbarch, 64);
3131   set_gdbarch_long_double_bit (gdbarch, 128);
3132   set_gdbarch_long_bit (gdbarch, 64);
3133   set_gdbarch_long_long_bit (gdbarch, 64);
3134   set_gdbarch_ptr_bit (gdbarch, 64);
3135   set_gdbarch_char_signed (gdbarch, 0);
3136   set_gdbarch_wchar_signed (gdbarch, 0);
3137   set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3138   set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3139   set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3140 
3141   /* Internal <-> external register number maps.  */
3142   set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3143 
3144   /* Returning results.  */
3145   set_gdbarch_return_value (gdbarch, aarch64_return_value);
3146 
3147   /* Disassembly.  */
3148   set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3149 
3150   /* Virtual tables.  */
3151   set_gdbarch_vbit_in_delta (gdbarch, 1);
3152 
3153   /* Register architecture.  */
3154   aarch64_add_reggroups (gdbarch);
3155 
3156   /* Hook in the ABI-specific overrides, if they have been registered.  */
3157   info.target_desc = tdesc;
3158   info.tdesc_data = tdesc_data;
3159   gdbarch_init_osabi (info, gdbarch);
3160 
3161   dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3162 
3163   /* Add some default predicates.  */
3164   frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3165   dwarf2_append_unwinders (gdbarch);
3166   frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3167 
3168   frame_base_set_default (gdbarch, &aarch64_normal_base);
3169 
3170   /* Now we have tuned the configuration, set a few final things,
3171      based on what the OS ABI has told us.  */
3172 
3173   if (tdep->jb_pc >= 0)
3174     set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3175 
3176   set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3177 
3178   tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3179 
3180   /* Add standard register aliases.  */
3181   for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3182     user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3183 		  value_of_aarch64_user_reg,
3184 		  &aarch64_register_aliases[i].regnum);
3185 
3186   register_aarch64_ravenscar_ops (gdbarch);
3187 
3188   return gdbarch;
3189 }
3190 
3191 static void
3192 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3193 {
3194   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3195 
3196   if (tdep == NULL)
3197     return;
3198 
3199   fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3200 		      paddress (gdbarch, tdep->lowest_pc));
3201 }
3202 
3203 #if GDB_SELF_TEST
3204 namespace selftests
3205 {
3206 static void aarch64_process_record_test (void);
3207 }
3208 #endif
3209 
3210 void
3211 _initialize_aarch64_tdep (void)
3212 {
3213   gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3214 		    aarch64_dump_tdep);
3215 
3216   /* Debug this file's internals.  */
3217   add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3218 Set AArch64 debugging."), _("\
3219 Show AArch64 debugging."), _("\
3220 When on, AArch64 specific debugging is enabled."),
3221 			    NULL,
3222 			    show_aarch64_debug,
3223 			    &setdebuglist, &showdebuglist);
3224 
3225 #if GDB_SELF_TEST
3226   selftests::register_test ("aarch64-analyze-prologue",
3227 			    selftests::aarch64_analyze_prologue_test);
3228   selftests::register_test ("aarch64-process-record",
3229 			    selftests::aarch64_process_record_test);
3230   selftests::record_xml_tdesc ("aarch64.xml",
3231 			       aarch64_create_target_description (0));
3232 #endif
3233 }
3234 
3235 /* AArch64 process record-replay related structures, defines, etc.  */
3236 
3237 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3238         do  \
3239           { \
3240             unsigned int reg_len = LENGTH; \
3241             if (reg_len) \
3242               { \
3243                 REGS = XNEWVEC (uint32_t, reg_len); \
3244                 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3245               } \
3246           } \
3247         while (0)
3248 
3249 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3250         do  \
3251           { \
3252             unsigned int mem_len = LENGTH; \
3253             if (mem_len) \
3254             { \
3255               MEMS = XNEWVEC (struct aarch64_mem_r, mem_len);  \
3256               memcpy(&MEMS->len, &RECORD_BUF[0], \
3257                      sizeof(struct aarch64_mem_r) * LENGTH); \
3258             } \
3259           } \
3260           while (0)
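
/* Both helpers copy the caller's on-stack record buffer into freshly
   allocated storage hung off the insn_decode_record; the record
   machinery that consumes aarch64_regs/aarch64_mems is expected to
   free it.  */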
3261 
3262 /* AArch64 record/replay structures and enumerations.  */
3263 
3264 struct aarch64_mem_r
3265 {
3266   uint64_t len;    /* Record length.  */
3267   uint64_t addr;   /* Memory address.  */
3268 };
3269 
3270 enum aarch64_record_result
3271 {
3272   AARCH64_RECORD_SUCCESS,
3273   AARCH64_RECORD_UNSUPPORTED,
3274   AARCH64_RECORD_UNKNOWN
3275 };
3276 
3277 typedef struct insn_decode_record_t
3278 {
3279   struct gdbarch *gdbarch;
3280   struct regcache *regcache;
3281   CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
3282   uint32_t aarch64_insn;               /* Insn to be recorded.  */
3283   uint32_t mem_rec_count;              /* Count of memory records.  */
3284   uint32_t reg_rec_count;              /* Count of register records.  */
3285   uint32_t *aarch64_regs;              /* Registers to be recorded.  */
3286   struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
3287 } insn_decode_record;
3288 
3289 /* Record handler for data processing - register instructions.  */
3290 
3291 static unsigned int
3292 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3293 {
3294   uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3295   uint32_t record_buf[4];
3296 
3297   reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3298   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3299   insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3300 
3301   if (!bit (aarch64_insn_r->aarch64_insn, 28))
3302     {
3303       uint8_t setflags;
3304 
3305       /* Logical (shifted register).  */
3306       if (insn_bits24_27 == 0x0a)
3307 	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3308       /* Add/subtract.  */
3309       else if (insn_bits24_27 == 0x0b)
3310 	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3311       else
3312 	return AARCH64_RECORD_UNKNOWN;
3313 
3314       record_buf[0] = reg_rd;
3315       aarch64_insn_r->reg_rec_count = 1;
3316       if (setflags)
3317 	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3318     }
3319   else
3320     {
3321       if (insn_bits24_27 == 0x0b)
3322 	{
3323 	  /* Data-processing (3 source).  */
3324 	  record_buf[0] = reg_rd;
3325 	  aarch64_insn_r->reg_rec_count = 1;
3326 	}
3327       else if (insn_bits24_27 == 0x0a)
3328 	{
3329 	  if (insn_bits21_23 == 0x00)
3330 	    {
3331 	      /* Add/subtract (with carry).  */
3332 	      record_buf[0] = reg_rd;
3333 	      aarch64_insn_r->reg_rec_count = 1;
3334 	      if (bit (aarch64_insn_r->aarch64_insn, 29))
3335 		{
3336 		  record_buf[1] = AARCH64_CPSR_REGNUM;
3337 		  aarch64_insn_r->reg_rec_count = 2;
3338 		}
3339 	    }
3340 	  else if (insn_bits21_23 == 0x02)
3341 	    {
3342 	      /* Conditional compare (register) and conditional compare
3343 		 (immediate) instructions.  */
3344 	      record_buf[0] = AARCH64_CPSR_REGNUM;
3345 	      aarch64_insn_r->reg_rec_count = 1;
3346 	    }
3347 	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3348 	    {
3349 	      /* Conditional select.  */
3350 	      /* Data-processing (2 source).  */
3351 	      /* Data-processing (1 source).  */
3352 	      record_buf[0] = reg_rd;
3353 	      aarch64_insn_r->reg_rec_count = 1;
3354 	    }
3355 	  else
3356 	    return AARCH64_RECORD_UNKNOWN;
3357 	}
3358     }
3359 
3360   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3361 	     record_buf);
3362   return AARCH64_RECORD_SUCCESS;
3363 }
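
/* For example (illustrative): "ADDS x0, x1, x2" has the S bit set,
   so both X0 and the CPSR are recorded, while a plain
   "ADD x0, x1, x2" records X0 alone.  */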
3364 
3365 /* Record handler for data processing - immediate instructions.  */
3366 
3367 static unsigned int
3368 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3369 {
3370   uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3371   uint32_t record_buf[4];
3372 
3373   reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3374   insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3375   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3376 
3377   if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
3378      || insn_bits24_27 == 0x03                   /* Bitfield and Extract.  */
3379      || (insn_bits24_27 == 0x02 && insn_bit23))  /* Move wide (immediate).  */
3380     {
3381       record_buf[0] = reg_rd;
3382       aarch64_insn_r->reg_rec_count = 1;
3383     }
3384   else if (insn_bits24_27 == 0x01)
3385     {
3386       /* Add/Subtract (immediate).  */
3387       setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3388       record_buf[0] = reg_rd;
3389       aarch64_insn_r->reg_rec_count = 1;
3390       if (setflags)
3391 	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3392     }
3393   else if (insn_bits24_27 == 0x02 && !insn_bit23)
3394     {
3395       /* Logical (immediate).  */
3396       setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3397       record_buf[0] = reg_rd;
3398       aarch64_insn_r->reg_rec_count = 1;
3399       if (setflags)
3400 	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3401     }
3402   else
3403     return AARCH64_RECORD_UNKNOWN;
3404 
3405   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3406 	     record_buf);
3407   return AARCH64_RECORD_SUCCESS;
3408 }
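
/* Editorial sketch, not part of the original source: MOVZ X0, #1 encodes
   as 0xd2800020, which lands in the "move wide (immediate)" arm above
   (bits 24-27 == 0x02 with bit 23 set), so only Rd is recorded.  The
   example_ helper is an editorial addition, illustrative only.  */

static void
example_record_data_proc_imm (void)
{
  uint32_t insn = 0xd2800020;	/* MOVZ X0, #1.  */

  gdb_assert (bits (insn, 24, 27) == 0x02);	/* Move wide group...  */
  gdb_assert (bit (insn, 23));			/* ... immediate form.  */
  gdb_assert (bits (insn, 0, 4) == 0);		/* Rd (X0) is the only record.  */
}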
3409 
3410 /* Record handler for branch, exception generation and system instructions.  */
3411 
3412 static unsigned int
3413 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3414 {
3415   struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3416   uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3417   uint32_t record_buf[4];
3418 
3419   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3420   insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3421   insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3422 
3423   if (insn_bits28_31 == 0x0d)
3424     {
3425       /* Exception generation instructions.  */
3426       if (insn_bits24_27 == 0x04)
3427 	{
3428 	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3429 	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3430 	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3431 	    {
3432 	      ULONGEST svc_number;
3433 
3434 	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3435 					  &svc_number);
3436 	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3437 						   svc_number);
3438 	    }
3439 	  else
3440 	    return AARCH64_RECORD_UNSUPPORTED;
3441 	}
3442       /* System instructions.  */
3443       else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3444 	{
3445 	  uint32_t reg_rt, reg_crn;
3446 
3447 	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3448 	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3449 
3450 	  /* Record Rt in case of SYSL and MRS instructions.  */
3451 	  if (bit (aarch64_insn_r->aarch64_insn, 21))
3452 	    {
3453 	      record_buf[0] = reg_rt;
3454 	      aarch64_insn_r->reg_rec_count = 1;
3455 	    }
3456 	  /* Record the CPSR for HINT and MSR (immediate) instructions.  */
3457 	  else if (reg_crn == 0x02 || reg_crn == 0x04)
3458 	    {
3459 	      record_buf[0] = AARCH64_CPSR_REGNUM;
3460 	      aarch64_insn_r->reg_rec_count = 1;
3461 	    }
3462 	}
3463       /* Unconditional branch (register).  */
3464       else if ((insn_bits24_27 & 0x0e) == 0x06)
3465 	{
3466 	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3467 	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3468 	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3469 	}
3470       else
3471 	return AARCH64_RECORD_UNKNOWN;
3472     }
3473   /* Unconditional branch (immediate).  */
3474   else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3475     {
3476       record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3477       if (bit (aarch64_insn_r->aarch64_insn, 31))
3478 	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3479     }
3480   else
3481     /* Compare & branch (immediate), Test & branch (immediate) and
3482        Conditional branch (immediate).  */
3483     record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3484 
3485   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3486 	     record_buf);
3487   return AARCH64_RECORD_SUCCESS;
3488 }
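
/* Editorial sketch, not part of the original source: B and BL both match
   the unconditional branch (immediate) test above, but only BL has bit 31
   set and therefore additionally records the link register.  The example_
   helper is an editorial addition, illustrative only.  */

static void
example_record_branch_imm (void)
{
  uint32_t b_insn = 0x14000000;		/* B  <pc+0>.  */
  uint32_t bl_insn = 0x94000000;	/* BL <pc+0>.  */

  gdb_assert ((bits (b_insn, 28, 31) & 0x07) == 0x01
	      && (bits (b_insn, 24, 27) & 0x0c) == 0x04);
  gdb_assert (!bit (b_insn, 31));	/* B: only the PC is recorded.  */
  gdb_assert (bit (bl_insn, 31));	/* BL: the PC and the LR.  */
}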
3489 
3490 /* Record handler for advanced SIMD load and store instructions.  */
3491 
3492 static unsigned int
3493 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3494 {
3495   CORE_ADDR address;
3496   uint64_t addr_offset = 0;
3497   uint32_t record_buf[24];
3498   uint64_t record_buf_mem[24];
3499   uint32_t reg_rn, reg_rt;
3500   uint32_t reg_index = 0, mem_index = 0;
3501   uint8_t opcode_bits, size_bits;
3502 
3503   reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3504   reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3505   size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3506   opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3507   regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3508 
3509   if (record_debug)
3510     debug_printf ("Process record: Advanced SIMD load/store\n");
3511 
3512   /* Load/store single structure.  */
3513   if (bit (aarch64_insn_r->aarch64_insn, 24))
3514     {
3515       uint8_t sindex, scale, selem, esize, replicate = 0;
3516       scale = opcode_bits >> 2;
3517       selem = ((opcode_bits & 0x02) |
3518               bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3519       switch (scale)
3520         {
3521         case 1:
3522           if (size_bits & 0x01)
3523             return AARCH64_RECORD_UNKNOWN;
3524           break;
3525         case 2:
3526           if ((size_bits >> 1) & 0x01)
3527             return AARCH64_RECORD_UNKNOWN;
3528           if (size_bits & 0x01)
3529             {
3530               if (!((opcode_bits >> 1) & 0x01))
3531                 scale = 3;
3532               else
3533                 return AARCH64_RECORD_UNKNOWN;
3534             }
3535           break;
3536         case 3:
3537           if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3538             {
3539               scale = size_bits;
3540               replicate = 1;
3541               break;
3542             }
3543           else
3544             return AARCH64_RECORD_UNKNOWN;
3545         default:
3546           break;
3547         }
3548       esize = 8 << scale;
3549       if (replicate)
3550         for (sindex = 0; sindex < selem; sindex++)
3551           {
3552             record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3553             reg_rt = (reg_rt + 1) % 32;
3554           }
3555       else
3556         {
3557           for (sindex = 0; sindex < selem; sindex++)
3558 	    {
3559 	      if (bit (aarch64_insn_r->aarch64_insn, 22))
3560 		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3561 	      else
3562 		{
3563 		  record_buf_mem[mem_index++] = esize / 8;
3564 		  record_buf_mem[mem_index++] = address + addr_offset;
3565 		}
3566 	      addr_offset = addr_offset + (esize / 8);
3567 	      reg_rt = (reg_rt + 1) % 32;
3568 	    }
3569         }
3570     }
3571   /* Load/store multiple structure.  */
3572   else
3573     {
3574       uint8_t selem, esize, rpt, elements;
3575       uint8_t eindex, rindex;
3576 
3577       esize = 8 << size_bits;
3578       if (bit (aarch64_insn_r->aarch64_insn, 30))
3579         elements = 128 / esize;
3580       else
3581         elements = 64 / esize;
3582 
3583       switch (opcode_bits)
3584         {
3585         /* LD/ST4 (4 Registers).  */
3586         case 0:
3587           rpt = 1;
3588           selem = 4;
3589           break;
3590         /* LD/ST1 (4 Registers).  */
3591         case 2:
3592           rpt = 4;
3593           selem = 1;
3594           break;
3595         /* LD/ST3 (3 Registers).  */
3596         case 4:
3597           rpt = 1;
3598           selem = 3;
3599           break;
3600         /* LD/ST1 (3 Registers).  */
3601         case 6:
3602           rpt = 3;
3603           selem = 1;
3604           break;
3605         /* LD/ST1 (1 Register).  */
3606         case 7:
3607           rpt = 1;
3608           selem = 1;
3609           break;
3610         /* LD/ST2 (2 Registers).  */
3611         case 8:
3612           rpt = 1;
3613           selem = 2;
3614           break;
3615         /* LD/ST1 (2 Registers).  */
3616         case 10:
3617           rpt = 2;
3618           selem = 1;
3619           break;
3620         default:
3621           return AARCH64_RECORD_UNSUPPORTED;
3622           break;
3623         }
3624       for (rindex = 0; rindex < rpt; rindex++)
3625         for (eindex = 0; eindex < elements; eindex++)
3626           {
3627             uint8_t reg_tt, sindex;
3628             reg_tt = (reg_rt + rindex) % 32;
3629             for (sindex = 0; sindex < selem; sindex++)
3630               {
3631                 if (bit (aarch64_insn_r->aarch64_insn, 22))
3632                   record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3633                 else
3634                   {
3635                     record_buf_mem[mem_index++] = esize / 8;
3636                     record_buf_mem[mem_index++] = address + addr_offset;
3637                   }
3638                 addr_offset = addr_offset + (esize / 8);
3639                 reg_tt = (reg_tt + 1) % 32;
3640               }
3641           }
3642     }
3643 
3644   if (bit (aarch64_insn_r->aarch64_insn, 23))
3645     record_buf[reg_index++] = reg_rn;
3646 
3647   aarch64_insn_r->reg_rec_count = reg_index;
3648   aarch64_insn_r->mem_rec_count = mem_index / 2;
3649   MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3650              record_buf_mem);
3651   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3652              record_buf);
3653   return AARCH64_RECORD_SUCCESS;
3654 }
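
/* Editorial sketch, not part of the original source: a multiple-structure
   access touches rpt * elements * selem * (esize / 8) bytes.  For
   LD1 {V0.2D}, [X0] (0x4c407c00), opcode 7 gives rpt 1 and selem 1,
   size 3 gives 64-bit elements, and Q=1 gives two of them: 16 bytes in
   all, with V0 recorded because it is a load.  The example_ helper is an
   editorial addition, illustrative only.  */

static void
example_record_asimd_ld1 (void)
{
  uint32_t insn = 0x4c407c00;	/* LD1 {V0.2D}, [X0].  */

  gdb_assert (!bit (insn, 24));			/* Multiple structures.  */
  gdb_assert (bits (insn, 12, 15) == 0x07);	/* LD/ST1, one register.  */
  gdb_assert (bits (insn, 10, 11) == 0x03);	/* size 3: 64-bit elements.  */
  gdb_assert (bit (insn, 30));			/* Q: 128-bit form.  */
  gdb_assert (bit (insn, 22));			/* L: a load, so V0 is
						   recorded, not memory.  */
}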
3655 
3656 /* Record handler for load and store instructions.  */
3657 
3658 static unsigned int
3659 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3660 {
3661   uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3662   uint8_t insn_bit23, insn_bit21;
3663   uint8_t opc, size_bits, ld_flag, vector_flag;
3664   uint32_t reg_rn, reg_rt, reg_rt2;
3665   uint64_t datasize, offset;
3666   uint32_t record_buf[8];
3667   uint64_t record_buf_mem[8];
3668   CORE_ADDR address;
3669 
3670   insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3671   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3672   insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3673   insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3674   insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3675   ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3676   vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3677   reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3678   reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3679   reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3680   size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3681 
3682   /* Load/store exclusive.  */
3683   if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3684     {
3685       if (record_debug)
3686 	debug_printf ("Process record: load/store exclusive\n");
3687 
3688       if (ld_flag)
3689 	{
3690 	  record_buf[0] = reg_rt;
3691 	  aarch64_insn_r->reg_rec_count = 1;
3692 	  if (insn_bit21)
3693 	    {
3694 	      record_buf[1] = reg_rt2;
3695 	      aarch64_insn_r->reg_rec_count = 2;
3696 	    }
3697 	}
3698       else
3699 	{
3700 	  if (insn_bit21)
3701 	    datasize = (8 << size_bits) * 2;
3702 	  else
3703 	    datasize = (8 << size_bits);
3704 	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3705 				      &address);
3706 	  record_buf_mem[0] = datasize / 8;
3707 	  record_buf_mem[1] = address;
3708 	  aarch64_insn_r->mem_rec_count = 1;
3709 	  if (!insn_bit23)
3710 	    {
3711 	      /* Save register rs.  */
3712 	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3713 	      aarch64_insn_r->reg_rec_count = 1;
3714 	    }
3715 	}
3716     }
3717   /* Load register (literal) instructions.  */
3718   else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3719     {
3720       if (record_debug)
3721 	debug_printf ("Process record: load register (literal)\n");
3722       if (vector_flag)
3723         record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3724       else
3725         record_buf[0] = reg_rt;
3726       aarch64_insn_r->reg_rec_count = 1;
3727     }
3728   /* All types of load/store pair instructions.  */
3729   else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3730     {
3731       if (record_debug)
3732 	debug_printf ("Process record: load/store pair\n");
3733 
3734       if (ld_flag)
3735         {
3736           if (vector_flag)
3737             {
3738               record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3739               record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3740             }
3741           else
3742             {
3743               record_buf[0] = reg_rt;
3744               record_buf[1] = reg_rt2;
3745             }
3746           aarch64_insn_r->reg_rec_count = 2;
3747         }
3748       else
3749         {
3750           uint16_t imm7_off;
3751           imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3752           if (!vector_flag)
3753             size_bits = size_bits >> 1;
3754           datasize = 8 << (2 + size_bits);
3755           offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3756           offset = offset << (2 + size_bits);
3757           regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3758                                       &address);
3759           if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3760             {
3761               if (imm7_off & 0x40)
3762                 address = address - offset;
3763               else
3764                 address = address + offset;
3765             }
3766 
3767           record_buf_mem[0] = datasize / 8;
3768           record_buf_mem[1] = address;
3769           record_buf_mem[2] = datasize / 8;
3770           record_buf_mem[3] = address + (datasize / 8);
3771           aarch64_insn_r->mem_rec_count = 2;
3772         }
3773       if (bit (aarch64_insn_r->aarch64_insn, 23))
3774         record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3775     }
3776   /* Load/store register (unsigned immediate) instructions.  */
3777   else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3778     {
3779       opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3780       if (!(opc >> 1))
3781 	{
3782 	  if (opc & 0x01)
3783 	    ld_flag = 0x01;
3784 	  else
3785 	    ld_flag = 0x0;
3786 	}
3787       else
3788 	{
3789 	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3790 	    {
3791 	      /* PRFM (immediate) */
3792 	      return AARCH64_RECORD_SUCCESS;
3793 	    }
3794 	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3795 	    {
3796 	      /* LDRSW (immediate) */
3797 	      ld_flag = 0x1;
3798 	    }
3799 	  else
3800 	    {
3801 	      if (opc & 0x01)
3802 		ld_flag = 0x01;
3803 	      else
3804 		ld_flag = 0x0;
3805 	    }
3806 	}
3807 
3808       if (record_debug)
3809 	{
3810 	  debug_printf ("Process record: load/store (unsigned immediate):"
3811 			" size %x V %d opc %x\n", size_bits, vector_flag,
3812 			opc);
3813 	}
3814 
3815       if (!ld_flag)
3816         {
3817           offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3818           datasize = 8 << size_bits;
3819           regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3820                                       &address);
3821           offset = offset << size_bits;
3822           address = address + offset;
3823 
3824           record_buf_mem[0] = datasize >> 3;
3825           record_buf_mem[1] = address;
3826           aarch64_insn_r->mem_rec_count = 1;
3827         }
3828       else
3829         {
3830           if (vector_flag)
3831             record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3832           else
3833             record_buf[0] = reg_rt;
3834           aarch64_insn_r->reg_rec_count = 1;
3835         }
3836     }
3837   /* Load/store register (register offset) instructions.  */
3838   else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3839 	   && insn_bits10_11 == 0x02 && insn_bit21)
3840     {
3841       if (record_debug)
3842 	debug_printf ("Process record: load/store (register offset)\n");
3843       opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3844       if (!(opc >> 1))
3845         if (opc & 0x01)
3846           ld_flag = 0x01;
3847         else
3848           ld_flag = 0x0;
3849       else
3850         if (size_bits != 0x03)
3851           ld_flag = 0x01;
3852         else
3853           return AARCH64_RECORD_UNKNOWN;
3854 
3855       if (!ld_flag)
3856         {
3857           ULONGEST reg_rm_val;
3858 
3859           regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3860                      bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3861           if (bit (aarch64_insn_r->aarch64_insn, 12))
3862             offset = reg_rm_val << size_bits;
3863           else
3864             offset = reg_rm_val;
3865           datasize = 8 << size_bits;
3866           regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3867                                       &address);
3868           address = address + offset;
3869           record_buf_mem[0] = datasize >> 3;
3870           record_buf_mem[1] = address;
3871           aarch64_insn_r->mem_rec_count = 1;
3872         }
3873       else
3874         {
3875           if (vector_flag)
3876             record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3877           else
3878             record_buf[0] = reg_rt;
3879           aarch64_insn_r->reg_rec_count = 1;
3880         }
3881     }
3882   /* Load/store register (immediate and unprivileged) instructions.  */
3883   else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3884 	   && !insn_bit21)
3885     {
3886       if (record_debug)
3887 	{
3888 	  debug_printf ("Process record: load/store "
3889 			"(immediate and unprivileged)\n");
3890 	}
3891       opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3892       if (!(opc >> 1))
3893         if (opc & 0x01)
3894           ld_flag = 0x01;
3895         else
3896           ld_flag = 0x0;
3897       else
3898         if (size_bits != 0x03)
3899           ld_flag = 0x01;
3900         else
3901           return AARCH64_RECORD_UNKNOWN;
3902 
3903       if (!ld_flag)
3904         {
3905           uint16_t imm9_off;
3906           imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3907           offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3908           datasize = 8 << size_bits;
3909           regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3910                                       &address);
3911           if (insn_bits10_11 != 0x01)
3912             {
3913               if (imm9_off & 0x0100)
3914                 address = address - offset;
3915               else
3916                 address = address + offset;
3917             }
3918           record_buf_mem[0] = datasize >> 3;
3919           record_buf_mem[1] = address;
3920           aarch64_insn_r->mem_rec_count = 1;
3921         }
3922       else
3923         {
3924           if (vector_flag)
3925             record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3926           else
3927             record_buf[0] = reg_rt;
3928           aarch64_insn_r->reg_rec_count = 1;
3929         }
3930       if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3931         record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3932     }
3933   /* Advanced SIMD load/store instructions.  */
3934   else
3935     return aarch64_record_asimd_load_store (aarch64_insn_r);
3936 
3937   MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3938              record_buf_mem);
3939   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3940              record_buf);
3941   return AARCH64_RECORD_SUCCESS;
3942 }
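
/* Editorial sketch, not part of the original source: the imm7 logic in the
   load/store pair arm above extracts the magnitude of the signed 7-bit
   offset and tests the sign bit (0x40) separately when adjusting the
   address.  An equivalent formulation as a plain sign extension, for
   comparison (the example_ helper is an editorial addition):  */

static int64_t
example_sign_extend_imm7 (uint16_t imm7)
{
  /* Propagate bit 6 (the sign) through the upper bits: 0x7f -> -1.  */
  return (imm7 & 0x40) ? (int64_t) imm7 - 0x80 : (int64_t) imm7;
}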
3943 
3944 /* Record handler for data processing SIMD and floating point instructions.  */
3945 
3946 static unsigned int
3947 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3948 {
3949   uint8_t insn_bit21, opcode, rmode, reg_rd;
3950   uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3951   uint8_t insn_bits11_14;
3952   uint32_t record_buf[2];
3953 
3954   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3955   insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3956   insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3957   insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3958   insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3959   opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3960   rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3961   reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3962   insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3963 
3964   if (record_debug)
3965     debug_printf ("Process record: data processing SIMD/FP: ");
3966 
3967   if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3968     {
3969       /* Floating point - fixed point conversion instructions.  */
3970       if (!insn_bit21)
3971 	{
3972 	  if (record_debug)
3973 	    debug_printf ("FP - fixed point conversion");
3974 
3975 	  if ((opcode >> 1) == 0x0 && rmode == 0x03)
3976 	    record_buf[0] = reg_rd;
3977 	  else
3978 	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3979 	}
3980       /* Floating point - conditional compare instructions.  */
3981       else if (insn_bits10_11 == 0x01)
3982 	{
3983 	  if (record_debug)
3984 	    debug_printf ("FP - conditional compare");
3985 
3986 	  record_buf[0] = AARCH64_CPSR_REGNUM;
3987 	}
3988       /* Floating point - data processing (2-source) and
3989          conditional select instructions.  */
3990       else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3991 	{
3992 	  if (record_debug)
3993 	    debug_printf ("FP - DP (2-source)");
3994 
3995 	  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3996 	}
3997       else if (insn_bits10_11 == 0x00)
3998 	{
3999 	  /* Floating point - immediate instructions.  */
4000 	  if ((insn_bits12_15 & 0x01) == 0x01
4001 	      || (insn_bits12_15 & 0x07) == 0x04)
4002 	    {
4003 	      if (record_debug)
4004 		debug_printf ("FP - immediate");
4005 	      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4006 	    }
4007 	  /* Floating point - compare instructions.  */
4008 	  else if ((insn_bits12_15 & 0x03) == 0x02)
4009 	    {
4010 	      if (record_debug)
4011 		debug_printf ("FP - compare");
4012 	      record_buf[0] = AARCH64_CPSR_REGNUM;
4013 	    }
4014 	  /* Floating point - integer conversions instructions.  */
4015 	  else if (insn_bits12_15 == 0x00)
4016 	    {
4017 	      /* Convert float to integer instruction.  */
4018 	      if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4019 		{
4020 		  if (record_debug)
4021 		    debug_printf ("float to int conversion");
4022 
4023 		  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4024 		}
4025 	      /* Convert integer to float instruction.  */
4026 	      else if ((opcode >> 1) == 0x01 && !rmode)
4027 		{
4028 		  if (record_debug)
4029 		    debug_printf ("int to float conversion");
4030 
4031 		  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4032 		}
4033 	      /* Move float to integer instruction.  */
4034 	      else if ((opcode >> 1) == 0x03)
4035 		{
4036 		  if (record_debug)
4037 		    debug_printf ("move float to int");
4038 
4039 		  if (!(opcode & 0x01))
4040 		    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4041 		  else
4042 		    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4043 		}
4044 	      else
4045 		return AARCH64_RECORD_UNKNOWN;
4046             }
4047 	  else
4048 	    return AARCH64_RECORD_UNKNOWN;
4049         }
4050       else
4051 	return AARCH64_RECORD_UNKNOWN;
4052     }
4053   else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4054     {
4055       if (record_debug)
4056 	debug_printf ("SIMD copy");
4057 
4058       /* Advanced SIMD copy instructions.  */
4059       if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4060 	  && !bit (aarch64_insn_r->aarch64_insn, 15)
4061 	  && bit (aarch64_insn_r->aarch64_insn, 10))
4062 	{
4063 	  if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4064 	    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4065 	  else
4066 	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4067 	}
4068       else
4069 	record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4070     }
4071   /* All remaining floating point or advanced SIMD instructions.  */
4072   else
4073     {
4074       if (record_debug)
4075 	debug_printf ("all remain");
4076 
4077       record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4078     }
4079 
4080   if (record_debug)
4081     debug_printf ("\n");
4082 
4083   aarch64_insn_r->reg_rec_count++;
4084   gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4085   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4086 	     record_buf);
4087   return AARCH64_RECORD_SUCCESS;
4088 }
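
/* Editorial sketch, not part of the original source: in the "move float to
   integer" arm above, opcode bit 16 selects the destination register file.
   FMOV X0, D0 (0x9e660000) writes a general register; FMOV D0, X0
   (0x9e670000) writes a vector register.  The example_ helper is an
   editorial addition, illustrative only.  */

static void
example_record_fmov_direction (void)
{
  uint32_t to_gpr = 0x9e660000;	/* FMOV X0, D0.  */
  uint32_t to_fpr = 0x9e670000;	/* FMOV D0, X0.  */

  gdb_assert ((bits (to_gpr, 16, 18) >> 1) == 0x03
	      && !(bits (to_gpr, 16, 18) & 0x01));	/* Records X0.  */
  gdb_assert ((bits (to_fpr, 16, 18) >> 1) == 0x03
	      && (bits (to_fpr, 16, 18) & 0x01));	/* Records V0.  */
}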
4089 
4090 /* Decode the instruction type and invoke its record handler.  */
4091 
4092 static unsigned int
4093 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4094 {
4095   uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4096 
4097   ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4098   ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4099   ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4100   ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4101 
4102   /* Data processing - immediate instructions.  */
4103   if (!ins_bit26 && !ins_bit27 && ins_bit28)
4104     return aarch64_record_data_proc_imm (aarch64_insn_r);
4105 
4106   /* Branch, exception generation and system instructions.  */
4107   if (ins_bit26 && !ins_bit27 && ins_bit28)
4108     return aarch64_record_branch_except_sys (aarch64_insn_r);
4109 
4110   /* Load and store instructions.  */
4111   if (!ins_bit25 && ins_bit27)
4112     return aarch64_record_load_store (aarch64_insn_r);
4113 
4114   /* Data processing - register instructions.  */
4115   if (ins_bit25 && !ins_bit26 && ins_bit27)
4116     return aarch64_record_data_proc_reg (aarch64_insn_r);
4117 
4118   /* Data processing - SIMD and floating point instructions.  */
4119   if (ins_bit25 && ins_bit26 && ins_bit27)
4120     return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4121 
4122   return AARCH64_RECORD_UNSUPPORTED;
4123 }
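
/* Editorial sketch, not part of the original source: a name-only mirror of
   the top-level dispatch above, following the A64 encoding split on
   bits 25-28.  The example_ helper is an editorial addition.  */

static const char *
example_insn_group_name (uint32_t insn)
{
  if (!bit (insn, 26) && !bit (insn, 27) && bit (insn, 28))
    return "data processing (immediate)";
  if (bit (insn, 26) && !bit (insn, 27) && bit (insn, 28))
    return "branch/exception/system";
  if (!bit (insn, 25) && bit (insn, 27))
    return "load/store";
  if (bit (insn, 25) && !bit (insn, 26) && bit (insn, 27))
    return "data processing (register)";
  if (bit (insn, 25) && bit (insn, 26) && bit (insn, 27))
    return "SIMD/FP";
  return "unallocated";
}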
4124 
4125 /* Cleans up local record registers and memory allocations.  */
4126 
4127 static void
4128 deallocate_reg_mem (insn_decode_record *record)
4129 {
4130   xfree (record->aarch64_regs);
4131   xfree (record->aarch64_mems);
4132 }
4133 
4134 #if GDB_SELF_TEST
4135 namespace selftests {
4136 
4137 static void
4138 aarch64_process_record_test (void)
4139 {
4140   struct gdbarch_info info;
4141   uint32_t ret;
4142 
4143   gdbarch_info_init (&info);
4144   info.bfd_arch_info = bfd_scan_arch ("aarch64");
4145 
4146   struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4147   SELF_CHECK (gdbarch != NULL);
4148 
4149   insn_decode_record aarch64_record;
4150 
4151   memset (&aarch64_record, 0, sizeof (insn_decode_record));
4152   aarch64_record.regcache = NULL;
4153   aarch64_record.this_addr = 0;
4154   aarch64_record.gdbarch = gdbarch;
4155 
4156   /* 20 00 80 f9	prfm	pldl1keep, [x1] */
4157   aarch64_record.aarch64_insn = 0xf9800020;
4158   ret = aarch64_record_decode_insn_handler (&aarch64_record);
4159   SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4160   SELF_CHECK (aarch64_record.reg_rec_count == 0);
4161   SELF_CHECK (aarch64_record.mem_rec_count == 0);
4162 
4163   deallocate_reg_mem (&aarch64_record);
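
  /* Editorial addition, not in the upstream test: an ADD (immediate)
     should record exactly one register, Rd, and no memory.
     21 04 00 91	add	x1, x1, #0x1  */
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;
  aarch64_record.aarch64_insn = 0x91000421;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 1);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);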
4164 }
4165 
4166 } // namespace selftests
4167 #endif /* GDB_SELF_TEST */
4168 
4169 /* Parse the current instruction and record the values of the registers and
4170    memory that will be changed by the current instruction to record_arch_list.
4171    Return -1 if something is wrong.  */
4172 
4173 int
4174 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4175 			CORE_ADDR insn_addr)
4176 {
4177   uint32_t rec_no = 0;
4178   uint8_t insn_size = 4;
4179   uint32_t ret = 0;
4180   gdb_byte buf[insn_size];
4181   insn_decode_record aarch64_record;
4182 
4183   memset (&buf[0], 0, insn_size);
4184   memset (&aarch64_record, 0, sizeof (insn_decode_record));
4185   target_read_memory (insn_addr, &buf[0], insn_size);
4186   aarch64_record.aarch64_insn
4187     = (uint32_t) extract_unsigned_integer (&buf[0],
4188 					   insn_size,
4189 					   gdbarch_byte_order (gdbarch));
4190   aarch64_record.regcache = regcache;
4191   aarch64_record.this_addr = insn_addr;
4192   aarch64_record.gdbarch = gdbarch;
4193 
4194   ret = aarch64_record_decode_insn_handler (&aarch64_record);
4195   if (ret == AARCH64_RECORD_UNSUPPORTED)
4196     {
4197       printf_unfiltered (_("Process record does not support instruction "
4198 			   "0x%0x at address %s.\n"),
4199 			 aarch64_record.aarch64_insn,
4200 			 paddress (gdbarch, insn_addr));
4201       ret = -1;
4202     }
4203 
4204   if (0 == ret)
4205     {
4206       /* Record registers.  */
4207       record_full_arch_list_add_reg (aarch64_record.regcache,
4208 				     AARCH64_PC_REGNUM);
4209       /* Always record register CPSR.  */
4210       record_full_arch_list_add_reg (aarch64_record.regcache,
4211 				     AARCH64_CPSR_REGNUM);
4212       if (aarch64_record.aarch64_regs)
4213 	for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4214 	  if (record_full_arch_list_add_reg (aarch64_record.regcache,
4215 					     aarch64_record.aarch64_regs[rec_no]))
4216 	    ret = -1;
4217 
4218       /* Record memories.  */
4219       if (aarch64_record.aarch64_mems)
4220 	for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4221 	  if (record_full_arch_list_add_mem
4222 	      ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4223 	       aarch64_record.aarch64_mems[rec_no].len))
4224 	    ret = -1;
4225 
4226       if (record_full_arch_list_add_end ())
4227 	ret = -1;
4228     }
4229 
4230   deallocate_reg_mem (&aarch64_record);
4231   return ret;
4232 }
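
/* Editorial note: this entry point is hooked into the record target by
   OS-specific code; for GNU/Linux, aarch64_linux_init_abi registers it
   roughly as

     set_gdbarch_process_record (gdbarch, aarch64_process_record);

   after which "record full" can replay the register and memory writes
   collected above.  */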
4233