1 /* Common target dependent code for GDB on AArch64 systems. 2 3 Copyright (C) 2009-2024 Free Software Foundation, Inc. 4 Contributed by ARM Ltd. 5 6 This file is part of GDB. 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 3 of the License, or 11 (at your option) any later version. 12 13 This program is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 20 21 22 #include "extract-store-integer.h" 23 #include "frame.h" 24 #include "language.h" 25 #include "cli/cli-cmds.h" 26 #include "gdbcore.h" 27 #include "dis-asm.h" 28 #include "regcache.h" 29 #include "reggroups.h" 30 #include "value.h" 31 #include "arch-utils.h" 32 #include "osabi.h" 33 #include "frame-unwind.h" 34 #include "frame-base.h" 35 #include "trad-frame.h" 36 #include "objfiles.h" 37 #include "dwarf2.h" 38 #include "dwarf2/frame.h" 39 #include "gdbtypes.h" 40 #include "prologue-value.h" 41 #include "target-descriptions.h" 42 #include "user-regs.h" 43 #include "ax-gdb.h" 44 #include "gdbsupport/selftest.h" 45 46 #include "aarch64-tdep.h" 47 #include "aarch64-ravenscar-thread.h" 48 49 #include "record.h" 50 #include "record-full.h" 51 #include "arch/aarch64-insn.h" 52 #include "gdbarch.h" 53 54 #include "opcode/aarch64.h" 55 #include <algorithm> 56 #include <unordered_map> 57 58 /* For inferior_ptid and current_inferior (). */ 59 #include "inferior.h" 60 /* For std::sqrt and std::pow. */ 61 #include <cmath> 62 63 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most 64 four members. */ 65 #define HA_MAX_NUM_FLDS 4 66 67 /* All possible aarch64 target descriptors. */ 68 static std::unordered_map <aarch64_features, target_desc *> tdesc_aarch64_map; 69 70 /* The standard register names, and all the valid aliases for them. 71 We're not adding fp here, that name is already taken, see 72 _initialize_frame_reg. */ 73 static const struct 74 { 75 const char *const name; 76 int regnum; 77 } aarch64_register_aliases[] = 78 { 79 /* Link register alias for x30. */ 80 {"lr", AARCH64_LR_REGNUM}, 81 /* SP is the canonical name for x31 according to aarch64_r_register_names, 82 so we're adding an x31 alias for sp. */ 83 {"x31", AARCH64_SP_REGNUM}, 84 /* specials */ 85 {"ip0", AARCH64_X0_REGNUM + 16}, 86 {"ip1", AARCH64_X0_REGNUM + 17} 87 }; 88 89 /* The required core 'R' registers. */ 90 static const char *const aarch64_r_register_names[] = 91 { 92 /* These registers must appear in consecutive RAW register number 93 order and they must begin with AARCH64_X0_REGNUM! */ 94 "x0", "x1", "x2", "x3", 95 "x4", "x5", "x6", "x7", 96 "x8", "x9", "x10", "x11", 97 "x12", "x13", "x14", "x15", 98 "x16", "x17", "x18", "x19", 99 "x20", "x21", "x22", "x23", 100 "x24", "x25", "x26", "x27", 101 "x28", "x29", "x30", "sp", 102 "pc", "cpsr" 103 }; 104 105 /* The FP/SIMD 'V' registers. */ 106 static const char *const aarch64_v_register_names[] = 107 { 108 /* These registers must appear in consecutive RAW register number 109 order and they must begin with AARCH64_V0_REGNUM! */ 110 "v0", "v1", "v2", "v3", 111 "v4", "v5", "v6", "v7", 112 "v8", "v9", "v10", "v11", 113 "v12", "v13", "v14", "v15", 114 "v16", "v17", "v18", "v19", 115 "v20", "v21", "v22", "v23", 116 "v24", "v25", "v26", "v27", 117 "v28", "v29", "v30", "v31", 118 "fpsr", 119 "fpcr" 120 }; 121 122 /* The SVE 'Z' and 'P' registers. */ 123 static const char *const aarch64_sve_register_names[] = 124 { 125 /* These registers must appear in consecutive RAW register number 126 order and they must begin with AARCH64_SVE_Z0_REGNUM! */ 127 "z0", "z1", "z2", "z3", 128 "z4", "z5", "z6", "z7", 129 "z8", "z9", "z10", "z11", 130 "z12", "z13", "z14", "z15", 131 "z16", "z17", "z18", "z19", 132 "z20", "z21", "z22", "z23", 133 "z24", "z25", "z26", "z27", 134 "z28", "z29", "z30", "z31", 135 "fpsr", "fpcr", 136 "p0", "p1", "p2", "p3", 137 "p4", "p5", "p6", "p7", 138 "p8", "p9", "p10", "p11", 139 "p12", "p13", "p14", "p15", 140 "ffr", "vg" 141 }; 142 143 static const char *const aarch64_pauth_register_names[] = 144 { 145 /* Authentication mask for data pointer, low half/user pointers. */ 146 "pauth_dmask", 147 /* Authentication mask for code pointer, low half/user pointers. */ 148 "pauth_cmask", 149 /* Authentication mask for data pointer, high half / kernel pointers. */ 150 "pauth_dmask_high", 151 /* Authentication mask for code pointer, high half / kernel pointers. */ 152 "pauth_cmask_high" 153 }; 154 155 static const char *const aarch64_mte_register_names[] = 156 { 157 /* Tag Control Register. */ 158 "tag_ctl" 159 }; 160 161 static int aarch64_stack_frame_destroyed_p (struct gdbarch *, CORE_ADDR); 162 163 /* AArch64 prologue cache structure. */ 164 struct aarch64_prologue_cache 165 { 166 /* The program counter at the start of the function. It is used to 167 identify this frame as a prologue frame. */ 168 CORE_ADDR func; 169 170 /* The program counter at the time this frame was created; i.e. where 171 this function was called from. It is used to identify this frame as a 172 stub frame. */ 173 CORE_ADDR prev_pc; 174 175 /* The stack pointer at the time this frame was created; i.e. the 176 caller's stack pointer when this function was called. It is used 177 to identify this frame. */ 178 CORE_ADDR prev_sp; 179 180 /* Is the target available to read from? */ 181 int available_p; 182 183 /* The frame base for this frame is just prev_sp - frame size. 184 FRAMESIZE is the distance from the frame pointer to the 185 initial stack pointer. */ 186 int framesize; 187 188 /* The register used to hold the frame pointer for this frame. */ 189 int framereg; 190 191 /* Saved register offsets. */ 192 trad_frame_saved_reg *saved_regs; 193 }; 194 195 /* Holds information used to read/write from/to ZA 196 pseudo-registers. 197 198 With this information, the read/write code can be simplified so it 199 deals only with the required information to map a ZA pseudo-register 200 to the exact bytes into the ZA contents buffer. Otherwise we'd need 201 to use a lot of conditionals. */ 202 203 struct za_offsets 204 { 205 /* Offset, into ZA, of the starting byte of the pseudo-register. */ 206 size_t starting_offset; 207 /* The size of the contiguous chunks of the pseudo-register. */ 208 size_t chunk_size; 209 /* The number of pseudo-register chunks contained in ZA. */ 210 size_t chunks; 211 /* The offset between each contiguous chunk. */ 212 size_t stride_size; 213 }; 214 215 /* Holds data that is helpful to determine the individual fields that make 216 up the names of the ZA pseudo-registers. It is also very helpful to 217 determine offsets, stride and sizes for reading ZA tiles and tile 218 slices. */ 219 220 struct za_pseudo_encoding 221 { 222 /* The slice index (0 ~ svl). Only used for tile slices. */ 223 uint8_t slice_index; 224 /* The tile number (0 ~ 15). */ 225 uint8_t tile_index; 226 /* Direction (horizontal/vertical). Only used for tile slices. */ 227 bool horizontal; 228 /* Qualifier index (0 ~ 4). These map to B, H, S, D and Q. */ 229 uint8_t qualifier_index; 230 }; 231 232 static void 233 show_aarch64_debug (struct ui_file *file, int from_tty, 234 struct cmd_list_element *c, const char *value) 235 { 236 gdb_printf (file, _("AArch64 debugging is %s.\n"), value); 237 } 238 239 namespace { 240 241 /* Abstract instruction reader. */ 242 243 class abstract_instruction_reader 244 { 245 public: 246 /* Read in one instruction. */ 247 virtual ULONGEST read (CORE_ADDR memaddr, int len, 248 enum bfd_endian byte_order) = 0; 249 }; 250 251 /* Instruction reader from real target. */ 252 253 class instruction_reader : public abstract_instruction_reader 254 { 255 public: 256 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order) 257 override 258 { 259 return read_code_unsigned_integer (memaddr, len, byte_order); 260 } 261 }; 262 263 } // namespace 264 265 /* If address signing is enabled, mask off the signature bits from the link 266 register, which is passed by value in ADDR, using the register values in 267 THIS_FRAME. */ 268 269 static CORE_ADDR 270 aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep, 271 const frame_info_ptr &this_frame, CORE_ADDR addr) 272 { 273 if (tdep->has_pauth () 274 && frame_unwind_register_unsigned (this_frame, 275 tdep->ra_sign_state_regnum)) 276 { 277 /* VA range select (bit 55) tells us whether to use the low half masks 278 or the high half masks. */ 279 int cmask_num; 280 if (tdep->pauth_reg_count > 2 && addr & VA_RANGE_SELECT_BIT_MASK) 281 cmask_num = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base); 282 else 283 cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base); 284 285 /* By default, we assume TBI and discard the top 8 bits plus the VA range 286 select bit (55). */ 287 CORE_ADDR mask = AARCH64_TOP_BITS_MASK; 288 mask |= frame_unwind_register_unsigned (this_frame, cmask_num); 289 addr = aarch64_remove_top_bits (addr, mask); 290 291 /* Record in the frame that the link register required unmasking. */ 292 set_frame_previous_pc_masked (this_frame); 293 } 294 295 return addr; 296 } 297 298 /* Implement the "get_pc_address_flags" gdbarch method. */ 299 300 static std::string 301 aarch64_get_pc_address_flags (const frame_info_ptr &frame, CORE_ADDR pc) 302 { 303 if (pc != 0 && get_frame_pc_masked (frame)) 304 return "PAC"; 305 306 return ""; 307 } 308 309 /* Analyze a prologue, looking for a recognizable stack frame 310 and frame pointer. Scan until we encounter a store that could 311 clobber the stack frame unexpectedly, or an unknown instruction. */ 312 313 static CORE_ADDR 314 aarch64_analyze_prologue (struct gdbarch *gdbarch, 315 CORE_ADDR start, CORE_ADDR limit, 316 struct aarch64_prologue_cache *cache, 317 abstract_instruction_reader& reader) 318 { 319 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); 320 int i; 321 322 /* Whether the stack has been set. This should be true when we notice a SP 323 to FP move or if we are using the SP as the base register for storing 324 data, in case the FP is omitted. */ 325 bool seen_stack_set = false; 326 327 /* Track X registers and D registers in prologue. */ 328 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT]; 329 330 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++) 331 regs[i] = pv_register (i, 0); 332 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch)); 333 334 for (; start < limit; start += 4) 335 { 336 uint32_t insn; 337 aarch64_inst inst; 338 339 insn = reader.read (start, 4, byte_order_for_code); 340 341 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0) 342 break; 343 344 if (inst.opcode->iclass == addsub_imm 345 && (inst.opcode->op == OP_ADD 346 || strcmp ("sub", inst.opcode->name) == 0)) 347 { 348 unsigned rd = inst.operands[0].reg.regno; 349 unsigned rn = inst.operands[1].reg.regno; 350 351 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3); 352 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP); 353 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP); 354 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM); 355 356 if (inst.opcode->op == OP_ADD) 357 { 358 regs[rd] = pv_add_constant (regs[rn], 359 inst.operands[2].imm.value); 360 } 361 else 362 { 363 regs[rd] = pv_add_constant (regs[rn], 364 -inst.operands[2].imm.value); 365 } 366 367 /* Did we move SP to FP? */ 368 if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM) 369 seen_stack_set = true; 370 } 371 else if (inst.opcode->iclass == addsub_ext 372 && strcmp ("sub", inst.opcode->name) == 0) 373 { 374 unsigned rd = inst.operands[0].reg.regno; 375 unsigned rn = inst.operands[1].reg.regno; 376 unsigned rm = inst.operands[2].reg.regno; 377 378 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3); 379 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP); 380 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP); 381 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_EXT); 382 383 regs[rd] = pv_subtract (regs[rn], regs[rm]); 384 } 385 else if (inst.opcode->iclass == branch_imm) 386 { 387 /* Stop analysis on branch. */ 388 break; 389 } 390 else if (inst.opcode->iclass == condbranch) 391 { 392 /* Stop analysis on branch. */ 393 break; 394 } 395 else if (inst.opcode->iclass == branch_reg) 396 { 397 /* Stop analysis on branch. */ 398 break; 399 } 400 else if (inst.opcode->iclass == compbranch) 401 { 402 /* Stop analysis on branch. */ 403 break; 404 } 405 else if (inst.opcode->op == OP_MOVZ) 406 { 407 unsigned rd = inst.operands[0].reg.regno; 408 409 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2); 410 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd); 411 gdb_assert (inst.operands[1].type == AARCH64_OPND_HALF); 412 gdb_assert (inst.operands[1].shifter.kind == AARCH64_MOD_LSL); 413 414 /* If this shows up before we set the stack, keep going. Otherwise 415 stop the analysis. */ 416 if (seen_stack_set) 417 break; 418 419 regs[rd] = pv_constant (inst.operands[1].imm.value 420 << inst.operands[1].shifter.amount); 421 } 422 else if (inst.opcode->iclass == log_shift 423 && strcmp (inst.opcode->name, "orr") == 0) 424 { 425 unsigned rd = inst.operands[0].reg.regno; 426 unsigned rn = inst.operands[1].reg.regno; 427 unsigned rm = inst.operands[2].reg.regno; 428 429 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd); 430 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn); 431 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT); 432 433 if (inst.operands[2].shifter.amount == 0 434 && rn == AARCH64_SP_REGNUM) 435 regs[rd] = regs[rm]; 436 else 437 { 438 aarch64_debug_printf ("prologue analysis gave up " 439 "addr=%s opcode=0x%x (orr x register)", 440 core_addr_to_string_nz (start), insn); 441 442 break; 443 } 444 } 445 else if (inst.opcode->op == OP_STUR) 446 { 447 unsigned rt = inst.operands[0].reg.regno; 448 unsigned rn = inst.operands[1].addr.base_regno; 449 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier); 450 451 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2); 452 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt); 453 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9); 454 gdb_assert (!inst.operands[1].addr.offset.is_reg); 455 456 stack.store 457 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm), 458 size, regs[rt]); 459 460 /* Are we storing with SP as a base? */ 461 if (rn == AARCH64_SP_REGNUM) 462 seen_stack_set = true; 463 } 464 else if ((inst.opcode->iclass == ldstpair_off 465 || (inst.opcode->iclass == ldstpair_indexed 466 && inst.operands[2].addr.preind)) 467 && strcmp ("stp", inst.opcode->name) == 0) 468 { 469 /* STP with addressing mode Pre-indexed and Base register. */ 470 unsigned rt1; 471 unsigned rt2; 472 unsigned rn = inst.operands[2].addr.base_regno; 473 int32_t imm = inst.operands[2].addr.offset.imm; 474 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier); 475 476 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt 477 || inst.operands[0].type == AARCH64_OPND_Ft); 478 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2 479 || inst.operands[1].type == AARCH64_OPND_Ft2); 480 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7); 481 gdb_assert (!inst.operands[2].addr.offset.is_reg); 482 483 /* If recording this store would invalidate the store area 484 (perhaps because rn is not known) then we should abandon 485 further prologue analysis. */ 486 if (stack.store_would_trash (pv_add_constant (regs[rn], imm))) 487 break; 488 489 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8))) 490 break; 491 492 rt1 = inst.operands[0].reg.regno; 493 rt2 = inst.operands[1].reg.regno; 494 if (inst.operands[0].type == AARCH64_OPND_Ft) 495 { 496 rt1 += AARCH64_X_REGISTER_COUNT; 497 rt2 += AARCH64_X_REGISTER_COUNT; 498 } 499 500 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]); 501 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]); 502 503 if (inst.operands[2].addr.writeback) 504 regs[rn] = pv_add_constant (regs[rn], imm); 505 506 /* Ignore the instruction that allocates stack space and sets 507 the SP. */ 508 if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback) 509 seen_stack_set = true; 510 } 511 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */ 512 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */ 513 && (inst.opcode->op == OP_STR_POS 514 || inst.opcode->op == OP_STRF_POS))) 515 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM 516 && strcmp ("str", inst.opcode->name) == 0) 517 { 518 /* STR (immediate) */ 519 unsigned int rt = inst.operands[0].reg.regno; 520 int32_t imm = inst.operands[1].addr.offset.imm; 521 unsigned int rn = inst.operands[1].addr.base_regno; 522 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier); 523 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt 524 || inst.operands[0].type == AARCH64_OPND_Ft); 525 526 if (inst.operands[0].type == AARCH64_OPND_Ft) 527 rt += AARCH64_X_REGISTER_COUNT; 528 529 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]); 530 if (inst.operands[1].addr.writeback) 531 regs[rn] = pv_add_constant (regs[rn], imm); 532 533 /* Are we storing with SP as a base? */ 534 if (rn == AARCH64_SP_REGNUM) 535 seen_stack_set = true; 536 } 537 else if (inst.opcode->iclass == testbranch) 538 { 539 /* Stop analysis on branch. */ 540 break; 541 } 542 else if (inst.opcode->iclass == ic_system) 543 { 544 aarch64_gdbarch_tdep *tdep 545 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 546 int ra_state_val = 0; 547 548 if (insn == 0xd503233f /* paciasp. */ 549 || insn == 0xd503237f /* pacibsp. */) 550 { 551 /* Return addresses are mangled. */ 552 ra_state_val = 1; 553 } 554 else if (insn == 0xd50323bf /* autiasp. */ 555 || insn == 0xd50323ff /* autibsp. */) 556 { 557 /* Return addresses are not mangled. */ 558 ra_state_val = 0; 559 } 560 else if (IS_BTI (insn)) 561 /* We don't need to do anything special for a BTI instruction. */ 562 continue; 563 else 564 { 565 aarch64_debug_printf ("prologue analysis gave up addr=%s" 566 " opcode=0x%x (iclass)", 567 core_addr_to_string_nz (start), insn); 568 break; 569 } 570 571 if (tdep->has_pauth () && cache != nullptr) 572 { 573 int regnum = tdep->ra_sign_state_regnum; 574 cache->saved_regs[regnum].set_value (ra_state_val); 575 } 576 } 577 else 578 { 579 aarch64_debug_printf ("prologue analysis gave up addr=%s" 580 " opcode=0x%x", 581 core_addr_to_string_nz (start), insn); 582 583 break; 584 } 585 } 586 587 if (cache == NULL) 588 return start; 589 590 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM)) 591 { 592 /* Frame pointer is fp. Frame size is constant. */ 593 cache->framereg = AARCH64_FP_REGNUM; 594 cache->framesize = -regs[AARCH64_FP_REGNUM].k; 595 } 596 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM)) 597 { 598 /* Try the stack pointer. */ 599 cache->framesize = -regs[AARCH64_SP_REGNUM].k; 600 cache->framereg = AARCH64_SP_REGNUM; 601 } 602 else 603 { 604 /* We're just out of luck. We don't know where the frame is. */ 605 cache->framereg = -1; 606 cache->framesize = 0; 607 } 608 609 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++) 610 { 611 CORE_ADDR offset; 612 613 if (stack.find_reg (gdbarch, i, &offset)) 614 cache->saved_regs[i].set_addr (offset); 615 } 616 617 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++) 618 { 619 int regnum = gdbarch_num_regs (gdbarch); 620 CORE_ADDR offset; 621 622 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT, 623 &offset)) 624 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset); 625 } 626 627 return start; 628 } 629 630 static CORE_ADDR 631 aarch64_analyze_prologue (struct gdbarch *gdbarch, 632 CORE_ADDR start, CORE_ADDR limit, 633 struct aarch64_prologue_cache *cache) 634 { 635 instruction_reader reader; 636 637 return aarch64_analyze_prologue (gdbarch, start, limit, cache, 638 reader); 639 } 640 641 #if GDB_SELF_TEST 642 643 namespace selftests { 644 645 /* Instruction reader from manually cooked instruction sequences. */ 646 647 class instruction_reader_test : public abstract_instruction_reader 648 { 649 public: 650 template<size_t SIZE> 651 explicit instruction_reader_test (const uint32_t (&insns)[SIZE]) 652 : m_insns (insns), m_insns_size (SIZE) 653 {} 654 655 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order) 656 override 657 { 658 SELF_CHECK (len == 4); 659 SELF_CHECK (memaddr % 4 == 0); 660 SELF_CHECK (memaddr / 4 < m_insns_size); 661 662 return m_insns[memaddr / 4]; 663 } 664 665 private: 666 const uint32_t *m_insns; 667 size_t m_insns_size; 668 }; 669 670 static void 671 aarch64_analyze_prologue_test (void) 672 { 673 struct gdbarch_info info; 674 675 info.bfd_arch_info = bfd_scan_arch ("aarch64"); 676 677 struct gdbarch *gdbarch = gdbarch_find_by_info (info); 678 SELF_CHECK (gdbarch != NULL); 679 680 struct aarch64_prologue_cache cache; 681 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch); 682 683 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 684 685 /* Test the simple prologue in which frame pointer is used. */ 686 { 687 static const uint32_t insns[] = { 688 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */ 689 0x910003fd, /* mov x29, sp */ 690 0x97ffffe6, /* bl 0x400580 */ 691 }; 692 instruction_reader_test reader (insns); 693 694 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); 695 SELF_CHECK (end == 4 * 2); 696 697 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM); 698 SELF_CHECK (cache.framesize == 272); 699 700 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++) 701 { 702 if (i == AARCH64_FP_REGNUM) 703 SELF_CHECK (cache.saved_regs[i].addr () == -272); 704 else if (i == AARCH64_LR_REGNUM) 705 SELF_CHECK (cache.saved_regs[i].addr () == -264); 706 else 707 SELF_CHECK (cache.saved_regs[i].is_realreg () 708 && cache.saved_regs[i].realreg () == i); 709 } 710 711 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++) 712 { 713 int num_regs = gdbarch_num_regs (gdbarch); 714 int regnum = i + num_regs + AARCH64_D0_REGNUM; 715 716 SELF_CHECK (cache.saved_regs[regnum].is_realreg () 717 && cache.saved_regs[regnum].realreg () == regnum); 718 } 719 } 720 721 /* Test a prologue in which STR is used and frame pointer is not 722 used. */ 723 { 724 static const uint32_t insns[] = { 725 0xf81d0ff3, /* str x19, [sp, #-48]! */ 726 0xb9002fe0, /* str w0, [sp, #44] */ 727 0xf90013e1, /* str x1, [sp, #32]*/ 728 0xfd000fe0, /* str d0, [sp, #24] */ 729 0xaa0203f3, /* mov x19, x2 */ 730 0xf94013e0, /* ldr x0, [sp, #32] */ 731 }; 732 instruction_reader_test reader (insns); 733 734 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); 735 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); 736 737 SELF_CHECK (end == 4 * 5); 738 739 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM); 740 SELF_CHECK (cache.framesize == 48); 741 742 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++) 743 { 744 if (i == 1) 745 SELF_CHECK (cache.saved_regs[i].addr () == -16); 746 else if (i == 19) 747 SELF_CHECK (cache.saved_regs[i].addr () == -48); 748 else 749 SELF_CHECK (cache.saved_regs[i].is_realreg () 750 && cache.saved_regs[i].realreg () == i); 751 } 752 753 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++) 754 { 755 int num_regs = gdbarch_num_regs (gdbarch); 756 int regnum = i + num_regs + AARCH64_D0_REGNUM; 757 758 759 if (i == 0) 760 SELF_CHECK (cache.saved_regs[regnum].addr () == -24); 761 else 762 SELF_CHECK (cache.saved_regs[regnum].is_realreg () 763 && cache.saved_regs[regnum].realreg () == regnum); 764 } 765 } 766 767 /* Test handling of movz before setting the frame pointer. */ 768 { 769 static const uint32_t insns[] = { 770 0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */ 771 0x52800020, /* mov w0, #0x1 */ 772 0x910003fd, /* mov x29, sp */ 773 0x528000a2, /* mov w2, #0x5 */ 774 0x97fffff8, /* bl 6e4 */ 775 }; 776 777 instruction_reader_test reader (insns); 778 779 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); 780 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); 781 782 /* We should stop at the 4th instruction. */ 783 SELF_CHECK (end == (4 - 1) * 4); 784 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM); 785 SELF_CHECK (cache.framesize == 16); 786 } 787 788 /* Test handling of movz/stp when using the stack pointer as frame 789 pointer. */ 790 { 791 static const uint32_t insns[] = { 792 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */ 793 0x52800020, /* mov w0, #0x1 */ 794 0x290207e0, /* stp w0, w1, [sp, #16] */ 795 0xa9018fe2, /* stp x2, x3, [sp, #24] */ 796 0x528000a2, /* mov w2, #0x5 */ 797 0x97fffff8, /* bl 6e4 */ 798 }; 799 800 instruction_reader_test reader (insns); 801 802 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); 803 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); 804 805 /* We should stop at the 5th instruction. */ 806 SELF_CHECK (end == (5 - 1) * 4); 807 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM); 808 SELF_CHECK (cache.framesize == 64); 809 } 810 811 /* Test handling of movz/str when using the stack pointer as frame 812 pointer */ 813 { 814 static const uint32_t insns[] = { 815 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */ 816 0x52800020, /* mov w0, #0x1 */ 817 0xb9002be4, /* str w4, [sp, #40] */ 818 0xf9001be5, /* str x5, [sp, #48] */ 819 0x528000a2, /* mov w2, #0x5 */ 820 0x97fffff8, /* bl 6e4 */ 821 }; 822 823 instruction_reader_test reader (insns); 824 825 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); 826 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); 827 828 /* We should stop at the 5th instruction. */ 829 SELF_CHECK (end == (5 - 1) * 4); 830 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM); 831 SELF_CHECK (cache.framesize == 64); 832 } 833 834 /* Test handling of movz/stur when using the stack pointer as frame 835 pointer. */ 836 { 837 static const uint32_t insns[] = { 838 0xa9bc7bfd, /* stp x29, x30, [sp, #-64]! */ 839 0x52800020, /* mov w0, #0x1 */ 840 0xb80343e6, /* stur w6, [sp, #52] */ 841 0xf80383e7, /* stur x7, [sp, #56] */ 842 0x528000a2, /* mov w2, #0x5 */ 843 0x97fffff8, /* bl 6e4 */ 844 }; 845 846 instruction_reader_test reader (insns); 847 848 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); 849 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); 850 851 /* We should stop at the 5th instruction. */ 852 SELF_CHECK (end == (5 - 1) * 4); 853 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM); 854 SELF_CHECK (cache.framesize == 64); 855 } 856 857 /* Test handling of movz when there is no frame pointer set or no stack 858 pointer used. */ 859 { 860 static const uint32_t insns[] = { 861 0xa9bf7bfd, /* stp x29, x30, [sp, #-16]! */ 862 0x52800020, /* mov w0, #0x1 */ 863 0x528000a2, /* mov w2, #0x5 */ 864 0x97fffff8, /* bl 6e4 */ 865 }; 866 867 instruction_reader_test reader (insns); 868 869 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); 870 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); 871 872 /* We should stop at the 4th instruction. */ 873 SELF_CHECK (end == (4 - 1) * 4); 874 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM); 875 SELF_CHECK (cache.framesize == 16); 876 } 877 878 /* Test a prologue in which there is a return address signing instruction. */ 879 if (tdep->has_pauth ()) 880 { 881 static const uint32_t insns[] = { 882 0xd503233f, /* paciasp */ 883 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */ 884 0x910003fd, /* mov x29, sp */ 885 0xf801c3f3, /* str x19, [sp, #28] */ 886 0xb9401fa0, /* ldr x19, [x29, #28] */ 887 }; 888 instruction_reader_test reader (insns); 889 890 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); 891 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, 892 reader); 893 894 SELF_CHECK (end == 4 * 4); 895 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM); 896 SELF_CHECK (cache.framesize == 48); 897 898 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++) 899 { 900 if (i == 19) 901 SELF_CHECK (cache.saved_regs[i].addr () == -20); 902 else if (i == AARCH64_FP_REGNUM) 903 SELF_CHECK (cache.saved_regs[i].addr () == -48); 904 else if (i == AARCH64_LR_REGNUM) 905 SELF_CHECK (cache.saved_regs[i].addr () == -40); 906 else 907 SELF_CHECK (cache.saved_regs[i].is_realreg () 908 && cache.saved_regs[i].realreg () == i); 909 } 910 911 if (tdep->has_pauth ()) 912 { 913 int regnum = tdep->ra_sign_state_regnum; 914 SELF_CHECK (cache.saved_regs[regnum].is_value ()); 915 } 916 } 917 918 /* Test a prologue with a BTI instruction. */ 919 { 920 static const uint32_t insns[] = { 921 0xd503245f, /* bti */ 922 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */ 923 0x910003fd, /* mov x29, sp */ 924 0xf801c3f3, /* str x19, [sp, #28] */ 925 0xb9401fa0, /* ldr x19, [x29, #28] */ 926 }; 927 instruction_reader_test reader (insns); 928 929 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); 930 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, 931 reader); 932 933 SELF_CHECK (end == 4 * 4); 934 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM); 935 SELF_CHECK (cache.framesize == 48); 936 937 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++) 938 { 939 if (i == 19) 940 SELF_CHECK (cache.saved_regs[i].addr () == -20); 941 else if (i == AARCH64_FP_REGNUM) 942 SELF_CHECK (cache.saved_regs[i].addr () == -48); 943 else if (i == AARCH64_LR_REGNUM) 944 SELF_CHECK (cache.saved_regs[i].addr () == -40); 945 else 946 SELF_CHECK (cache.saved_regs[i].is_realreg () 947 && cache.saved_regs[i].realreg () == i); 948 } 949 } 950 } 951 } // namespace selftests 952 #endif /* GDB_SELF_TEST */ 953 954 /* Implement the "skip_prologue" gdbarch method. */ 955 956 static CORE_ADDR 957 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc) 958 { 959 CORE_ADDR func_addr, func_end_addr, limit_pc; 960 961 /* See if we can determine the end of the prologue via the symbol 962 table. If so, then return either PC, or the PC after the 963 prologue, whichever is greater. */ 964 bool func_addr_found 965 = find_pc_partial_function (pc, NULL, &func_addr, &func_end_addr); 966 967 if (func_addr_found) 968 { 969 CORE_ADDR post_prologue_pc 970 = skip_prologue_using_sal (gdbarch, func_addr); 971 972 if (post_prologue_pc != 0) 973 return std::max (pc, post_prologue_pc); 974 } 975 976 /* Can't determine prologue from the symbol table, need to examine 977 instructions. */ 978 979 /* Find an upper limit on the function prologue using the debug 980 information. If the debug information could not be used to 981 provide that bound, then use an arbitrary large number as the 982 upper bound. */ 983 limit_pc = skip_prologue_using_sal (gdbarch, pc); 984 if (limit_pc == 0) 985 limit_pc = pc + 128; /* Magic. */ 986 987 limit_pc 988 = func_end_addr == 0 ? limit_pc : std::min (limit_pc, func_end_addr - 4); 989 990 /* Try disassembling prologue. */ 991 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL); 992 } 993 994 /* Scan the function prologue for THIS_FRAME and populate the prologue 995 cache CACHE. */ 996 997 static void 998 aarch64_scan_prologue (const frame_info_ptr &this_frame, 999 struct aarch64_prologue_cache *cache) 1000 { 1001 CORE_ADDR block_addr = get_frame_address_in_block (this_frame); 1002 CORE_ADDR prologue_start; 1003 CORE_ADDR prologue_end; 1004 CORE_ADDR prev_pc = get_frame_pc (this_frame); 1005 struct gdbarch *gdbarch = get_frame_arch (this_frame); 1006 1007 cache->prev_pc = prev_pc; 1008 1009 /* Assume we do not find a frame. */ 1010 cache->framereg = -1; 1011 cache->framesize = 0; 1012 1013 if (find_pc_partial_function (block_addr, NULL, &prologue_start, 1014 &prologue_end)) 1015 { 1016 struct symtab_and_line sal = find_pc_line (prologue_start, 0); 1017 1018 if (sal.line == 0) 1019 { 1020 /* No line info so use the current PC. */ 1021 prologue_end = prev_pc; 1022 } 1023 else if (sal.end < prologue_end) 1024 { 1025 /* The next line begins after the function end. */ 1026 prologue_end = sal.end; 1027 } 1028 1029 prologue_end = std::min (prologue_end, prev_pc); 1030 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache); 1031 } 1032 else 1033 { 1034 CORE_ADDR frame_loc; 1035 1036 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM); 1037 if (frame_loc == 0) 1038 return; 1039 1040 cache->framereg = AARCH64_FP_REGNUM; 1041 cache->framesize = 16; 1042 cache->saved_regs[29].set_addr (0); 1043 cache->saved_regs[30].set_addr (8); 1044 } 1045 } 1046 1047 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This 1048 function may throw an exception if the inferior's registers or memory is 1049 not available. */ 1050 1051 static void 1052 aarch64_make_prologue_cache_1 (const frame_info_ptr &this_frame, 1053 struct aarch64_prologue_cache *cache) 1054 { 1055 CORE_ADDR unwound_fp; 1056 int reg; 1057 1058 aarch64_scan_prologue (this_frame, cache); 1059 1060 if (cache->framereg == -1) 1061 return; 1062 1063 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg); 1064 if (unwound_fp == 0) 1065 return; 1066 1067 cache->prev_sp = unwound_fp; 1068 if (!aarch64_stack_frame_destroyed_p (get_frame_arch (this_frame), 1069 cache->prev_pc)) 1070 cache->prev_sp += cache->framesize; 1071 1072 /* Calculate actual addresses of saved registers using offsets 1073 determined by aarch64_analyze_prologue. */ 1074 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++) 1075 if (cache->saved_regs[reg].is_addr ()) 1076 cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr () 1077 + cache->prev_sp); 1078 1079 cache->func = get_frame_func (this_frame); 1080 1081 cache->available_p = 1; 1082 } 1083 1084 /* Allocate and fill in *THIS_CACHE with information about the prologue of 1085 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated. 1086 Return a pointer to the current aarch64_prologue_cache in 1087 *THIS_CACHE. */ 1088 1089 static struct aarch64_prologue_cache * 1090 aarch64_make_prologue_cache (const frame_info_ptr &this_frame, void **this_cache) 1091 { 1092 struct aarch64_prologue_cache *cache; 1093 1094 if (*this_cache != NULL) 1095 return (struct aarch64_prologue_cache *) *this_cache; 1096 1097 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache); 1098 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame); 1099 *this_cache = cache; 1100 1101 try 1102 { 1103 aarch64_make_prologue_cache_1 (this_frame, cache); 1104 } 1105 catch (const gdb_exception_error &ex) 1106 { 1107 if (ex.error != NOT_AVAILABLE_ERROR) 1108 throw; 1109 } 1110 1111 return cache; 1112 } 1113 1114 /* Implement the "stop_reason" frame_unwind method. */ 1115 1116 static enum unwind_stop_reason 1117 aarch64_prologue_frame_unwind_stop_reason (const frame_info_ptr &this_frame, 1118 void **this_cache) 1119 { 1120 struct aarch64_prologue_cache *cache 1121 = aarch64_make_prologue_cache (this_frame, this_cache); 1122 1123 if (!cache->available_p) 1124 return UNWIND_UNAVAILABLE; 1125 1126 /* Halt the backtrace at "_start". */ 1127 gdbarch *arch = get_frame_arch (this_frame); 1128 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch); 1129 if (cache->prev_pc <= tdep->lowest_pc) 1130 return UNWIND_OUTERMOST; 1131 1132 /* We've hit a wall, stop. */ 1133 if (cache->prev_sp == 0) 1134 return UNWIND_OUTERMOST; 1135 1136 return UNWIND_NO_REASON; 1137 } 1138 1139 /* Our frame ID for a normal frame is the current function's starting 1140 PC and the caller's SP when we were called. */ 1141 1142 static void 1143 aarch64_prologue_this_id (const frame_info_ptr &this_frame, 1144 void **this_cache, struct frame_id *this_id) 1145 { 1146 struct aarch64_prologue_cache *cache 1147 = aarch64_make_prologue_cache (this_frame, this_cache); 1148 1149 if (!cache->available_p) 1150 *this_id = frame_id_build_unavailable_stack (cache->func); 1151 else 1152 *this_id = frame_id_build (cache->prev_sp, cache->func); 1153 } 1154 1155 /* Implement the "prev_register" frame_unwind method. */ 1156 1157 static struct value * 1158 aarch64_prologue_prev_register (const frame_info_ptr &this_frame, 1159 void **this_cache, int prev_regnum) 1160 { 1161 struct aarch64_prologue_cache *cache 1162 = aarch64_make_prologue_cache (this_frame, this_cache); 1163 1164 /* If we are asked to unwind the PC, then we need to return the LR 1165 instead. The prologue may save PC, but it will point into this 1166 frame's prologue, not the next frame's resume location. */ 1167 if (prev_regnum == AARCH64_PC_REGNUM) 1168 { 1169 CORE_ADDR lr; 1170 struct gdbarch *gdbarch = get_frame_arch (this_frame); 1171 aarch64_gdbarch_tdep *tdep 1172 = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 1173 1174 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM); 1175 1176 if (tdep->has_pauth () 1177 && cache->saved_regs[tdep->ra_sign_state_regnum].is_value ()) 1178 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr); 1179 1180 return frame_unwind_got_constant (this_frame, prev_regnum, lr); 1181 } 1182 1183 /* SP is generally not saved to the stack, but this frame is 1184 identified by the next frame's stack pointer at the time of the 1185 call. The value was already reconstructed into PREV_SP. */ 1186 /* 1187 +----------+ ^ 1188 | saved lr | | 1189 +->| saved fp |--+ 1190 | | | 1191 | | | <- Previous SP 1192 | +----------+ 1193 | | saved lr | 1194 +--| saved fp |<- FP 1195 | | 1196 | |<- SP 1197 +----------+ */ 1198 if (prev_regnum == AARCH64_SP_REGNUM) 1199 return frame_unwind_got_constant (this_frame, prev_regnum, 1200 cache->prev_sp); 1201 1202 return trad_frame_get_prev_register (this_frame, cache->saved_regs, 1203 prev_regnum); 1204 } 1205 1206 /* AArch64 prologue unwinder. */ 1207 static frame_unwind aarch64_prologue_unwind = 1208 { 1209 "aarch64 prologue", 1210 NORMAL_FRAME, 1211 aarch64_prologue_frame_unwind_stop_reason, 1212 aarch64_prologue_this_id, 1213 aarch64_prologue_prev_register, 1214 NULL, 1215 default_frame_sniffer 1216 }; 1217 1218 /* Allocate and fill in *THIS_CACHE with information about the prologue of 1219 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated. 1220 Return a pointer to the current aarch64_prologue_cache in 1221 *THIS_CACHE. */ 1222 1223 static struct aarch64_prologue_cache * 1224 aarch64_make_stub_cache (const frame_info_ptr &this_frame, void **this_cache) 1225 { 1226 struct aarch64_prologue_cache *cache; 1227 1228 if (*this_cache != NULL) 1229 return (struct aarch64_prologue_cache *) *this_cache; 1230 1231 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache); 1232 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame); 1233 *this_cache = cache; 1234 1235 try 1236 { 1237 cache->prev_sp = get_frame_register_unsigned (this_frame, 1238 AARCH64_SP_REGNUM); 1239 cache->prev_pc = get_frame_pc (this_frame); 1240 cache->available_p = 1; 1241 } 1242 catch (const gdb_exception_error &ex) 1243 { 1244 if (ex.error != NOT_AVAILABLE_ERROR) 1245 throw; 1246 } 1247 1248 return cache; 1249 } 1250 1251 /* Implement the "stop_reason" frame_unwind method. */ 1252 1253 static enum unwind_stop_reason 1254 aarch64_stub_frame_unwind_stop_reason (const frame_info_ptr &this_frame, 1255 void **this_cache) 1256 { 1257 struct aarch64_prologue_cache *cache 1258 = aarch64_make_stub_cache (this_frame, this_cache); 1259 1260 if (!cache->available_p) 1261 return UNWIND_UNAVAILABLE; 1262 1263 return UNWIND_NO_REASON; 1264 } 1265 1266 /* Our frame ID for a stub frame is the current SP and LR. */ 1267 1268 static void 1269 aarch64_stub_this_id (const frame_info_ptr &this_frame, 1270 void **this_cache, struct frame_id *this_id) 1271 { 1272 struct aarch64_prologue_cache *cache 1273 = aarch64_make_stub_cache (this_frame, this_cache); 1274 1275 if (cache->available_p) 1276 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc); 1277 else 1278 *this_id = frame_id_build_unavailable_stack (cache->prev_pc); 1279 } 1280 1281 /* Implement the "sniffer" frame_unwind method. */ 1282 1283 static int 1284 aarch64_stub_unwind_sniffer (const struct frame_unwind *self, 1285 const frame_info_ptr &this_frame, 1286 void **this_prologue_cache) 1287 { 1288 CORE_ADDR addr_in_block; 1289 gdb_byte dummy[4]; 1290 1291 addr_in_block = get_frame_address_in_block (this_frame); 1292 if (in_plt_section (addr_in_block) 1293 /* We also use the stub winder if the target memory is unreadable 1294 to avoid having the prologue unwinder trying to read it. */ 1295 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0) 1296 return 1; 1297 1298 return 0; 1299 } 1300 1301 /* AArch64 stub unwinder. */ 1302 static frame_unwind aarch64_stub_unwind = 1303 { 1304 "aarch64 stub", 1305 NORMAL_FRAME, 1306 aarch64_stub_frame_unwind_stop_reason, 1307 aarch64_stub_this_id, 1308 aarch64_prologue_prev_register, 1309 NULL, 1310 aarch64_stub_unwind_sniffer 1311 }; 1312 1313 /* Return the frame base address of *THIS_FRAME. */ 1314 1315 static CORE_ADDR 1316 aarch64_normal_frame_base (const frame_info_ptr &this_frame, void **this_cache) 1317 { 1318 struct aarch64_prologue_cache *cache 1319 = aarch64_make_prologue_cache (this_frame, this_cache); 1320 1321 return cache->prev_sp - cache->framesize; 1322 } 1323 1324 /* AArch64 default frame base information. */ 1325 static frame_base aarch64_normal_base = 1326 { 1327 &aarch64_prologue_unwind, 1328 aarch64_normal_frame_base, 1329 aarch64_normal_frame_base, 1330 aarch64_normal_frame_base 1331 }; 1332 1333 /* Return the value of the REGNUM register in the previous frame of 1334 *THIS_FRAME. */ 1335 1336 static struct value * 1337 aarch64_dwarf2_prev_register (const frame_info_ptr &this_frame, 1338 void **this_cache, int regnum) 1339 { 1340 gdbarch *arch = get_frame_arch (this_frame); 1341 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (arch); 1342 CORE_ADDR lr; 1343 1344 switch (regnum) 1345 { 1346 case AARCH64_PC_REGNUM: 1347 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM); 1348 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr); 1349 return frame_unwind_got_constant (this_frame, regnum, lr); 1350 1351 default: 1352 internal_error (_("Unexpected register %d"), regnum); 1353 } 1354 } 1355 1356 static const unsigned char op_lit0 = DW_OP_lit0; 1357 static const unsigned char op_lit1 = DW_OP_lit1; 1358 1359 /* Implement the "init_reg" dwarf2_frame_ops method. */ 1360 1361 static void 1362 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum, 1363 struct dwarf2_frame_state_reg *reg, 1364 const frame_info_ptr &this_frame) 1365 { 1366 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 1367 1368 switch (regnum) 1369 { 1370 case AARCH64_PC_REGNUM: 1371 reg->how = DWARF2_FRAME_REG_FN; 1372 reg->loc.fn = aarch64_dwarf2_prev_register; 1373 return; 1374 1375 case AARCH64_SP_REGNUM: 1376 reg->how = DWARF2_FRAME_REG_CFA; 1377 return; 1378 } 1379 1380 /* Init pauth registers. */ 1381 if (tdep->has_pauth ()) 1382 { 1383 if (regnum == tdep->ra_sign_state_regnum) 1384 { 1385 /* Initialize RA_STATE to zero. */ 1386 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP; 1387 reg->loc.exp.start = &op_lit0; 1388 reg->loc.exp.len = 1; 1389 return; 1390 } 1391 else if (regnum >= tdep->pauth_reg_base 1392 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count) 1393 { 1394 reg->how = DWARF2_FRAME_REG_SAME_VALUE; 1395 return; 1396 } 1397 } 1398 } 1399 1400 /* Implement the execute_dwarf_cfa_vendor_op method. */ 1401 1402 static bool 1403 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op, 1404 struct dwarf2_frame_state *fs) 1405 { 1406 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 1407 struct dwarf2_frame_state_reg *ra_state; 1408 1409 if (op == DW_CFA_AARCH64_negate_ra_state) 1410 { 1411 /* On systems without pauth, treat as a nop. */ 1412 if (!tdep->has_pauth ()) 1413 return true; 1414 1415 /* Allocate RA_STATE column if it's not allocated yet. */ 1416 fs->regs.alloc_regs (AARCH64_DWARF_RA_SIGN_STATE + 1); 1417 1418 /* Toggle the status of RA_STATE between 0 and 1. */ 1419 ra_state = &(fs->regs.reg[AARCH64_DWARF_RA_SIGN_STATE]); 1420 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP; 1421 1422 if (ra_state->loc.exp.start == nullptr 1423 || ra_state->loc.exp.start == &op_lit0) 1424 ra_state->loc.exp.start = &op_lit1; 1425 else 1426 ra_state->loc.exp.start = &op_lit0; 1427 1428 ra_state->loc.exp.len = 1; 1429 1430 return true; 1431 } 1432 1433 return false; 1434 } 1435 1436 /* Used for matching BRK instructions for AArch64. */ 1437 static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f; 1438 static constexpr uint32_t BRK_INSN_BASE = 0xd4200000; 1439 1440 /* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */ 1441 1442 static bool 1443 aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address) 1444 { 1445 const uint32_t insn_len = 4; 1446 gdb_byte target_mem[4]; 1447 1448 /* Enable the automatic memory restoration from breakpoints while 1449 we read the memory. Otherwise we may find temporary breakpoints, ones 1450 inserted by GDB, and flag them as permanent breakpoints. */ 1451 scoped_restore restore_memory 1452 = make_scoped_restore_show_memory_breakpoints (0); 1453 1454 if (target_read_memory (address, target_mem, insn_len) == 0) 1455 { 1456 uint32_t insn = 1457 (uint32_t) extract_unsigned_integer (target_mem, insn_len, 1458 gdbarch_byte_order_for_code (gdbarch)); 1459 1460 /* Check if INSN is a BRK instruction pattern. There are multiple choices 1461 of such instructions with different immediate values. Different OS' 1462 may use a different variation, but they have the same outcome. */ 1463 return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE); 1464 } 1465 1466 return false; 1467 } 1468 1469 /* When arguments must be pushed onto the stack, they go on in reverse 1470 order. The code below implements a FILO (stack) to do this. */ 1471 1472 struct stack_item_t 1473 { 1474 /* Value to pass on stack. It can be NULL if this item is for stack 1475 padding. */ 1476 const gdb_byte *data; 1477 1478 /* Size in bytes of value to pass on stack. */ 1479 int len; 1480 }; 1481 1482 /* Implement the gdbarch type alignment method, overrides the generic 1483 alignment algorithm for anything that is aarch64 specific. */ 1484 1485 static ULONGEST 1486 aarch64_type_align (gdbarch *gdbarch, struct type *t) 1487 { 1488 t = check_typedef (t); 1489 if (t->code () == TYPE_CODE_ARRAY && t->is_vector ()) 1490 { 1491 /* Use the natural alignment for vector types (the same for 1492 scalar type), but the maximum alignment is 128-bit. */ 1493 if (t->length () > 16) 1494 return 16; 1495 else 1496 return t->length (); 1497 } 1498 1499 /* Allow the common code to calculate the alignment. */ 1500 return 0; 1501 } 1502 1503 /* Worker function for aapcs_is_vfp_call_or_return_candidate. 1504 1505 Return the number of register required, or -1 on failure. 1506 1507 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it 1508 to the element, else fail if the type of this element does not match the 1509 existing value. */ 1510 1511 static int 1512 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type, 1513 struct type **fundamental_type) 1514 { 1515 if (type == nullptr) 1516 return -1; 1517 1518 switch (type->code ()) 1519 { 1520 case TYPE_CODE_FLT: 1521 case TYPE_CODE_DECFLOAT: 1522 if (type->length () > 16) 1523 return -1; 1524 1525 if (*fundamental_type == nullptr) 1526 *fundamental_type = type; 1527 else if (type->length () != (*fundamental_type)->length () 1528 || type->code () != (*fundamental_type)->code ()) 1529 return -1; 1530 1531 return 1; 1532 1533 case TYPE_CODE_COMPLEX: 1534 { 1535 struct type *target_type = check_typedef (type->target_type ()); 1536 if (target_type->length () > 16) 1537 return -1; 1538 1539 if (*fundamental_type == nullptr) 1540 *fundamental_type = target_type; 1541 else if (target_type->length () != (*fundamental_type)->length () 1542 || target_type->code () != (*fundamental_type)->code ()) 1543 return -1; 1544 1545 return 2; 1546 } 1547 1548 case TYPE_CODE_ARRAY: 1549 { 1550 if (type->is_vector ()) 1551 { 1552 if (type->length () != 8 && type->length () != 16) 1553 return -1; 1554 1555 if (*fundamental_type == nullptr) 1556 *fundamental_type = type; 1557 else if (type->length () != (*fundamental_type)->length () 1558 || type->code () != (*fundamental_type)->code ()) 1559 return -1; 1560 1561 return 1; 1562 } 1563 else 1564 { 1565 struct type *target_type = type->target_type (); 1566 int count = aapcs_is_vfp_call_or_return_candidate_1 1567 (target_type, fundamental_type); 1568 1569 if (count == -1) 1570 return count; 1571 1572 count *= (type->length () / target_type->length ()); 1573 return count; 1574 } 1575 } 1576 1577 case TYPE_CODE_STRUCT: 1578 case TYPE_CODE_UNION: 1579 { 1580 int count = 0; 1581 1582 for (int i = 0; i < type->num_fields (); i++) 1583 { 1584 /* Ignore any static fields. */ 1585 if (type->field (i).is_static ()) 1586 continue; 1587 1588 struct type *member = check_typedef (type->field (i).type ()); 1589 1590 int sub_count = aapcs_is_vfp_call_or_return_candidate_1 1591 (member, fundamental_type); 1592 if (sub_count == -1) 1593 return -1; 1594 count += sub_count; 1595 } 1596 1597 /* Ensure there is no padding between the fields (allowing for empty 1598 zero length structs) */ 1599 int ftype_length = (*fundamental_type == nullptr) 1600 ? 0 : (*fundamental_type)->length (); 1601 if (count * ftype_length != type->length ()) 1602 return -1; 1603 1604 return count; 1605 } 1606 1607 default: 1608 break; 1609 } 1610 1611 return -1; 1612 } 1613 1614 /* Return true if an argument, whose type is described by TYPE, can be passed or 1615 returned in simd/fp registers, providing enough parameter passing registers 1616 are available. This is as described in the AAPCS64. 1617 1618 Upon successful return, *COUNT returns the number of needed registers, 1619 *FUNDAMENTAL_TYPE contains the type of those registers. 1620 1621 Candidate as per the AAPCS64 5.4.2.C is either a: 1622 - float. 1623 - short-vector. 1624 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where 1625 all the members are floats and has at most 4 members. 1626 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where 1627 all the members are short vectors and has at most 4 members. 1628 - Complex (7.1.1) 1629 1630 Note that HFAs and HVAs can include nested structures and arrays. */ 1631 1632 static bool 1633 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count, 1634 struct type **fundamental_type) 1635 { 1636 if (type == nullptr) 1637 return false; 1638 1639 *fundamental_type = nullptr; 1640 1641 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type, 1642 fundamental_type); 1643 1644 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS) 1645 { 1646 *count = ag_count; 1647 return true; 1648 } 1649 else 1650 return false; 1651 } 1652 1653 /* AArch64 function call information structure. */ 1654 struct aarch64_call_info 1655 { 1656 /* the current argument number. */ 1657 unsigned argnum = 0; 1658 1659 /* The next general purpose register number, equivalent to NGRN as 1660 described in the AArch64 Procedure Call Standard. */ 1661 unsigned ngrn = 0; 1662 1663 /* The next SIMD and floating point register number, equivalent to 1664 NSRN as described in the AArch64 Procedure Call Standard. */ 1665 unsigned nsrn = 0; 1666 1667 /* The next stacked argument address, equivalent to NSAA as 1668 described in the AArch64 Procedure Call Standard. */ 1669 unsigned nsaa = 0; 1670 1671 /* Stack item vector. */ 1672 std::vector<stack_item_t> si; 1673 }; 1674 1675 /* Pass a value in a sequence of consecutive X registers. The caller 1676 is responsible for ensuring sufficient registers are available. */ 1677 1678 static void 1679 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache, 1680 struct aarch64_call_info *info, struct type *type, 1681 struct value *arg) 1682 { 1683 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 1684 int len = type->length (); 1685 enum type_code typecode = type->code (); 1686 int regnum = AARCH64_X0_REGNUM + info->ngrn; 1687 const bfd_byte *buf = arg->contents ().data (); 1688 1689 info->argnum++; 1690 1691 while (len > 0) 1692 { 1693 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE; 1694 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len, 1695 byte_order); 1696 1697 1698 /* Adjust sub-word struct/union args when big-endian. */ 1699 if (byte_order == BFD_ENDIAN_BIG 1700 && partial_len < X_REGISTER_SIZE 1701 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION)) 1702 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT); 1703 1704 aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum, 1705 gdbarch_register_name (gdbarch, regnum), 1706 phex (regval, X_REGISTER_SIZE)); 1707 1708 regcache_cooked_write_unsigned (regcache, regnum, regval); 1709 len -= partial_len; 1710 buf += partial_len; 1711 regnum++; 1712 } 1713 } 1714 1715 /* Attempt to marshall a value in a V register. Return 1 if 1716 successful, or 0 if insufficient registers are available. This 1717 function, unlike the equivalent pass_in_x() function does not 1718 handle arguments spread across multiple registers. */ 1719 1720 static int 1721 pass_in_v (struct gdbarch *gdbarch, 1722 struct regcache *regcache, 1723 struct aarch64_call_info *info, 1724 int len, const bfd_byte *buf) 1725 { 1726 if (info->nsrn < 8) 1727 { 1728 int regnum = AARCH64_V0_REGNUM + info->nsrn; 1729 /* Enough space for a full vector register. */ 1730 gdb_byte reg[register_size (gdbarch, regnum)]; 1731 gdb_assert (len <= sizeof (reg)); 1732 1733 info->argnum++; 1734 info->nsrn++; 1735 1736 memset (reg, 0, sizeof (reg)); 1737 /* PCS C.1, the argument is allocated to the least significant 1738 bits of V register. */ 1739 memcpy (reg, buf, len); 1740 regcache->cooked_write (regnum, reg); 1741 1742 aarch64_debug_printf ("arg %d in %s", info->argnum, 1743 gdbarch_register_name (gdbarch, regnum)); 1744 1745 return 1; 1746 } 1747 info->nsrn = 8; 1748 return 0; 1749 } 1750 1751 /* Marshall an argument onto the stack. */ 1752 1753 static void 1754 pass_on_stack (struct aarch64_call_info *info, struct type *type, 1755 struct value *arg) 1756 { 1757 const bfd_byte *buf = arg->contents ().data (); 1758 int len = type->length (); 1759 int align; 1760 stack_item_t item; 1761 1762 info->argnum++; 1763 1764 align = type_align (type); 1765 1766 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the 1767 Natural alignment of the argument's type. */ 1768 align = align_up (align, 8); 1769 1770 /* The AArch64 PCS requires at most doubleword alignment. */ 1771 if (align > 16) 1772 align = 16; 1773 1774 aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len, 1775 info->nsaa); 1776 1777 item.len = len; 1778 item.data = buf; 1779 info->si.push_back (item); 1780 1781 info->nsaa += len; 1782 if (info->nsaa & (align - 1)) 1783 { 1784 /* Push stack alignment padding. */ 1785 int pad = align - (info->nsaa & (align - 1)); 1786 1787 item.len = pad; 1788 item.data = NULL; 1789 1790 info->si.push_back (item); 1791 info->nsaa += pad; 1792 } 1793 } 1794 1795 /* Marshall an argument into a sequence of one or more consecutive X 1796 registers or, if insufficient X registers are available then onto 1797 the stack. */ 1798 1799 static void 1800 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache, 1801 struct aarch64_call_info *info, struct type *type, 1802 struct value *arg) 1803 { 1804 int len = type->length (); 1805 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE; 1806 1807 /* PCS C.13 - Pass in registers if we have enough spare */ 1808 if (info->ngrn + nregs <= 8) 1809 { 1810 pass_in_x (gdbarch, regcache, info, type, arg); 1811 info->ngrn += nregs; 1812 } 1813 else 1814 { 1815 info->ngrn = 8; 1816 pass_on_stack (info, type, arg); 1817 } 1818 } 1819 1820 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a 1821 aapcs_is_vfp_call_or_return_candidate and there are enough spare V 1822 registers. A return value of false is an error state as the value will have 1823 been partially passed to the stack. */ 1824 static bool 1825 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache, 1826 struct aarch64_call_info *info, struct type *arg_type, 1827 struct value *arg) 1828 { 1829 switch (arg_type->code ()) 1830 { 1831 case TYPE_CODE_FLT: 1832 case TYPE_CODE_DECFLOAT: 1833 return pass_in_v (gdbarch, regcache, info, arg_type->length (), 1834 arg->contents ().data ()); 1835 break; 1836 1837 case TYPE_CODE_COMPLEX: 1838 { 1839 const bfd_byte *buf = arg->contents ().data (); 1840 struct type *target_type = check_typedef (arg_type->target_type ()); 1841 1842 if (!pass_in_v (gdbarch, regcache, info, target_type->length (), 1843 buf)) 1844 return false; 1845 1846 return pass_in_v (gdbarch, regcache, info, target_type->length (), 1847 buf + target_type->length ()); 1848 } 1849 1850 case TYPE_CODE_ARRAY: 1851 if (arg_type->is_vector ()) 1852 return pass_in_v (gdbarch, regcache, info, arg_type->length (), 1853 arg->contents ().data ()); 1854 [[fallthrough]]; 1855 1856 case TYPE_CODE_STRUCT: 1857 case TYPE_CODE_UNION: 1858 for (int i = 0; i < arg_type->num_fields (); i++) 1859 { 1860 /* Don't include static fields. */ 1861 if (arg_type->field (i).is_static ()) 1862 continue; 1863 1864 struct value *field = arg->primitive_field (0, i, arg_type); 1865 struct type *field_type = check_typedef (field->type ()); 1866 1867 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type, 1868 field)) 1869 return false; 1870 } 1871 return true; 1872 1873 default: 1874 return false; 1875 } 1876 } 1877 1878 /* Implement the "push_dummy_call" gdbarch method. */ 1879 1880 static CORE_ADDR 1881 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, 1882 struct regcache *regcache, CORE_ADDR bp_addr, 1883 int nargs, 1884 struct value **args, CORE_ADDR sp, 1885 function_call_return_method return_method, 1886 CORE_ADDR struct_addr) 1887 { 1888 int argnum; 1889 struct aarch64_call_info info; 1890 1891 /* We need to know what the type of the called function is in order 1892 to determine the number of named/anonymous arguments for the 1893 actual argument placement, and the return type in order to handle 1894 return value correctly. 1895 1896 The generic code above us views the decision of return in memory 1897 or return in registers as a two stage processes. The language 1898 handler is consulted first and may decide to return in memory (eg 1899 class with copy constructor returned by value), this will cause 1900 the generic code to allocate space AND insert an initial leading 1901 argument. 1902 1903 If the language code does not decide to pass in memory then the 1904 target code is consulted. 1905 1906 If the language code decides to pass in memory we want to move 1907 the pointer inserted as the initial argument from the argument 1908 list and into X8, the conventional AArch64 struct return pointer 1909 register. */ 1910 1911 /* Set the return address. For the AArch64, the return breakpoint 1912 is always at BP_ADDR. */ 1913 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr); 1914 1915 /* If we were given an initial argument for the return slot, lose it. */ 1916 if (return_method == return_method_hidden_param) 1917 { 1918 args++; 1919 nargs--; 1920 } 1921 1922 /* The struct_return pointer occupies X8. */ 1923 if (return_method != return_method_normal) 1924 { 1925 aarch64_debug_printf ("struct return in %s = 0x%s", 1926 gdbarch_register_name 1927 (gdbarch, AARCH64_STRUCT_RETURN_REGNUM), 1928 paddress (gdbarch, struct_addr)); 1929 1930 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM, 1931 struct_addr); 1932 } 1933 1934 for (argnum = 0; argnum < nargs; argnum++) 1935 { 1936 struct value *arg = args[argnum]; 1937 struct type *arg_type, *fundamental_type; 1938 int len, elements; 1939 1940 arg_type = check_typedef (arg->type ()); 1941 len = arg_type->length (); 1942 1943 /* If arg can be passed in v registers as per the AAPCS64, then do so if 1944 if there are enough spare registers. */ 1945 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements, 1946 &fundamental_type)) 1947 { 1948 if (info.nsrn + elements <= 8) 1949 { 1950 /* We know that we have sufficient registers available therefore 1951 this will never need to fallback to the stack. */ 1952 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type, 1953 arg)) 1954 gdb_assert_not_reached ("Failed to push args"); 1955 } 1956 else 1957 { 1958 info.nsrn = 8; 1959 pass_on_stack (&info, arg_type, arg); 1960 } 1961 continue; 1962 } 1963 1964 switch (arg_type->code ()) 1965 { 1966 case TYPE_CODE_INT: 1967 case TYPE_CODE_BOOL: 1968 case TYPE_CODE_CHAR: 1969 case TYPE_CODE_RANGE: 1970 case TYPE_CODE_ENUM: 1971 if (len < 4 && !is_fixed_point_type (arg_type)) 1972 { 1973 /* Promote to 32 bit integer. */ 1974 if (arg_type->is_unsigned ()) 1975 arg_type = builtin_type (gdbarch)->builtin_uint32; 1976 else 1977 arg_type = builtin_type (gdbarch)->builtin_int32; 1978 arg = value_cast (arg_type, arg); 1979 } 1980 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg); 1981 break; 1982 1983 case TYPE_CODE_STRUCT: 1984 case TYPE_CODE_ARRAY: 1985 case TYPE_CODE_UNION: 1986 if (len > 16) 1987 { 1988 /* PCS B.7 Aggregates larger than 16 bytes are passed by 1989 invisible reference. */ 1990 1991 /* Allocate aligned storage. */ 1992 sp = align_down (sp - len, 16); 1993 1994 /* Write the real data into the stack. */ 1995 write_memory (sp, arg->contents ().data (), len); 1996 1997 /* Construct the indirection. */ 1998 arg_type = lookup_pointer_type (arg_type); 1999 arg = value_from_pointer (arg_type, sp); 2000 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg); 2001 } 2002 else 2003 /* PCS C.15 / C.18 multiple values pass. */ 2004 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg); 2005 break; 2006 2007 default: 2008 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg); 2009 break; 2010 } 2011 } 2012 2013 /* Make sure stack retains 16 byte alignment. */ 2014 if (info.nsaa & 15) 2015 sp -= 16 - (info.nsaa & 15); 2016 2017 while (!info.si.empty ()) 2018 { 2019 const stack_item_t &si = info.si.back (); 2020 2021 sp -= si.len; 2022 if (si.data != NULL) 2023 write_memory (sp, si.data, si.len); 2024 info.si.pop_back (); 2025 } 2026 2027 /* Finally, update the SP register. */ 2028 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp); 2029 2030 return sp; 2031 } 2032 2033 /* Implement the "frame_align" gdbarch method. */ 2034 2035 static CORE_ADDR 2036 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp) 2037 { 2038 /* Align the stack to sixteen bytes. */ 2039 return sp & ~(CORE_ADDR) 15; 2040 } 2041 2042 /* Return the type for an AdvSISD Q register. */ 2043 2044 static struct type * 2045 aarch64_vnq_type (struct gdbarch *gdbarch) 2046 { 2047 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2048 2049 if (tdep->vnq_type == NULL) 2050 { 2051 struct type *t; 2052 struct type *elem; 2053 2054 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq", 2055 TYPE_CODE_UNION); 2056 2057 elem = builtin_type (gdbarch)->builtin_uint128; 2058 append_composite_type_field (t, "u", elem); 2059 2060 elem = builtin_type (gdbarch)->builtin_int128; 2061 append_composite_type_field (t, "s", elem); 2062 2063 tdep->vnq_type = t; 2064 } 2065 2066 return tdep->vnq_type; 2067 } 2068 2069 /* Return the type for an AdvSISD D register. */ 2070 2071 static struct type * 2072 aarch64_vnd_type (struct gdbarch *gdbarch) 2073 { 2074 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2075 2076 if (tdep->vnd_type == NULL) 2077 { 2078 struct type *t; 2079 struct type *elem; 2080 2081 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd", 2082 TYPE_CODE_UNION); 2083 2084 elem = builtin_type (gdbarch)->builtin_double; 2085 append_composite_type_field (t, "f", elem); 2086 2087 elem = builtin_type (gdbarch)->builtin_uint64; 2088 append_composite_type_field (t, "u", elem); 2089 2090 elem = builtin_type (gdbarch)->builtin_int64; 2091 append_composite_type_field (t, "s", elem); 2092 2093 tdep->vnd_type = t; 2094 } 2095 2096 return tdep->vnd_type; 2097 } 2098 2099 /* Return the type for an AdvSISD S register. */ 2100 2101 static struct type * 2102 aarch64_vns_type (struct gdbarch *gdbarch) 2103 { 2104 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2105 2106 if (tdep->vns_type == NULL) 2107 { 2108 struct type *t; 2109 struct type *elem; 2110 2111 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns", 2112 TYPE_CODE_UNION); 2113 2114 elem = builtin_type (gdbarch)->builtin_float; 2115 append_composite_type_field (t, "f", elem); 2116 2117 elem = builtin_type (gdbarch)->builtin_uint32; 2118 append_composite_type_field (t, "u", elem); 2119 2120 elem = builtin_type (gdbarch)->builtin_int32; 2121 append_composite_type_field (t, "s", elem); 2122 2123 tdep->vns_type = t; 2124 } 2125 2126 return tdep->vns_type; 2127 } 2128 2129 /* Return the type for an AdvSISD H register. */ 2130 2131 static struct type * 2132 aarch64_vnh_type (struct gdbarch *gdbarch) 2133 { 2134 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2135 2136 if (tdep->vnh_type == NULL) 2137 { 2138 struct type *t; 2139 struct type *elem; 2140 2141 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh", 2142 TYPE_CODE_UNION); 2143 2144 elem = builtin_type (gdbarch)->builtin_bfloat16; 2145 append_composite_type_field (t, "bf", elem); 2146 2147 elem = builtin_type (gdbarch)->builtin_half; 2148 append_composite_type_field (t, "f", elem); 2149 2150 elem = builtin_type (gdbarch)->builtin_uint16; 2151 append_composite_type_field (t, "u", elem); 2152 2153 elem = builtin_type (gdbarch)->builtin_int16; 2154 append_composite_type_field (t, "s", elem); 2155 2156 tdep->vnh_type = t; 2157 } 2158 2159 return tdep->vnh_type; 2160 } 2161 2162 /* Return the type for an AdvSISD B register. */ 2163 2164 static struct type * 2165 aarch64_vnb_type (struct gdbarch *gdbarch) 2166 { 2167 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2168 2169 if (tdep->vnb_type == NULL) 2170 { 2171 struct type *t; 2172 struct type *elem; 2173 2174 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb", 2175 TYPE_CODE_UNION); 2176 2177 elem = builtin_type (gdbarch)->builtin_uint8; 2178 append_composite_type_field (t, "u", elem); 2179 2180 elem = builtin_type (gdbarch)->builtin_int8; 2181 append_composite_type_field (t, "s", elem); 2182 2183 tdep->vnb_type = t; 2184 } 2185 2186 return tdep->vnb_type; 2187 } 2188 2189 /* Return TRUE if REGNUM is a ZA tile slice pseudo-register number. Return 2190 FALSE otherwise. */ 2191 2192 static bool 2193 is_sme_tile_slice_pseudo_register (struct gdbarch *gdbarch, int regnum) 2194 { 2195 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2196 2197 gdb_assert (tdep->has_sme ()); 2198 gdb_assert (tdep->sme_svq > 0); 2199 gdb_assert (tdep->sme_pseudo_base <= regnum); 2200 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count); 2201 2202 if (tdep->sme_tile_slice_pseudo_base <= regnum 2203 && regnum < tdep->sme_tile_slice_pseudo_base 2204 + tdep->sme_tile_slice_pseudo_count) 2205 return true; 2206 2207 return false; 2208 } 2209 2210 /* Given REGNUM, a ZA pseudo-register number, return, in ENCODING, the 2211 decoded fields that make up its name. */ 2212 2213 static void 2214 aarch64_za_decode_pseudos (struct gdbarch *gdbarch, int regnum, 2215 struct za_pseudo_encoding &encoding) 2216 { 2217 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2218 2219 gdb_assert (tdep->has_sme ()); 2220 gdb_assert (tdep->sme_svq > 0); 2221 gdb_assert (tdep->sme_pseudo_base <= regnum); 2222 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count); 2223 2224 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum)) 2225 { 2226 /* Calculate the tile slice pseudo-register offset relative to the other 2227 tile slice pseudo-registers. */ 2228 int offset = regnum - tdep->sme_tile_slice_pseudo_base; 2229 2230 /* Fetch the qualifier. We can have 160 to 2560 possible tile slice 2231 pseudo-registers. Each qualifier (we have 5 of them: B, H, S, D 2232 and Q) covers 32 * svq pseudo-registers, so we divide the offset by 2233 that constant. */ 2234 size_t qualifier = offset / (tdep->sme_svq * 32); 2235 encoding.qualifier_index = qualifier; 2236 2237 /* Prepare to fetch the direction (d), tile number (t) and slice 2238 number (s). */ 2239 int dts = offset % (tdep->sme_svq * 32); 2240 2241 /* The direction is represented by the even/odd numbers. Even-numbered 2242 pseudo-registers are horizontal tile slices and odd-numbered 2243 pseudo-registers are vertical tile slices. */ 2244 encoding.horizontal = !(dts & 1); 2245 2246 /* Fetch the tile number. The tile number is closely related to the 2247 qualifier. B has 1 tile, H has 2 tiles, S has 4 tiles, D has 8 tiles 2248 and Q has 16 tiles. */ 2249 encoding.tile_index = (dts >> 1) & ((1 << qualifier) - 1); 2250 2251 /* Fetch the slice number. The slice number is closely related to the 2252 qualifier and the svl. */ 2253 encoding.slice_index = dts >> (qualifier + 1); 2254 } 2255 else 2256 { 2257 /* Calculate the tile pseudo-register offset relative to the other 2258 tile pseudo-registers. */ 2259 int offset = regnum - tdep->sme_tile_pseudo_base; 2260 2261 encoding.qualifier_index = std::floor (std::log2 (offset + 1)); 2262 /* Calculate the tile number. */ 2263 encoding.tile_index = (offset + 1) - (1 << encoding.qualifier_index); 2264 /* Direction and slice index don't get used for tiles. Set them to 2265 0/false values. */ 2266 encoding.slice_index = 0; 2267 encoding.horizontal = false; 2268 } 2269 } 2270 2271 /* Return the type for a ZA tile slice pseudo-register based on ENCODING. */ 2272 2273 static struct type * 2274 aarch64_za_tile_slice_type (struct gdbarch *gdbarch, 2275 const struct za_pseudo_encoding &encoding) 2276 { 2277 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2278 2279 gdb_assert (tdep->has_sme ()); 2280 gdb_assert (tdep->sme_svq > 0); 2281 2282 if (tdep->sme_tile_slice_type_q == nullptr) 2283 { 2284 /* Q tile slice type. */ 2285 tdep->sme_tile_slice_type_q 2286 = init_vector_type (builtin_type (gdbarch)->builtin_uint128, 2287 tdep->sme_svq); 2288 /* D tile slice type. */ 2289 tdep->sme_tile_slice_type_d 2290 = init_vector_type (builtin_type (gdbarch)->builtin_uint64, 2291 tdep->sme_svq * 2); 2292 /* S tile slice type. */ 2293 tdep->sme_tile_slice_type_s 2294 = init_vector_type (builtin_type (gdbarch)->builtin_uint32, 2295 tdep->sme_svq * 4); 2296 /* H tile slice type. */ 2297 tdep->sme_tile_slice_type_h 2298 = init_vector_type (builtin_type (gdbarch)->builtin_uint16, 2299 tdep->sme_svq * 8); 2300 /* B tile slice type. */ 2301 tdep->sme_tile_slice_type_b 2302 = init_vector_type (builtin_type (gdbarch)->builtin_uint8, 2303 tdep->sme_svq * 16); 2304 } 2305 2306 switch (encoding.qualifier_index) 2307 { 2308 case 4: 2309 return tdep->sme_tile_slice_type_q; 2310 case 3: 2311 return tdep->sme_tile_slice_type_d; 2312 case 2: 2313 return tdep->sme_tile_slice_type_s; 2314 case 1: 2315 return tdep->sme_tile_slice_type_h; 2316 case 0: 2317 return tdep->sme_tile_slice_type_b; 2318 default: 2319 error (_("Invalid qualifier index %s for tile slice pseudo register."), 2320 pulongest (encoding.qualifier_index)); 2321 } 2322 2323 gdb_assert_not_reached ("Unknown qualifier for ZA tile slice register"); 2324 } 2325 2326 /* Return the type for a ZA tile pseudo-register based on ENCODING. */ 2327 2328 static struct type * 2329 aarch64_za_tile_type (struct gdbarch *gdbarch, 2330 const struct za_pseudo_encoding &encoding) 2331 { 2332 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2333 2334 gdb_assert (tdep->has_sme ()); 2335 gdb_assert (tdep->sme_svq > 0); 2336 2337 if (tdep->sme_tile_type_q == nullptr) 2338 { 2339 struct type *inner_vectors_type; 2340 2341 /* Q tile type. */ 2342 inner_vectors_type 2343 = init_vector_type (builtin_type (gdbarch)->builtin_uint128, 2344 tdep->sme_svq); 2345 tdep->sme_tile_type_q 2346 = init_vector_type (inner_vectors_type, tdep->sme_svq); 2347 2348 /* D tile type. */ 2349 inner_vectors_type 2350 = init_vector_type (builtin_type (gdbarch)->builtin_uint64, 2351 tdep->sme_svq * 2); 2352 tdep->sme_tile_type_d 2353 = init_vector_type (inner_vectors_type, tdep->sme_svq * 2); 2354 2355 /* S tile type. */ 2356 inner_vectors_type 2357 = init_vector_type (builtin_type (gdbarch)->builtin_uint32, 2358 tdep->sme_svq * 4); 2359 tdep->sme_tile_type_s 2360 = init_vector_type (inner_vectors_type, tdep->sme_svq * 4); 2361 2362 /* H tile type. */ 2363 inner_vectors_type 2364 = init_vector_type (builtin_type (gdbarch)->builtin_uint16, 2365 tdep->sme_svq * 8); 2366 tdep->sme_tile_type_h 2367 = init_vector_type (inner_vectors_type, tdep->sme_svq * 8); 2368 2369 /* B tile type. */ 2370 inner_vectors_type 2371 = init_vector_type (builtin_type (gdbarch)->builtin_uint8, 2372 tdep->sme_svq * 16); 2373 tdep->sme_tile_type_b 2374 = init_vector_type (inner_vectors_type, tdep->sme_svq * 16); 2375 } 2376 2377 switch (encoding.qualifier_index) 2378 { 2379 case 4: 2380 return tdep->sme_tile_type_q; 2381 case 3: 2382 return tdep->sme_tile_type_d; 2383 case 2: 2384 return tdep->sme_tile_type_s; 2385 case 1: 2386 return tdep->sme_tile_type_h; 2387 case 0: 2388 return tdep->sme_tile_type_b; 2389 default: 2390 error (_("Invalid qualifier index %s for ZA tile pseudo register."), 2391 pulongest (encoding.qualifier_index)); 2392 } 2393 2394 gdb_assert_not_reached ("unknown qualifier for tile pseudo-register"); 2395 } 2396 2397 /* Return the type for an AdvSISD V register. */ 2398 2399 static struct type * 2400 aarch64_vnv_type (struct gdbarch *gdbarch) 2401 { 2402 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2403 2404 if (tdep->vnv_type == NULL) 2405 { 2406 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value 2407 slice from the non-pseudo vector registers. However NEON V registers 2408 are always vector registers, and need constructing as such. */ 2409 const struct builtin_type *bt = builtin_type (gdbarch); 2410 2411 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv", 2412 TYPE_CODE_UNION); 2413 2414 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd", 2415 TYPE_CODE_UNION); 2416 append_composite_type_field (sub, "f", 2417 init_vector_type (bt->builtin_double, 2)); 2418 append_composite_type_field (sub, "u", 2419 init_vector_type (bt->builtin_uint64, 2)); 2420 append_composite_type_field (sub, "s", 2421 init_vector_type (bt->builtin_int64, 2)); 2422 append_composite_type_field (t, "d", sub); 2423 2424 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns", 2425 TYPE_CODE_UNION); 2426 append_composite_type_field (sub, "f", 2427 init_vector_type (bt->builtin_float, 4)); 2428 append_composite_type_field (sub, "u", 2429 init_vector_type (bt->builtin_uint32, 4)); 2430 append_composite_type_field (sub, "s", 2431 init_vector_type (bt->builtin_int32, 4)); 2432 append_composite_type_field (t, "s", sub); 2433 2434 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh", 2435 TYPE_CODE_UNION); 2436 append_composite_type_field (sub, "bf", 2437 init_vector_type (bt->builtin_bfloat16, 8)); 2438 append_composite_type_field (sub, "f", 2439 init_vector_type (bt->builtin_half, 8)); 2440 append_composite_type_field (sub, "u", 2441 init_vector_type (bt->builtin_uint16, 8)); 2442 append_composite_type_field (sub, "s", 2443 init_vector_type (bt->builtin_int16, 8)); 2444 append_composite_type_field (t, "h", sub); 2445 2446 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb", 2447 TYPE_CODE_UNION); 2448 append_composite_type_field (sub, "u", 2449 init_vector_type (bt->builtin_uint8, 16)); 2450 append_composite_type_field (sub, "s", 2451 init_vector_type (bt->builtin_int8, 16)); 2452 append_composite_type_field (t, "b", sub); 2453 2454 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq", 2455 TYPE_CODE_UNION); 2456 append_composite_type_field (sub, "u", 2457 init_vector_type (bt->builtin_uint128, 1)); 2458 append_composite_type_field (sub, "s", 2459 init_vector_type (bt->builtin_int128, 1)); 2460 append_composite_type_field (t, "q", sub); 2461 2462 tdep->vnv_type = t; 2463 } 2464 2465 return tdep->vnv_type; 2466 } 2467 2468 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */ 2469 2470 static int 2471 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg) 2472 { 2473 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2474 2475 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30) 2476 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0; 2477 2478 if (reg == AARCH64_DWARF_SP) 2479 return AARCH64_SP_REGNUM; 2480 2481 if (reg == AARCH64_DWARF_PC) 2482 return AARCH64_PC_REGNUM; 2483 2484 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31) 2485 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0; 2486 2487 if (reg == AARCH64_DWARF_SVE_VG) 2488 return AARCH64_SVE_VG_REGNUM; 2489 2490 if (reg == AARCH64_DWARF_SVE_FFR) 2491 return AARCH64_SVE_FFR_REGNUM; 2492 2493 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15) 2494 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0; 2495 2496 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15) 2497 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0; 2498 2499 if (tdep->has_pauth ()) 2500 { 2501 if (reg == AARCH64_DWARF_RA_SIGN_STATE) 2502 return tdep->ra_sign_state_regnum; 2503 } 2504 2505 return -1; 2506 } 2507 2508 /* Implement the "print_insn" gdbarch method. */ 2509 2510 static int 2511 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info) 2512 { 2513 info->symbols = NULL; 2514 return default_print_insn (memaddr, info); 2515 } 2516 2517 /* AArch64 BRK software debug mode instruction. 2518 Note that AArch64 code is always little-endian. 2519 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */ 2520 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4}; 2521 2522 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint; 2523 2524 /* Extract from an array REGS containing the (raw) register state a 2525 function return value of type TYPE, and copy that, in virtual 2526 format, into VALBUF. */ 2527 2528 static void 2529 aarch64_extract_return_value (struct type *type, struct regcache *regs, 2530 gdb_byte *valbuf) 2531 { 2532 struct gdbarch *gdbarch = regs->arch (); 2533 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 2534 int elements; 2535 struct type *fundamental_type; 2536 2537 if (aapcs_is_vfp_call_or_return_candidate (type, &elements, 2538 &fundamental_type)) 2539 { 2540 int len = fundamental_type->length (); 2541 2542 for (int i = 0; i < elements; i++) 2543 { 2544 int regno = AARCH64_V0_REGNUM + i; 2545 /* Enough space for a full vector register. */ 2546 gdb_byte buf[register_size (gdbarch, regno)]; 2547 gdb_assert (len <= sizeof (buf)); 2548 2549 aarch64_debug_printf 2550 ("read HFA or HVA return value element %d from %s", 2551 i + 1, gdbarch_register_name (gdbarch, regno)); 2552 2553 regs->cooked_read (regno, buf); 2554 2555 memcpy (valbuf, buf, len); 2556 valbuf += len; 2557 } 2558 } 2559 else if (type->code () == TYPE_CODE_INT 2560 || type->code () == TYPE_CODE_CHAR 2561 || type->code () == TYPE_CODE_BOOL 2562 || type->code () == TYPE_CODE_PTR 2563 || TYPE_IS_REFERENCE (type) 2564 || type->code () == TYPE_CODE_ENUM) 2565 { 2566 /* If the type is a plain integer, then the access is 2567 straight-forward. Otherwise we have to play around a bit 2568 more. */ 2569 int len = type->length (); 2570 int regno = AARCH64_X0_REGNUM; 2571 ULONGEST tmp; 2572 2573 while (len > 0) 2574 { 2575 /* By using store_unsigned_integer we avoid having to do 2576 anything special for small big-endian values. */ 2577 regcache_cooked_read_unsigned (regs, regno++, &tmp); 2578 store_unsigned_integer (valbuf, 2579 (len > X_REGISTER_SIZE 2580 ? X_REGISTER_SIZE : len), byte_order, tmp); 2581 len -= X_REGISTER_SIZE; 2582 valbuf += X_REGISTER_SIZE; 2583 } 2584 } 2585 else 2586 { 2587 /* For a structure or union the behaviour is as if the value had 2588 been stored to word-aligned memory and then loaded into 2589 registers with 64-bit load instruction(s). */ 2590 int len = type->length (); 2591 int regno = AARCH64_X0_REGNUM; 2592 bfd_byte buf[X_REGISTER_SIZE]; 2593 2594 while (len > 0) 2595 { 2596 regs->cooked_read (regno++, buf); 2597 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len); 2598 len -= X_REGISTER_SIZE; 2599 valbuf += X_REGISTER_SIZE; 2600 } 2601 } 2602 } 2603 2604 2605 /* Will a function return an aggregate type in memory or in a 2606 register? Return 0 if an aggregate type can be returned in a 2607 register, 1 if it must be returned in memory. */ 2608 2609 static int 2610 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type) 2611 { 2612 type = check_typedef (type); 2613 int elements; 2614 struct type *fundamental_type; 2615 2616 if (TYPE_HAS_DYNAMIC_LENGTH (type)) 2617 return 1; 2618 2619 if (aapcs_is_vfp_call_or_return_candidate (type, &elements, 2620 &fundamental_type)) 2621 { 2622 /* v0-v7 are used to return values and one register is allocated 2623 for one member. However, HFA or HVA has at most four members. */ 2624 return 0; 2625 } 2626 2627 if (type->length () > 16 2628 || !language_pass_by_reference (type).trivially_copyable) 2629 { 2630 /* PCS B.6 Aggregates larger than 16 bytes are passed by 2631 invisible reference. */ 2632 2633 return 1; 2634 } 2635 2636 return 0; 2637 } 2638 2639 /* Write into appropriate registers a function return value of type 2640 TYPE, given in virtual format. */ 2641 2642 static void 2643 aarch64_store_return_value (struct type *type, struct regcache *regs, 2644 const gdb_byte *valbuf) 2645 { 2646 struct gdbarch *gdbarch = regs->arch (); 2647 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 2648 int elements; 2649 struct type *fundamental_type; 2650 2651 if (aapcs_is_vfp_call_or_return_candidate (type, &elements, 2652 &fundamental_type)) 2653 { 2654 int len = fundamental_type->length (); 2655 2656 for (int i = 0; i < elements; i++) 2657 { 2658 int regno = AARCH64_V0_REGNUM + i; 2659 /* Enough space for a full vector register. */ 2660 gdb_byte tmpbuf[register_size (gdbarch, regno)]; 2661 gdb_assert (len <= sizeof (tmpbuf)); 2662 2663 aarch64_debug_printf 2664 ("write HFA or HVA return value element %d to %s", 2665 i + 1, gdbarch_register_name (gdbarch, regno)); 2666 2667 /* Depending on whether the target supports SVE or not, the V 2668 registers may report a size > 16 bytes. In that case, read the 2669 original contents of the register before overriding it with a new 2670 value that has a potential size <= 16 bytes. */ 2671 regs->cooked_read (regno, tmpbuf); 2672 memcpy (tmpbuf, valbuf, 2673 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len); 2674 regs->cooked_write (regno, tmpbuf); 2675 valbuf += len; 2676 } 2677 } 2678 else if (type->code () == TYPE_CODE_INT 2679 || type->code () == TYPE_CODE_CHAR 2680 || type->code () == TYPE_CODE_BOOL 2681 || type->code () == TYPE_CODE_PTR 2682 || TYPE_IS_REFERENCE (type) 2683 || type->code () == TYPE_CODE_ENUM) 2684 { 2685 if (type->length () <= X_REGISTER_SIZE) 2686 { 2687 /* Values of one word or less are zero/sign-extended and 2688 returned in r0. */ 2689 bfd_byte tmpbuf[X_REGISTER_SIZE]; 2690 LONGEST val = unpack_long (type, valbuf); 2691 2692 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val); 2693 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf); 2694 } 2695 else 2696 { 2697 /* Integral values greater than one word are stored in 2698 consecutive registers starting with r0. This will always 2699 be a multiple of the regiser size. */ 2700 int len = type->length (); 2701 int regno = AARCH64_X0_REGNUM; 2702 2703 while (len > 0) 2704 { 2705 regs->cooked_write (regno++, valbuf); 2706 len -= X_REGISTER_SIZE; 2707 valbuf += X_REGISTER_SIZE; 2708 } 2709 } 2710 } 2711 else 2712 { 2713 /* For a structure or union the behaviour is as if the value had 2714 been stored to word-aligned memory and then loaded into 2715 registers with 64-bit load instruction(s). */ 2716 int len = type->length (); 2717 int regno = AARCH64_X0_REGNUM; 2718 bfd_byte tmpbuf[X_REGISTER_SIZE]; 2719 2720 while (len > 0) 2721 { 2722 memcpy (tmpbuf, valbuf, 2723 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len); 2724 regs->cooked_write (regno++, tmpbuf); 2725 len -= X_REGISTER_SIZE; 2726 valbuf += X_REGISTER_SIZE; 2727 } 2728 } 2729 } 2730 2731 /* Implement the "return_value" gdbarch method. */ 2732 2733 static enum return_value_convention 2734 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value, 2735 struct type *valtype, struct regcache *regcache, 2736 struct value **read_value, const gdb_byte *writebuf) 2737 { 2738 if (valtype->code () == TYPE_CODE_STRUCT 2739 || valtype->code () == TYPE_CODE_UNION 2740 || valtype->code () == TYPE_CODE_ARRAY) 2741 { 2742 if (aarch64_return_in_memory (gdbarch, valtype)) 2743 { 2744 /* From the AAPCS64's Result Return section: 2745 2746 "Otherwise, the caller shall reserve a block of memory of 2747 sufficient size and alignment to hold the result. The address 2748 of the memory block shall be passed as an additional argument to 2749 the function in x8. */ 2750 2751 aarch64_debug_printf ("return value in memory"); 2752 2753 if (read_value != nullptr) 2754 { 2755 CORE_ADDR addr; 2756 2757 regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr); 2758 *read_value = value_at_non_lval (valtype, addr); 2759 } 2760 2761 return RETURN_VALUE_ABI_RETURNS_ADDRESS; 2762 } 2763 } 2764 2765 if (writebuf) 2766 aarch64_store_return_value (valtype, regcache, writebuf); 2767 2768 if (read_value) 2769 { 2770 *read_value = value::allocate (valtype); 2771 aarch64_extract_return_value (valtype, regcache, 2772 (*read_value)->contents_raw ().data ()); 2773 } 2774 2775 aarch64_debug_printf ("return value in registers"); 2776 2777 return RETURN_VALUE_REGISTER_CONVENTION; 2778 } 2779 2780 /* Implement the "get_longjmp_target" gdbarch method. */ 2781 2782 static int 2783 aarch64_get_longjmp_target (const frame_info_ptr &frame, CORE_ADDR *pc) 2784 { 2785 CORE_ADDR jb_addr; 2786 gdb_byte buf[X_REGISTER_SIZE]; 2787 struct gdbarch *gdbarch = get_frame_arch (frame); 2788 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2789 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 2790 2791 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM); 2792 2793 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf, 2794 X_REGISTER_SIZE)) 2795 return 0; 2796 2797 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order); 2798 return 1; 2799 } 2800 2801 /* Implement the "gen_return_address" gdbarch method. */ 2802 2803 static void 2804 aarch64_gen_return_address (struct gdbarch *gdbarch, 2805 struct agent_expr *ax, struct axs_value *value, 2806 CORE_ADDR scope) 2807 { 2808 value->type = register_type (gdbarch, AARCH64_LR_REGNUM); 2809 value->kind = axs_lvalue_register; 2810 value->u.reg = AARCH64_LR_REGNUM; 2811 } 2812 2813 2814 /* Return TRUE if REGNUM is a W pseudo-register number. Return FALSE 2815 otherwise. */ 2816 2817 static bool 2818 is_w_pseudo_register (struct gdbarch *gdbarch, int regnum) 2819 { 2820 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2821 2822 if (tdep->w_pseudo_base <= regnum 2823 && regnum < tdep->w_pseudo_base + tdep->w_pseudo_count) 2824 return true; 2825 2826 return false; 2827 } 2828 2829 /* Return TRUE if REGNUM is a SME pseudo-register number. Return FALSE 2830 otherwise. */ 2831 2832 static bool 2833 is_sme_pseudo_register (struct gdbarch *gdbarch, int regnum) 2834 { 2835 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2836 2837 if (tdep->has_sme () && tdep->sme_pseudo_base <= regnum 2838 && regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count) 2839 return true; 2840 2841 return false; 2842 } 2843 2844 /* Convert ENCODING into a ZA tile slice name. */ 2845 2846 static const std::string 2847 aarch64_za_tile_slice_name (const struct za_pseudo_encoding &encoding) 2848 { 2849 gdb_assert (encoding.qualifier_index >= 0); 2850 gdb_assert (encoding.qualifier_index <= 4); 2851 gdb_assert (encoding.tile_index >= 0); 2852 gdb_assert (encoding.tile_index <= 15); 2853 gdb_assert (encoding.slice_index >= 0); 2854 gdb_assert (encoding.slice_index <= 255); 2855 2856 const char orientation = encoding.horizontal ? 'h' : 'v'; 2857 2858 const char qualifiers[6] = "bhsdq"; 2859 const char qualifier = qualifiers [encoding.qualifier_index]; 2860 return string_printf ("za%d%c%c%d", encoding.tile_index, orientation, 2861 qualifier, encoding.slice_index); 2862 } 2863 2864 /* Convert ENCODING into a ZA tile name. */ 2865 2866 static const std::string 2867 aarch64_za_tile_name (const struct za_pseudo_encoding &encoding) 2868 { 2869 /* Tiles don't use the slice number and the direction fields. */ 2870 gdb_assert (encoding.qualifier_index >= 0); 2871 gdb_assert (encoding.qualifier_index <= 4); 2872 gdb_assert (encoding.tile_index >= 0); 2873 gdb_assert (encoding.tile_index <= 15); 2874 2875 const char qualifiers[6] = "bhsdq"; 2876 const char qualifier = qualifiers [encoding.qualifier_index]; 2877 return (string_printf ("za%d%c", encoding.tile_index, qualifier)); 2878 } 2879 2880 /* Given a SME pseudo-register REGNUM, return its type. */ 2881 2882 static struct type * 2883 aarch64_sme_pseudo_register_type (struct gdbarch *gdbarch, int regnum) 2884 { 2885 struct za_pseudo_encoding encoding; 2886 2887 /* Decode the SME pseudo-register number. */ 2888 aarch64_za_decode_pseudos (gdbarch, regnum, encoding); 2889 2890 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum)) 2891 return aarch64_za_tile_slice_type (gdbarch, encoding); 2892 else 2893 return aarch64_za_tile_type (gdbarch, encoding); 2894 } 2895 2896 /* Return the pseudo register name corresponding to register regnum. */ 2897 2898 static const char * 2899 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum) 2900 { 2901 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 2902 2903 /* W pseudo-registers. Bottom halves of the X registers. */ 2904 static const char *const w_name[] = 2905 { 2906 "w0", "w1", "w2", "w3", 2907 "w4", "w5", "w6", "w7", 2908 "w8", "w9", "w10", "w11", 2909 "w12", "w13", "w14", "w15", 2910 "w16", "w17", "w18", "w19", 2911 "w20", "w21", "w22", "w23", 2912 "w24", "w25", "w26", "w27", 2913 "w28", "w29", "w30", 2914 }; 2915 2916 static const char *const q_name[] = 2917 { 2918 "q0", "q1", "q2", "q3", 2919 "q4", "q5", "q6", "q7", 2920 "q8", "q9", "q10", "q11", 2921 "q12", "q13", "q14", "q15", 2922 "q16", "q17", "q18", "q19", 2923 "q20", "q21", "q22", "q23", 2924 "q24", "q25", "q26", "q27", 2925 "q28", "q29", "q30", "q31", 2926 }; 2927 2928 static const char *const d_name[] = 2929 { 2930 "d0", "d1", "d2", "d3", 2931 "d4", "d5", "d6", "d7", 2932 "d8", "d9", "d10", "d11", 2933 "d12", "d13", "d14", "d15", 2934 "d16", "d17", "d18", "d19", 2935 "d20", "d21", "d22", "d23", 2936 "d24", "d25", "d26", "d27", 2937 "d28", "d29", "d30", "d31", 2938 }; 2939 2940 static const char *const s_name[] = 2941 { 2942 "s0", "s1", "s2", "s3", 2943 "s4", "s5", "s6", "s7", 2944 "s8", "s9", "s10", "s11", 2945 "s12", "s13", "s14", "s15", 2946 "s16", "s17", "s18", "s19", 2947 "s20", "s21", "s22", "s23", 2948 "s24", "s25", "s26", "s27", 2949 "s28", "s29", "s30", "s31", 2950 }; 2951 2952 static const char *const h_name[] = 2953 { 2954 "h0", "h1", "h2", "h3", 2955 "h4", "h5", "h6", "h7", 2956 "h8", "h9", "h10", "h11", 2957 "h12", "h13", "h14", "h15", 2958 "h16", "h17", "h18", "h19", 2959 "h20", "h21", "h22", "h23", 2960 "h24", "h25", "h26", "h27", 2961 "h28", "h29", "h30", "h31", 2962 }; 2963 2964 static const char *const b_name[] = 2965 { 2966 "b0", "b1", "b2", "b3", 2967 "b4", "b5", "b6", "b7", 2968 "b8", "b9", "b10", "b11", 2969 "b12", "b13", "b14", "b15", 2970 "b16", "b17", "b18", "b19", 2971 "b20", "b21", "b22", "b23", 2972 "b24", "b25", "b26", "b27", 2973 "b28", "b29", "b30", "b31", 2974 }; 2975 2976 int p_regnum = regnum - gdbarch_num_regs (gdbarch); 2977 2978 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32) 2979 return q_name[p_regnum - AARCH64_Q0_REGNUM]; 2980 2981 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32) 2982 return d_name[p_regnum - AARCH64_D0_REGNUM]; 2983 2984 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32) 2985 return s_name[p_regnum - AARCH64_S0_REGNUM]; 2986 2987 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32) 2988 return h_name[p_regnum - AARCH64_H0_REGNUM]; 2989 2990 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32) 2991 return b_name[p_regnum - AARCH64_B0_REGNUM]; 2992 2993 /* W pseudo-registers? */ 2994 if (is_w_pseudo_register (gdbarch, regnum)) 2995 return w_name[regnum - tdep->w_pseudo_base]; 2996 2997 if (tdep->has_sve ()) 2998 { 2999 static const char *const sve_v_name[] = 3000 { 3001 "v0", "v1", "v2", "v3", 3002 "v4", "v5", "v6", "v7", 3003 "v8", "v9", "v10", "v11", 3004 "v12", "v13", "v14", "v15", 3005 "v16", "v17", "v18", "v19", 3006 "v20", "v21", "v22", "v23", 3007 "v24", "v25", "v26", "v27", 3008 "v28", "v29", "v30", "v31", 3009 }; 3010 3011 if (p_regnum >= AARCH64_SVE_V0_REGNUM 3012 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM) 3013 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM]; 3014 } 3015 3016 if (is_sme_pseudo_register (gdbarch, regnum)) 3017 return tdep->sme_pseudo_names[regnum - tdep->sme_pseudo_base].c_str (); 3018 3019 /* RA_STATE is used for unwinding only. Do not assign it a name - this 3020 prevents it from being read by methods such as 3021 mi_cmd_trace_frame_collected. */ 3022 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum) 3023 return ""; 3024 3025 internal_error (_("aarch64_pseudo_register_name: bad register number %d"), 3026 p_regnum); 3027 } 3028 3029 /* Implement the "pseudo_register_type" tdesc_arch_data method. */ 3030 3031 static struct type * 3032 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum) 3033 { 3034 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 3035 3036 int p_regnum = regnum - gdbarch_num_regs (gdbarch); 3037 3038 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32) 3039 return aarch64_vnq_type (gdbarch); 3040 3041 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32) 3042 return aarch64_vnd_type (gdbarch); 3043 3044 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32) 3045 return aarch64_vns_type (gdbarch); 3046 3047 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32) 3048 return aarch64_vnh_type (gdbarch); 3049 3050 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32) 3051 return aarch64_vnb_type (gdbarch); 3052 3053 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM 3054 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM) 3055 return aarch64_vnv_type (gdbarch); 3056 3057 /* W pseudo-registers are 32-bit. */ 3058 if (is_w_pseudo_register (gdbarch, regnum)) 3059 return builtin_type (gdbarch)->builtin_uint32; 3060 3061 if (is_sme_pseudo_register (gdbarch, regnum)) 3062 return aarch64_sme_pseudo_register_type (gdbarch, regnum); 3063 3064 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum) 3065 return builtin_type (gdbarch)->builtin_uint64; 3066 3067 internal_error (_("aarch64_pseudo_register_type: bad register number %d"), 3068 p_regnum); 3069 } 3070 3071 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */ 3072 3073 static int 3074 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum, 3075 const struct reggroup *group) 3076 { 3077 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 3078 3079 int p_regnum = regnum - gdbarch_num_regs (gdbarch); 3080 3081 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32) 3082 return group == all_reggroup || group == vector_reggroup; 3083 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32) 3084 return (group == all_reggroup || group == vector_reggroup 3085 || group == float_reggroup); 3086 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32) 3087 return (group == all_reggroup || group == vector_reggroup 3088 || group == float_reggroup); 3089 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32) 3090 return group == all_reggroup || group == vector_reggroup; 3091 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32) 3092 return group == all_reggroup || group == vector_reggroup; 3093 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM 3094 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM) 3095 return group == all_reggroup || group == vector_reggroup; 3096 else if (is_sme_pseudo_register (gdbarch, regnum)) 3097 return group == all_reggroup || group == vector_reggroup; 3098 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */ 3099 if (tdep->has_pauth () && regnum == tdep->ra_sign_state_regnum) 3100 return 0; 3101 3102 return group == all_reggroup; 3103 } 3104 3105 /* Helper for aarch64_pseudo_read_value. */ 3106 3107 static value * 3108 aarch64_pseudo_read_value_1 (const frame_info_ptr &next_frame, 3109 const int pseudo_reg_num, int raw_regnum_offset) 3110 { 3111 unsigned v_regnum = AARCH64_V0_REGNUM + raw_regnum_offset; 3112 3113 return pseudo_from_raw_part (next_frame, pseudo_reg_num, v_regnum, 0); 3114 } 3115 3116 /* Helper function for reading/writing ZA pseudo-registers. Given REGNUM, 3117 a ZA pseudo-register number, return the information on positioning of the 3118 bytes that must be read from/written to. */ 3119 3120 static za_offsets 3121 aarch64_za_offsets_from_regnum (struct gdbarch *gdbarch, int regnum) 3122 { 3123 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 3124 3125 gdb_assert (tdep->has_sme ()); 3126 gdb_assert (tdep->sme_svq > 0); 3127 gdb_assert (tdep->sme_pseudo_base <= regnum); 3128 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count); 3129 3130 struct za_pseudo_encoding encoding; 3131 3132 /* Decode the ZA pseudo-register number. */ 3133 aarch64_za_decode_pseudos (gdbarch, regnum, encoding); 3134 3135 /* Fetch the streaming vector length. */ 3136 size_t svl = sve_vl_from_vq (tdep->sme_svq); 3137 za_offsets offsets; 3138 3139 if (is_sme_tile_slice_pseudo_register (gdbarch, regnum)) 3140 { 3141 if (encoding.horizontal) 3142 { 3143 /* Horizontal tile slices are contiguous ranges of svl bytes. */ 3144 3145 /* The starting offset depends on the tile index (to locate the tile 3146 in the ZA buffer), the slice index (to locate the slice within the 3147 tile) and the qualifier. */ 3148 offsets.starting_offset 3149 = encoding.tile_index * svl + encoding.slice_index 3150 * (svl >> encoding.qualifier_index); 3151 /* Horizontal tile slice data is contiguous and thus doesn't have 3152 a stride. */ 3153 offsets.stride_size = 0; 3154 /* Horizontal tile slice data is contiguous and thus only has 1 3155 chunk. */ 3156 offsets.chunks = 1; 3157 /* The chunk size is always svl bytes. */ 3158 offsets.chunk_size = svl; 3159 } 3160 else 3161 { 3162 /* Vertical tile slices are non-contiguous ranges of 3163 (1 << qualifier_index) bytes. */ 3164 3165 /* The starting offset depends on the tile number (to locate the 3166 tile in the ZA buffer), the slice index (to locate the element 3167 within the tile slice) and the qualifier. */ 3168 offsets.starting_offset 3169 = encoding.tile_index * svl + encoding.slice_index 3170 * (1 << encoding.qualifier_index); 3171 /* The offset between vertical tile slices depends on the qualifier 3172 and svl. */ 3173 offsets.stride_size = svl << encoding.qualifier_index; 3174 /* The number of chunks depends on svl and the qualifier size. */ 3175 offsets.chunks = svl >> encoding.qualifier_index; 3176 /* The chunk size depends on the qualifier. */ 3177 offsets.chunk_size = 1 << encoding.qualifier_index; 3178 } 3179 } 3180 else 3181 { 3182 /* ZA tile pseudo-register. */ 3183 3184 /* Starting offset depends on the tile index and qualifier. */ 3185 offsets.starting_offset = encoding.tile_index * svl; 3186 /* The offset between tile slices depends on the qualifier and svl. */ 3187 offsets.stride_size = svl << encoding.qualifier_index; 3188 /* The number of chunks depends on the qualifier and svl. */ 3189 offsets.chunks = svl >> encoding.qualifier_index; 3190 /* The chunk size is always svl bytes. */ 3191 offsets.chunk_size = svl; 3192 } 3193 3194 return offsets; 3195 } 3196 3197 /* Given REGNUM, a SME pseudo-register number, return its value in RESULT. */ 3198 3199 static value * 3200 aarch64_sme_pseudo_register_read (gdbarch *gdbarch, const frame_info_ptr &next_frame, 3201 const int pseudo_reg_num) 3202 { 3203 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 3204 3205 gdb_assert (tdep->has_sme ()); 3206 gdb_assert (tdep->sme_svq > 0); 3207 gdb_assert (tdep->sme_pseudo_base <= pseudo_reg_num); 3208 gdb_assert (pseudo_reg_num < tdep->sme_pseudo_base + tdep->sme_pseudo_count); 3209 3210 /* Fetch the offsets that we need in order to read from the correct blocks 3211 of ZA. */ 3212 za_offsets offsets 3213 = aarch64_za_offsets_from_regnum (gdbarch, pseudo_reg_num); 3214 3215 /* Fetch the contents of ZA. */ 3216 value *za_value = value_of_register (tdep->sme_za_regnum, next_frame); 3217 value *result = value::allocate_register (next_frame, pseudo_reg_num); 3218 3219 /* Copy the requested data. */ 3220 for (int chunks = 0; chunks < offsets.chunks; chunks++) 3221 { 3222 int src_offset = offsets.starting_offset + chunks * offsets.stride_size; 3223 int dst_offset = chunks * offsets.chunk_size; 3224 za_value->contents_copy (result, dst_offset, src_offset, 3225 offsets.chunk_size); 3226 } 3227 3228 return result; 3229 } 3230 3231 /* Implement the "pseudo_register_read_value" gdbarch method. */ 3232 3233 static value * 3234 aarch64_pseudo_read_value (gdbarch *gdbarch, const frame_info_ptr &next_frame, 3235 const int pseudo_reg_num) 3236 { 3237 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 3238 3239 if (is_w_pseudo_register (gdbarch, pseudo_reg_num)) 3240 { 3241 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 3242 /* Default offset for little endian. */ 3243 int offset = 0; 3244 3245 if (byte_order == BFD_ENDIAN_BIG) 3246 offset = 4; 3247 3248 /* Find the correct X register to extract the data from. */ 3249 int x_regnum 3250 = AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base); 3251 3252 /* Read the bottom 4 bytes of X. */ 3253 return pseudo_from_raw_part (next_frame, pseudo_reg_num, x_regnum, 3254 offset); 3255 } 3256 else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num)) 3257 return aarch64_sme_pseudo_register_read (gdbarch, next_frame, 3258 pseudo_reg_num); 3259 3260 /* Offset in the "pseudo-register space". */ 3261 int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch); 3262 3263 if (pseudo_offset >= AARCH64_Q0_REGNUM 3264 && pseudo_offset < AARCH64_Q0_REGNUM + 32) 3265 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num, 3266 pseudo_offset - AARCH64_Q0_REGNUM); 3267 3268 if (pseudo_offset >= AARCH64_D0_REGNUM 3269 && pseudo_offset < AARCH64_D0_REGNUM + 32) 3270 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num, 3271 pseudo_offset - AARCH64_D0_REGNUM); 3272 3273 if (pseudo_offset >= AARCH64_S0_REGNUM 3274 && pseudo_offset < AARCH64_S0_REGNUM + 32) 3275 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num, 3276 pseudo_offset - AARCH64_S0_REGNUM); 3277 3278 if (pseudo_offset >= AARCH64_H0_REGNUM 3279 && pseudo_offset < AARCH64_H0_REGNUM + 32) 3280 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num, 3281 pseudo_offset - AARCH64_H0_REGNUM); 3282 3283 if (pseudo_offset >= AARCH64_B0_REGNUM 3284 && pseudo_offset < AARCH64_B0_REGNUM + 32) 3285 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num, 3286 pseudo_offset - AARCH64_B0_REGNUM); 3287 3288 if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM 3289 && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32) 3290 return aarch64_pseudo_read_value_1 (next_frame, pseudo_reg_num, 3291 pseudo_offset - AARCH64_SVE_V0_REGNUM); 3292 3293 gdb_assert_not_reached ("regnum out of bound"); 3294 } 3295 3296 /* Helper for aarch64_pseudo_write. */ 3297 3298 static void 3299 aarch64_pseudo_write_1 (gdbarch *gdbarch, const frame_info_ptr &next_frame, 3300 int regnum_offset, 3301 gdb::array_view<const gdb_byte> buf) 3302 { 3303 unsigned raw_regnum = AARCH64_V0_REGNUM + regnum_offset; 3304 3305 /* Enough space for a full vector register. */ 3306 int raw_reg_size = register_size (gdbarch, raw_regnum); 3307 gdb_byte raw_buf[raw_reg_size]; 3308 static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM); 3309 3310 /* Ensure the register buffer is zero, we want gdb writes of the 3311 various 'scalar' pseudo registers to behavior like architectural 3312 writes, register width bytes are written the remainder are set to 3313 zero. */ 3314 memset (raw_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM)); 3315 3316 gdb::array_view<gdb_byte> raw_view (raw_buf, raw_reg_size); 3317 copy (buf, raw_view.slice (0, buf.size ())); 3318 put_frame_register (next_frame, raw_regnum, raw_view); 3319 } 3320 3321 /* Given REGNUM, a SME pseudo-register number, store the bytes from DATA to the 3322 pseudo-register. */ 3323 3324 static void 3325 aarch64_sme_pseudo_register_write (gdbarch *gdbarch, const frame_info_ptr &next_frame, 3326 const int regnum, 3327 gdb::array_view<const gdb_byte> data) 3328 { 3329 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 3330 3331 gdb_assert (tdep->has_sme ()); 3332 gdb_assert (tdep->sme_svq > 0); 3333 gdb_assert (tdep->sme_pseudo_base <= regnum); 3334 gdb_assert (regnum < tdep->sme_pseudo_base + tdep->sme_pseudo_count); 3335 3336 /* Fetch the offsets that we need in order to write to the correct blocks 3337 of ZA. */ 3338 za_offsets offsets = aarch64_za_offsets_from_regnum (gdbarch, regnum); 3339 3340 /* Fetch the contents of ZA. */ 3341 value *za_value = value_of_register (tdep->sme_za_regnum, next_frame); 3342 3343 { 3344 /* Create a view only on the portion of za we want to write. */ 3345 gdb::array_view<gdb_byte> za_view 3346 = za_value->contents_writeable ().slice (offsets.starting_offset); 3347 3348 /* Copy the requested data. */ 3349 for (int chunks = 0; chunks < offsets.chunks; chunks++) 3350 { 3351 gdb::array_view<const gdb_byte> src 3352 = data.slice (chunks * offsets.chunk_size, offsets.chunk_size); 3353 gdb::array_view<gdb_byte> dst 3354 = za_view.slice (chunks * offsets.stride_size, offsets.chunk_size); 3355 copy (src, dst); 3356 } 3357 } 3358 3359 /* Write back to ZA. */ 3360 put_frame_register (next_frame, tdep->sme_za_regnum, 3361 za_value->contents_raw ()); 3362 } 3363 3364 /* Implement the "pseudo_register_write" gdbarch method. */ 3365 3366 static void 3367 aarch64_pseudo_write (gdbarch *gdbarch, const frame_info_ptr &next_frame, 3368 const int pseudo_reg_num, 3369 gdb::array_view<const gdb_byte> buf) 3370 { 3371 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 3372 3373 if (is_w_pseudo_register (gdbarch, pseudo_reg_num)) 3374 { 3375 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 3376 /* Default offset for little endian. */ 3377 int offset = 0; 3378 3379 if (byte_order == BFD_ENDIAN_BIG) 3380 offset = 4; 3381 3382 /* Find the correct X register to extract the data from. */ 3383 int x_regnum = AARCH64_X0_REGNUM + (pseudo_reg_num - tdep->w_pseudo_base); 3384 3385 /* First zero-out the contents of X. */ 3386 gdb_byte bytes[8] {}; 3387 gdb::array_view<gdb_byte> bytes_view (bytes); 3388 copy (buf, bytes_view.slice (offset, 4)); 3389 3390 /* Write to the bottom 4 bytes of X. */ 3391 put_frame_register (next_frame, x_regnum, bytes_view); 3392 return; 3393 } 3394 else if (is_sme_pseudo_register (gdbarch, pseudo_reg_num)) 3395 { 3396 aarch64_sme_pseudo_register_write (gdbarch, next_frame, pseudo_reg_num, 3397 buf); 3398 return; 3399 } 3400 3401 /* Offset in the "pseudo-register space". */ 3402 int pseudo_offset = pseudo_reg_num - gdbarch_num_regs (gdbarch); 3403 3404 if (pseudo_offset >= AARCH64_Q0_REGNUM 3405 && pseudo_offset < AARCH64_Q0_REGNUM + 32) 3406 return aarch64_pseudo_write_1 (gdbarch, next_frame, 3407 pseudo_offset - AARCH64_Q0_REGNUM, buf); 3408 3409 if (pseudo_offset >= AARCH64_D0_REGNUM 3410 && pseudo_offset < AARCH64_D0_REGNUM + 32) 3411 return aarch64_pseudo_write_1 (gdbarch, next_frame, 3412 pseudo_offset - AARCH64_D0_REGNUM, buf); 3413 3414 if (pseudo_offset >= AARCH64_S0_REGNUM 3415 && pseudo_offset < AARCH64_S0_REGNUM + 32) 3416 return aarch64_pseudo_write_1 (gdbarch, next_frame, 3417 pseudo_offset - AARCH64_S0_REGNUM, buf); 3418 3419 if (pseudo_offset >= AARCH64_H0_REGNUM 3420 && pseudo_offset < AARCH64_H0_REGNUM + 32) 3421 return aarch64_pseudo_write_1 (gdbarch, next_frame, 3422 pseudo_offset - AARCH64_H0_REGNUM, buf); 3423 3424 if (pseudo_offset >= AARCH64_B0_REGNUM 3425 && pseudo_offset < AARCH64_B0_REGNUM + 32) 3426 return aarch64_pseudo_write_1 (gdbarch, next_frame, 3427 pseudo_offset - AARCH64_B0_REGNUM, buf); 3428 3429 if (tdep->has_sve () && pseudo_offset >= AARCH64_SVE_V0_REGNUM 3430 && pseudo_offset < AARCH64_SVE_V0_REGNUM + 32) 3431 return aarch64_pseudo_write_1 (gdbarch, next_frame, 3432 pseudo_offset - AARCH64_SVE_V0_REGNUM, buf); 3433 3434 gdb_assert_not_reached ("regnum out of bound"); 3435 } 3436 3437 /* Callback function for user_reg_add. */ 3438 3439 static struct value * 3440 value_of_aarch64_user_reg (const frame_info_ptr &frame, const void *baton) 3441 { 3442 const int *reg_p = (const int *) baton; 3443 3444 return value_of_register (*reg_p, get_next_frame_sentinel_okay (frame)); 3445 } 3446 3447 /* Implement the "software_single_step" gdbarch method, needed to 3448 single step through atomic sequences on AArch64. */ 3449 3450 static std::vector<CORE_ADDR> 3451 aarch64_software_single_step (struct regcache *regcache) 3452 { 3453 struct gdbarch *gdbarch = regcache->arch (); 3454 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); 3455 const int insn_size = 4; 3456 const int atomic_sequence_length = 16; /* Instruction sequence length. */ 3457 CORE_ADDR pc = regcache_read_pc (regcache); 3458 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX }; 3459 CORE_ADDR loc = pc; 3460 CORE_ADDR closing_insn = 0; 3461 3462 ULONGEST insn_from_memory; 3463 if (!safe_read_memory_unsigned_integer (loc, insn_size, 3464 byte_order_for_code, 3465 &insn_from_memory)) 3466 { 3467 /* Assume we don't have a atomic sequence, as we couldn't read the 3468 instruction in this location. */ 3469 return {}; 3470 } 3471 3472 uint32_t insn = insn_from_memory; 3473 int index; 3474 int insn_count; 3475 int bc_insn_count = 0; /* Conditional branch instruction count. */ 3476 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */ 3477 aarch64_inst inst; 3478 3479 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0) 3480 return {}; 3481 3482 /* Look for a Load Exclusive instruction which begins the sequence. */ 3483 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0) 3484 return {}; 3485 3486 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count) 3487 { 3488 loc += insn_size; 3489 3490 if (!safe_read_memory_unsigned_integer (loc, insn_size, 3491 byte_order_for_code, 3492 &insn_from_memory)) 3493 { 3494 /* Assume we don't have a atomic sequence, as we couldn't read the 3495 instruction in this location. */ 3496 return {}; 3497 } 3498 3499 insn = insn_from_memory; 3500 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0) 3501 return {}; 3502 /* Check if the instruction is a conditional branch. */ 3503 if (inst.opcode->iclass == condbranch) 3504 { 3505 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19); 3506 3507 if (bc_insn_count >= 1) 3508 return {}; 3509 3510 /* It is, so we'll try to set a breakpoint at the destination. */ 3511 breaks[1] = loc + inst.operands[0].imm.value; 3512 3513 bc_insn_count++; 3514 last_breakpoint++; 3515 } 3516 3517 /* Look for the Store Exclusive which closes the atomic sequence. */ 3518 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0) 3519 { 3520 closing_insn = loc; 3521 break; 3522 } 3523 } 3524 3525 /* We didn't find a closing Store Exclusive instruction, fall back. */ 3526 if (!closing_insn) 3527 return {}; 3528 3529 /* Insert breakpoint after the end of the atomic sequence. */ 3530 breaks[0] = loc + insn_size; 3531 3532 /* Check for duplicated breakpoints, and also check that the second 3533 breakpoint is not within the atomic sequence. */ 3534 if (last_breakpoint 3535 && (breaks[1] == breaks[0] 3536 || (breaks[1] >= pc && breaks[1] <= closing_insn))) 3537 last_breakpoint = 0; 3538 3539 std::vector<CORE_ADDR> next_pcs; 3540 3541 /* Insert the breakpoint at the end of the sequence, and one at the 3542 destination of the conditional branch, if it exists. */ 3543 for (index = 0; index <= last_breakpoint; index++) 3544 next_pcs.push_back (breaks[index]); 3545 3546 return next_pcs; 3547 } 3548 3549 struct aarch64_displaced_step_copy_insn_closure 3550 : public displaced_step_copy_insn_closure 3551 { 3552 /* It is true when condition instruction, such as B.CON, TBZ, etc, 3553 is being displaced stepping. */ 3554 bool cond = false; 3555 3556 /* PC adjustment offset after displaced stepping. If 0, then we don't 3557 write the PC back, assuming the PC is already the right address. */ 3558 int32_t pc_adjust = 0; 3559 }; 3560 3561 /* Data when visiting instructions for displaced stepping. */ 3562 3563 struct aarch64_displaced_step_data 3564 { 3565 struct aarch64_insn_data base; 3566 3567 /* The address where the instruction will be executed at. */ 3568 CORE_ADDR new_addr; 3569 /* Buffer of instructions to be copied to NEW_ADDR to execute. */ 3570 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS]; 3571 /* Number of instructions in INSN_BUF. */ 3572 unsigned insn_count; 3573 /* Registers when doing displaced stepping. */ 3574 struct regcache *regs; 3575 3576 aarch64_displaced_step_copy_insn_closure *dsc; 3577 }; 3578 3579 /* Implementation of aarch64_insn_visitor method "b". */ 3580 3581 static void 3582 aarch64_displaced_step_b (const int is_bl, const int32_t offset, 3583 struct aarch64_insn_data *data) 3584 { 3585 struct aarch64_displaced_step_data *dsd 3586 = (struct aarch64_displaced_step_data *) data; 3587 int64_t new_offset = data->insn_addr - dsd->new_addr + offset; 3588 3589 if (can_encode_int32 (new_offset, 28)) 3590 { 3591 /* Emit B rather than BL, because executing BL on a new address 3592 will get the wrong address into LR. In order to avoid this, 3593 we emit B, and update LR if the instruction is BL. */ 3594 emit_b (dsd->insn_buf, 0, new_offset); 3595 dsd->insn_count++; 3596 } 3597 else 3598 { 3599 /* Write NOP. */ 3600 emit_nop (dsd->insn_buf); 3601 dsd->insn_count++; 3602 dsd->dsc->pc_adjust = offset; 3603 } 3604 3605 if (is_bl) 3606 { 3607 /* Update LR. */ 3608 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM, 3609 data->insn_addr + 4); 3610 } 3611 } 3612 3613 /* Implementation of aarch64_insn_visitor method "b_cond". */ 3614 3615 static void 3616 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset, 3617 struct aarch64_insn_data *data) 3618 { 3619 struct aarch64_displaced_step_data *dsd 3620 = (struct aarch64_displaced_step_data *) data; 3621 3622 /* GDB has to fix up PC after displaced step this instruction 3623 differently according to the condition is true or false. Instead 3624 of checking COND against conditional flags, we can use 3625 the following instructions, and GDB can tell how to fix up PC 3626 according to the PC value. 3627 3628 B.COND TAKEN ; If cond is true, then jump to TAKEN. 3629 INSN1 ; 3630 TAKEN: 3631 INSN2 3632 */ 3633 3634 emit_bcond (dsd->insn_buf, cond, 8); 3635 dsd->dsc->cond = true; 3636 dsd->dsc->pc_adjust = offset; 3637 dsd->insn_count = 1; 3638 } 3639 3640 /* Dynamically allocate a new register. If we know the register 3641 statically, we should make it a global as above instead of using this 3642 helper function. */ 3643 3644 static struct aarch64_register 3645 aarch64_register (unsigned num, int is64) 3646 { 3647 return (struct aarch64_register) { num, is64 }; 3648 } 3649 3650 /* Implementation of aarch64_insn_visitor method "cb". */ 3651 3652 static void 3653 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz, 3654 const unsigned rn, int is64, 3655 struct aarch64_insn_data *data) 3656 { 3657 struct aarch64_displaced_step_data *dsd 3658 = (struct aarch64_displaced_step_data *) data; 3659 3660 /* The offset is out of range for a compare and branch 3661 instruction. We can use the following instructions instead: 3662 3663 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN. 3664 INSN1 ; 3665 TAKEN: 3666 INSN2 3667 */ 3668 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8); 3669 dsd->insn_count = 1; 3670 dsd->dsc->cond = true; 3671 dsd->dsc->pc_adjust = offset; 3672 } 3673 3674 /* Implementation of aarch64_insn_visitor method "tb". */ 3675 3676 static void 3677 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz, 3678 const unsigned rt, unsigned bit, 3679 struct aarch64_insn_data *data) 3680 { 3681 struct aarch64_displaced_step_data *dsd 3682 = (struct aarch64_displaced_step_data *) data; 3683 3684 /* The offset is out of range for a test bit and branch 3685 instruction We can use the following instructions instead: 3686 3687 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN. 3688 INSN1 ; 3689 TAKEN: 3690 INSN2 3691 3692 */ 3693 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8); 3694 dsd->insn_count = 1; 3695 dsd->dsc->cond = true; 3696 dsd->dsc->pc_adjust = offset; 3697 } 3698 3699 /* Implementation of aarch64_insn_visitor method "adr". */ 3700 3701 static void 3702 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd, 3703 const int is_adrp, struct aarch64_insn_data *data) 3704 { 3705 struct aarch64_displaced_step_data *dsd 3706 = (struct aarch64_displaced_step_data *) data; 3707 /* We know exactly the address the ADR{P,} instruction will compute. 3708 We can just write it to the destination register. */ 3709 CORE_ADDR address = data->insn_addr + offset; 3710 3711 if (is_adrp) 3712 { 3713 /* Clear the lower 12 bits of the offset to get the 4K page. */ 3714 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd, 3715 address & ~0xfff); 3716 } 3717 else 3718 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd, 3719 address); 3720 3721 dsd->dsc->pc_adjust = 4; 3722 emit_nop (dsd->insn_buf); 3723 dsd->insn_count = 1; 3724 } 3725 3726 /* Implementation of aarch64_insn_visitor method "ldr_literal". */ 3727 3728 static void 3729 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw, 3730 const unsigned rt, const int is64, 3731 struct aarch64_insn_data *data) 3732 { 3733 struct aarch64_displaced_step_data *dsd 3734 = (struct aarch64_displaced_step_data *) data; 3735 CORE_ADDR address = data->insn_addr + offset; 3736 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 }; 3737 3738 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt, 3739 address); 3740 3741 if (is_sw) 3742 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1), 3743 aarch64_register (rt, 1), zero); 3744 else 3745 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64), 3746 aarch64_register (rt, 1), zero); 3747 3748 dsd->dsc->pc_adjust = 4; 3749 } 3750 3751 /* Implementation of aarch64_insn_visitor method "others". */ 3752 3753 static void 3754 aarch64_displaced_step_others (const uint32_t insn, 3755 struct aarch64_insn_data *data) 3756 { 3757 struct aarch64_displaced_step_data *dsd 3758 = (struct aarch64_displaced_step_data *) data; 3759 3760 uint32_t masked_insn = (insn & CLEAR_Rn_MASK); 3761 if (masked_insn == BLR) 3762 { 3763 /* Emit a BR to the same register and then update LR to the original 3764 address (similar to aarch64_displaced_step_b). */ 3765 aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff); 3766 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM, 3767 data->insn_addr + 4); 3768 } 3769 else 3770 aarch64_emit_insn (dsd->insn_buf, insn); 3771 dsd->insn_count = 1; 3772 3773 if (masked_insn == RET || masked_insn == BR || masked_insn == BLR) 3774 dsd->dsc->pc_adjust = 0; 3775 else 3776 dsd->dsc->pc_adjust = 4; 3777 } 3778 3779 static const struct aarch64_insn_visitor visitor = 3780 { 3781 aarch64_displaced_step_b, 3782 aarch64_displaced_step_b_cond, 3783 aarch64_displaced_step_cb, 3784 aarch64_displaced_step_tb, 3785 aarch64_displaced_step_adr, 3786 aarch64_displaced_step_ldr_literal, 3787 aarch64_displaced_step_others, 3788 }; 3789 3790 /* Implement the "displaced_step_copy_insn" gdbarch method. */ 3791 3792 displaced_step_copy_insn_closure_up 3793 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch, 3794 CORE_ADDR from, CORE_ADDR to, 3795 struct regcache *regs) 3796 { 3797 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); 3798 struct aarch64_displaced_step_data dsd; 3799 aarch64_inst inst; 3800 ULONGEST insn_from_memory; 3801 3802 if (!safe_read_memory_unsigned_integer (from, 4, byte_order_for_code, 3803 &insn_from_memory)) 3804 return nullptr; 3805 3806 uint32_t insn = insn_from_memory; 3807 3808 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0) 3809 return NULL; 3810 3811 /* Look for a Load Exclusive instruction which begins the sequence, 3812 or for a MOPS instruction. */ 3813 if ((inst.opcode->iclass == ldstexcl && bit (insn, 22)) 3814 || AARCH64_CPU_HAS_FEATURE (*inst.opcode->avariant, MOPS)) 3815 { 3816 /* We can't displaced step atomic sequences nor MOPS instructions. */ 3817 return NULL; 3818 } 3819 3820 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc 3821 (new aarch64_displaced_step_copy_insn_closure); 3822 dsd.base.insn_addr = from; 3823 dsd.new_addr = to; 3824 dsd.regs = regs; 3825 dsd.dsc = dsc.get (); 3826 dsd.insn_count = 0; 3827 aarch64_relocate_instruction (insn, &visitor, 3828 (struct aarch64_insn_data *) &dsd); 3829 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS); 3830 3831 if (dsd.insn_count != 0) 3832 { 3833 int i; 3834 3835 /* Instruction can be relocated to scratch pad. Copy 3836 relocated instruction(s) there. */ 3837 for (i = 0; i < dsd.insn_count; i++) 3838 { 3839 displaced_debug_printf ("writing insn %.8x at %s", 3840 dsd.insn_buf[i], 3841 paddress (gdbarch, to + i * 4)); 3842 3843 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code, 3844 (ULONGEST) dsd.insn_buf[i]); 3845 } 3846 } 3847 else 3848 { 3849 dsc = NULL; 3850 } 3851 3852 /* This is a work around for a problem with g++ 4.8. */ 3853 return displaced_step_copy_insn_closure_up (dsc.release ()); 3854 } 3855 3856 /* Implement the "displaced_step_fixup" gdbarch method. */ 3857 3858 void 3859 aarch64_displaced_step_fixup (struct gdbarch *gdbarch, 3860 struct displaced_step_copy_insn_closure *dsc_, 3861 CORE_ADDR from, CORE_ADDR to, 3862 struct regcache *regs, bool completed_p) 3863 { 3864 CORE_ADDR pc = regcache_read_pc (regs); 3865 3866 /* If the displaced instruction didn't complete successfully then all we 3867 need to do is restore the program counter. */ 3868 if (!completed_p) 3869 { 3870 pc = from + (pc - to); 3871 regcache_write_pc (regs, pc); 3872 return; 3873 } 3874 3875 aarch64_displaced_step_copy_insn_closure *dsc 3876 = (aarch64_displaced_step_copy_insn_closure *) dsc_; 3877 3878 displaced_debug_printf ("PC after stepping: %s (was %s).", 3879 paddress (gdbarch, pc), paddress (gdbarch, to)); 3880 3881 if (dsc->cond) 3882 { 3883 displaced_debug_printf ("[Conditional] pc_adjust before: %d", 3884 dsc->pc_adjust); 3885 3886 if (pc - to == 8) 3887 { 3888 /* Condition is true. */ 3889 } 3890 else if (pc - to == 4) 3891 { 3892 /* Condition is false. */ 3893 dsc->pc_adjust = 4; 3894 } 3895 else 3896 gdb_assert_not_reached ("Unexpected PC value after displaced stepping"); 3897 3898 displaced_debug_printf ("[Conditional] pc_adjust after: %d", 3899 dsc->pc_adjust); 3900 } 3901 3902 displaced_debug_printf ("%s PC by %d", 3903 dsc->pc_adjust ? "adjusting" : "not adjusting", 3904 dsc->pc_adjust); 3905 3906 if (dsc->pc_adjust != 0) 3907 { 3908 /* Make sure the previous instruction was executed (that is, the PC 3909 has changed). If the PC didn't change, then discard the adjustment 3910 offset. Otherwise we may skip an instruction before its execution 3911 took place. */ 3912 if ((pc - to) == 0) 3913 { 3914 displaced_debug_printf ("PC did not move. Discarding PC adjustment."); 3915 dsc->pc_adjust = 0; 3916 } 3917 3918 displaced_debug_printf ("fixup: set PC to %s:%d", 3919 paddress (gdbarch, from), dsc->pc_adjust); 3920 3921 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM, 3922 from + dsc->pc_adjust); 3923 } 3924 } 3925 3926 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */ 3927 3928 bool 3929 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch) 3930 { 3931 return true; 3932 } 3933 3934 /* Get the correct target description for the given VQ value. 3935 If VQ is zero then it is assumed SVE is not supported. 3936 (It is not possible to set VQ to zero on an SVE system). 3937 3938 MTE_P indicates the presence of the Memory Tagging Extension feature. 3939 3940 TLS_P indicates the presence of the Thread Local Storage feature. */ 3941 3942 const target_desc * 3943 aarch64_read_description (const aarch64_features &features) 3944 { 3945 if (features.vq > AARCH64_MAX_SVE_VQ) 3946 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), features.vq, 3947 AARCH64_MAX_SVE_VQ); 3948 3949 struct target_desc *tdesc = tdesc_aarch64_map[features]; 3950 3951 if (tdesc == NULL) 3952 { 3953 tdesc = aarch64_create_target_description (features); 3954 tdesc_aarch64_map[features] = tdesc; 3955 } 3956 3957 return tdesc; 3958 } 3959 3960 /* Return the VQ used when creating the target description TDESC. */ 3961 3962 static uint64_t 3963 aarch64_get_tdesc_vq (const struct target_desc *tdesc) 3964 { 3965 const struct tdesc_feature *feature_sve; 3966 3967 if (!tdesc_has_registers (tdesc)) 3968 return 0; 3969 3970 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve"); 3971 3972 if (feature_sve == nullptr) 3973 return 0; 3974 3975 uint64_t vl = tdesc_register_bitsize (feature_sve, 3976 aarch64_sve_register_names[0]) / 8; 3977 return sve_vq_from_vl (vl); 3978 } 3979 3980 3981 /* Return the svq (streaming vector quotient) used when creating the target 3982 description TDESC. */ 3983 3984 static uint64_t 3985 aarch64_get_tdesc_svq (const struct target_desc *tdesc) 3986 { 3987 const struct tdesc_feature *feature_sme; 3988 3989 if (!tdesc_has_registers (tdesc)) 3990 return 0; 3991 3992 feature_sme = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme"); 3993 3994 if (feature_sme == nullptr) 3995 return 0; 3996 3997 size_t svl_squared = tdesc_register_bitsize (feature_sme, "za"); 3998 3999 /* We have the total size of the ZA matrix, in bits. Figure out the svl 4000 value. */ 4001 size_t svl = std::sqrt (svl_squared / 8); 4002 4003 /* Now extract svq. */ 4004 return sve_vq_from_vl (svl); 4005 } 4006 4007 /* Get the AArch64 features present in the given target description. */ 4008 4009 aarch64_features 4010 aarch64_features_from_target_desc (const struct target_desc *tdesc) 4011 { 4012 aarch64_features features; 4013 4014 if (tdesc == nullptr) 4015 return features; 4016 4017 features.vq = aarch64_get_tdesc_vq (tdesc); 4018 4019 /* We need to look for a couple pauth feature name variations. */ 4020 features.pauth 4021 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth") != nullptr); 4022 4023 if (!features.pauth) 4024 features.pauth = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2") 4025 != nullptr); 4026 4027 features.mte 4028 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte") != nullptr); 4029 4030 const struct tdesc_feature *tls_feature 4031 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls"); 4032 4033 if (tls_feature != nullptr) 4034 { 4035 /* We have TLS registers. Find out how many. */ 4036 if (tdesc_unnumbered_register (tls_feature, "tpidr2")) 4037 features.tls = 2; 4038 else 4039 features.tls = 1; 4040 } 4041 4042 features.svq = aarch64_get_tdesc_svq (tdesc); 4043 4044 /* Check for the SME2 feature. */ 4045 features.sme2 = (tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2") 4046 != nullptr); 4047 4048 return features; 4049 } 4050 4051 /* Implement the "cannot_store_register" gdbarch method. */ 4052 4053 static int 4054 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum) 4055 { 4056 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 4057 4058 if (!tdep->has_pauth ()) 4059 return 0; 4060 4061 /* Pointer authentication registers are read-only. */ 4062 return (regnum >= tdep->pauth_reg_base 4063 && regnum < tdep->pauth_reg_base + tdep->pauth_reg_count); 4064 } 4065 4066 /* Implement the stack_frame_destroyed_p gdbarch method. */ 4067 4068 static int 4069 aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc) 4070 { 4071 CORE_ADDR func_start, func_end; 4072 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end)) 4073 return 0; 4074 4075 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); 4076 4077 ULONGEST insn_from_memory; 4078 if (!safe_read_memory_unsigned_integer (pc, 4, byte_order_for_code, 4079 &insn_from_memory)) 4080 return 0; 4081 4082 uint32_t insn = insn_from_memory; 4083 4084 aarch64_inst inst; 4085 if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0) 4086 return 0; 4087 4088 return streq (inst.opcode->name, "ret"); 4089 } 4090 4091 /* AArch64 implementation of the remove_non_address_bits gdbarch hook. Remove 4092 non address bits from a pointer value. */ 4093 4094 static CORE_ADDR 4095 aarch64_remove_non_address_bits (struct gdbarch *gdbarch, CORE_ADDR pointer) 4096 { 4097 /* By default, we assume TBI and discard the top 8 bits plus the VA range 4098 select bit (55). Below we try to fetch information about pointer 4099 authentication masks in order to make non-address removal more 4100 precise. */ 4101 CORE_ADDR mask = AARCH64_TOP_BITS_MASK; 4102 4103 /* Check if we have an inferior first. If not, just use the default 4104 mask. 4105 4106 We use the inferior_ptid here because the pointer authentication masks 4107 should be the same across threads of a process. Since we may not have 4108 access to the current thread (gdb may have switched to no inferiors 4109 momentarily), we use the inferior ptid. */ 4110 if (inferior_ptid != null_ptid) 4111 { 4112 /* If we do have an inferior, attempt to fetch its thread's thread_info 4113 struct. */ 4114 thread_info *thread = current_inferior ()->find_thread (inferior_ptid); 4115 4116 /* If the thread is running, we will not be able to fetch the mask 4117 registers. */ 4118 if (thread != nullptr && thread->state != THREAD_RUNNING) 4119 { 4120 /* Otherwise, fetch the register cache and the masks. */ 4121 struct regcache *regs 4122 = get_thread_regcache (current_inferior ()->process_target (), 4123 inferior_ptid); 4124 4125 /* Use the gdbarch from the register cache to check for pointer 4126 authentication support, as it matches the features found in 4127 that particular thread. */ 4128 aarch64_gdbarch_tdep *tdep 4129 = gdbarch_tdep<aarch64_gdbarch_tdep> (regs->arch ()); 4130 4131 /* Is there pointer authentication support? */ 4132 if (tdep->has_pauth ()) 4133 { 4134 CORE_ADDR cmask, dmask; 4135 int dmask_regnum 4136 = AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base); 4137 int cmask_regnum 4138 = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base); 4139 4140 /* If we have a kernel address and we have kernel-mode address 4141 mask registers, use those instead. */ 4142 if (tdep->pauth_reg_count > 2 4143 && pointer & VA_RANGE_SELECT_BIT_MASK) 4144 { 4145 dmask_regnum 4146 = AARCH64_PAUTH_DMASK_HIGH_REGNUM (tdep->pauth_reg_base); 4147 cmask_regnum 4148 = AARCH64_PAUTH_CMASK_HIGH_REGNUM (tdep->pauth_reg_base); 4149 } 4150 4151 /* We have both a code mask and a data mask. For now they are 4152 the same, but this may change in the future. */ 4153 if (regs->cooked_read (dmask_regnum, &dmask) != REG_VALID) 4154 dmask = mask; 4155 4156 if (regs->cooked_read (cmask_regnum, &cmask) != REG_VALID) 4157 cmask = mask; 4158 4159 mask |= aarch64_mask_from_pac_registers (cmask, dmask); 4160 } 4161 } 4162 } 4163 4164 return aarch64_remove_top_bits (pointer, mask); 4165 } 4166 4167 /* Given NAMES, a vector of strings, initialize it with all the SME 4168 pseudo-register names for the current streaming vector length. */ 4169 4170 static void 4171 aarch64_initialize_sme_pseudo_names (struct gdbarch *gdbarch, 4172 std::vector<std::string> &names) 4173 { 4174 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 4175 4176 gdb_assert (tdep->has_sme ()); 4177 gdb_assert (tdep->sme_tile_slice_pseudo_base > 0); 4178 gdb_assert (tdep->sme_tile_pseudo_base > 0); 4179 4180 for (int i = 0; i < tdep->sme_tile_slice_pseudo_count; i++) 4181 { 4182 int regnum = tdep->sme_tile_slice_pseudo_base + i; 4183 struct za_pseudo_encoding encoding; 4184 aarch64_za_decode_pseudos (gdbarch, regnum, encoding); 4185 names.push_back (aarch64_za_tile_slice_name (encoding)); 4186 } 4187 for (int i = 0; i < AARCH64_ZA_TILES_NUM; i++) 4188 { 4189 int regnum = tdep->sme_tile_pseudo_base + i; 4190 struct za_pseudo_encoding encoding; 4191 aarch64_za_decode_pseudos (gdbarch, regnum, encoding); 4192 names.push_back (aarch64_za_tile_name (encoding)); 4193 } 4194 } 4195 4196 /* Initialize the current architecture based on INFO. If possible, 4197 re-use an architecture from ARCHES, which is a list of 4198 architectures already created during this debugging session. 4199 4200 Called e.g. at program startup, when reading a core file, and when 4201 reading a binary file. */ 4202 4203 static struct gdbarch * 4204 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) 4205 { 4206 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve; 4207 const struct tdesc_feature *feature_pauth; 4208 bool valid_p = true; 4209 int i, num_regs = 0, num_pseudo_regs = 0; 4210 int first_pauth_regnum = -1, ra_sign_state_offset = -1; 4211 int first_mte_regnum = -1, first_tls_regnum = -1; 4212 uint64_t vq = aarch64_get_tdesc_vq (info.target_desc); 4213 uint64_t svq = aarch64_get_tdesc_svq (info.target_desc); 4214 4215 if (vq > AARCH64_MAX_SVE_VQ) 4216 internal_error (_("VQ out of bounds: %s (max %d)"), 4217 pulongest (vq), AARCH64_MAX_SVE_VQ); 4218 4219 if (svq > AARCH64_MAX_SVE_VQ) 4220 internal_error (_("Streaming vector quotient (svq) out of bounds: %s" 4221 " (max %d)"), 4222 pulongest (svq), AARCH64_MAX_SVE_VQ); 4223 4224 /* If there is already a candidate, use it. */ 4225 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info); 4226 best_arch != nullptr; 4227 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info)) 4228 { 4229 aarch64_gdbarch_tdep *tdep 4230 = gdbarch_tdep<aarch64_gdbarch_tdep> (best_arch->gdbarch); 4231 if (tdep && tdep->vq == vq && tdep->sme_svq == svq) 4232 return best_arch->gdbarch; 4233 } 4234 4235 /* Ensure we always have a target descriptor, and that it is for the given VQ 4236 value. */ 4237 const struct target_desc *tdesc = info.target_desc; 4238 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc) 4239 || svq != aarch64_get_tdesc_svq (tdesc)) 4240 { 4241 aarch64_features features; 4242 features.vq = vq; 4243 features.svq = svq; 4244 tdesc = aarch64_read_description (features); 4245 } 4246 gdb_assert (tdesc); 4247 4248 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core"); 4249 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu"); 4250 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve"); 4251 const struct tdesc_feature *feature_mte 4252 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte"); 4253 const struct tdesc_feature *feature_tls 4254 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.tls"); 4255 4256 if (feature_core == nullptr) 4257 return nullptr; 4258 4259 tdesc_arch_data_up tdesc_data = tdesc_data_alloc (); 4260 4261 /* Validate the description provides the mandatory core R registers 4262 and allocate their numbers. */ 4263 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++) 4264 valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (), 4265 AARCH64_X0_REGNUM + i, 4266 aarch64_r_register_names[i]); 4267 4268 num_regs = AARCH64_X0_REGNUM + i; 4269 4270 /* Add the V registers. */ 4271 if (feature_fpu != nullptr) 4272 { 4273 if (feature_sve != nullptr) 4274 error (_("Program contains both fpu and SVE features.")); 4275 4276 /* Validate the description provides the mandatory V registers 4277 and allocate their numbers. */ 4278 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++) 4279 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (), 4280 AARCH64_V0_REGNUM + i, 4281 aarch64_v_register_names[i]); 4282 4283 num_regs = AARCH64_V0_REGNUM + i; 4284 } 4285 4286 /* Add the SVE registers. */ 4287 if (feature_sve != nullptr) 4288 { 4289 /* Validate the description provides the mandatory SVE registers 4290 and allocate their numbers. */ 4291 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++) 4292 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (), 4293 AARCH64_SVE_Z0_REGNUM + i, 4294 aarch64_sve_register_names[i]); 4295 4296 num_regs = AARCH64_SVE_Z0_REGNUM + i; 4297 num_pseudo_regs += 32; /* add the Vn register pseudos. */ 4298 } 4299 4300 if (feature_fpu != nullptr || feature_sve != nullptr) 4301 { 4302 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */ 4303 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */ 4304 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */ 4305 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */ 4306 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */ 4307 } 4308 4309 int first_sme_regnum = -1; 4310 int first_sme2_regnum = -1; 4311 int first_sme_pseudo_regnum = -1; 4312 const struct tdesc_feature *feature_sme 4313 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme"); 4314 if (feature_sme != nullptr) 4315 { 4316 /* Record the first SME register. */ 4317 first_sme_regnum = num_regs; 4318 4319 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (), 4320 num_regs++, "svg"); 4321 4322 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (), 4323 num_regs++, "svcr"); 4324 4325 valid_p &= tdesc_numbered_register (feature_sme, tdesc_data.get (), 4326 num_regs++, "za"); 4327 4328 /* Record the first SME pseudo register. */ 4329 first_sme_pseudo_regnum = num_pseudo_regs; 4330 4331 /* Add the ZA tile slice pseudo registers. The number of tile slice 4332 pseudo-registers depend on the svl, and is always a multiple of 5. */ 4333 num_pseudo_regs += (svq << 5) * 5; 4334 4335 /* Add the ZA tile pseudo registers. */ 4336 num_pseudo_regs += AARCH64_ZA_TILES_NUM; 4337 4338 /* Now check for the SME2 feature. SME2 is only available if SME is 4339 available. */ 4340 const struct tdesc_feature *feature_sme2 4341 = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sme2"); 4342 if (feature_sme2 != nullptr) 4343 { 4344 /* Record the first SME2 register. */ 4345 first_sme2_regnum = num_regs; 4346 4347 valid_p &= tdesc_numbered_register (feature_sme2, tdesc_data.get (), 4348 num_regs++, "zt0"); 4349 } 4350 } 4351 4352 /* Add the TLS register. */ 4353 int tls_register_count = 0; 4354 if (feature_tls != nullptr) 4355 { 4356 first_tls_regnum = num_regs; 4357 4358 /* Look for the TLS registers. tpidr is required, but tpidr2 is 4359 optional. */ 4360 valid_p 4361 = tdesc_numbered_register (feature_tls, tdesc_data.get (), 4362 first_tls_regnum, "tpidr"); 4363 4364 if (valid_p) 4365 { 4366 tls_register_count++; 4367 4368 bool has_tpidr2 4369 = tdesc_numbered_register (feature_tls, tdesc_data.get (), 4370 first_tls_regnum + tls_register_count, 4371 "tpidr2"); 4372 4373 /* Figure out how many TLS registers we have. */ 4374 if (has_tpidr2) 4375 tls_register_count++; 4376 4377 num_regs += tls_register_count; 4378 } 4379 else 4380 { 4381 warning (_("Provided TLS register feature doesn't contain " 4382 "required tpidr register.")); 4383 return nullptr; 4384 } 4385 } 4386 4387 /* We have two versions of the pauth target description due to a past bug 4388 where GDB would crash when seeing the first version of the pauth target 4389 description. */ 4390 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth"); 4391 if (feature_pauth == nullptr) 4392 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth_v2"); 4393 4394 /* Add the pauth registers. */ 4395 int pauth_masks = 0; 4396 if (feature_pauth != NULL) 4397 { 4398 first_pauth_regnum = num_regs; 4399 ra_sign_state_offset = num_pseudo_regs; 4400 4401 /* Size of the expected register set with all 4 masks. */ 4402 int set_size = ARRAY_SIZE (aarch64_pauth_register_names); 4403 4404 /* QEMU exposes a couple additional masks for the high half of the 4405 address. We should either have 2 registers or 4 registers. */ 4406 if (tdesc_unnumbered_register (feature_pauth, 4407 "pauth_dmask_high") == 0) 4408 { 4409 /* We did not find pauth_dmask_high, assume we only have 4410 2 masks. We are not dealing with QEMU/Emulators then. */ 4411 set_size -= 2; 4412 } 4413 4414 /* Validate the descriptor provides the mandatory PAUTH registers and 4415 allocate their numbers. */ 4416 for (i = 0; i < set_size; i++) 4417 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (), 4418 first_pauth_regnum + i, 4419 aarch64_pauth_register_names[i]); 4420 4421 num_regs += i; 4422 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */ 4423 pauth_masks = set_size; 4424 } 4425 4426 /* Add the MTE registers. */ 4427 if (feature_mte != NULL) 4428 { 4429 first_mte_regnum = num_regs; 4430 /* Validate the descriptor provides the mandatory MTE registers and 4431 allocate their numbers. */ 4432 for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++) 4433 valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (), 4434 first_mte_regnum + i, 4435 aarch64_mte_register_names[i]); 4436 4437 num_regs += i; 4438 } 4439 /* W pseudo-registers */ 4440 int first_w_regnum = num_pseudo_regs; 4441 num_pseudo_regs += 31; 4442 4443 if (!valid_p) 4444 return nullptr; 4445 4446 /* AArch64 code is always little-endian. */ 4447 info.byte_order_for_code = BFD_ENDIAN_LITTLE; 4448 4449 gdbarch *gdbarch 4450 = gdbarch_alloc (&info, gdbarch_tdep_up (new aarch64_gdbarch_tdep)); 4451 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 4452 4453 /* This should be low enough for everything. */ 4454 tdep->lowest_pc = 0x20; 4455 tdep->jb_pc = -1; /* Longjump support not enabled by default. */ 4456 tdep->jb_elt_size = 8; 4457 tdep->vq = vq; 4458 tdep->pauth_reg_base = first_pauth_regnum; 4459 tdep->pauth_reg_count = pauth_masks; 4460 tdep->ra_sign_state_regnum = -1; 4461 tdep->mte_reg_base = first_mte_regnum; 4462 tdep->tls_regnum_base = first_tls_regnum; 4463 tdep->tls_register_count = tls_register_count; 4464 4465 /* Set the SME register set details. The pseudo-registers will be adjusted 4466 later. */ 4467 tdep->sme_reg_base = first_sme_regnum; 4468 tdep->sme_svg_regnum = first_sme_regnum; 4469 tdep->sme_svcr_regnum = first_sme_regnum + 1; 4470 tdep->sme_za_regnum = first_sme_regnum + 2; 4471 tdep->sme_svq = svq; 4472 4473 /* Set the SME2 register set details. */ 4474 tdep->sme2_zt0_regnum = first_sme2_regnum; 4475 4476 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call); 4477 set_gdbarch_frame_align (gdbarch, aarch64_frame_align); 4478 4479 /* Advance PC across function entry code. */ 4480 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue); 4481 4482 /* The stack grows downward. */ 4483 set_gdbarch_inner_than (gdbarch, core_addr_lessthan); 4484 4485 /* Breakpoint manipulation. */ 4486 set_gdbarch_breakpoint_kind_from_pc (gdbarch, 4487 aarch64_breakpoint::kind_from_pc); 4488 set_gdbarch_sw_breakpoint_from_kind (gdbarch, 4489 aarch64_breakpoint::bp_from_kind); 4490 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1); 4491 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step); 4492 4493 /* Information about registers, etc. */ 4494 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM); 4495 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM); 4496 set_gdbarch_num_regs (gdbarch, num_regs); 4497 4498 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs); 4499 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value); 4500 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write); 4501 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name); 4502 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type); 4503 set_tdesc_pseudo_register_reggroup_p (gdbarch, 4504 aarch64_pseudo_register_reggroup_p); 4505 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register); 4506 4507 /* ABI */ 4508 set_gdbarch_short_bit (gdbarch, 16); 4509 set_gdbarch_int_bit (gdbarch, 32); 4510 set_gdbarch_float_bit (gdbarch, 32); 4511 set_gdbarch_double_bit (gdbarch, 64); 4512 set_gdbarch_long_double_bit (gdbarch, 128); 4513 set_gdbarch_long_bit (gdbarch, 64); 4514 set_gdbarch_long_long_bit (gdbarch, 64); 4515 set_gdbarch_ptr_bit (gdbarch, 64); 4516 set_gdbarch_char_signed (gdbarch, 0); 4517 set_gdbarch_wchar_signed (gdbarch, 0); 4518 set_gdbarch_float_format (gdbarch, floatformats_ieee_single); 4519 set_gdbarch_double_format (gdbarch, floatformats_ieee_double); 4520 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad); 4521 set_gdbarch_type_align (gdbarch, aarch64_type_align); 4522 4523 /* Detect whether PC is at a point where the stack has been destroyed. */ 4524 set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p); 4525 4526 /* Internal <-> external register number maps. */ 4527 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum); 4528 4529 /* Returning results. */ 4530 set_gdbarch_return_value_as_value (gdbarch, aarch64_return_value); 4531 4532 /* Disassembly. */ 4533 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn); 4534 4535 /* Virtual tables. */ 4536 set_gdbarch_vbit_in_delta (gdbarch, 1); 4537 4538 /* Hook in the ABI-specific overrides, if they have been registered. */ 4539 info.target_desc = tdesc; 4540 info.tdesc_data = tdesc_data.get (); 4541 gdbarch_init_osabi (info, gdbarch); 4542 4543 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg); 4544 /* Register DWARF CFA vendor handler. */ 4545 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch, 4546 aarch64_execute_dwarf_cfa_vendor_op); 4547 4548 /* Permanent/Program breakpoint handling. */ 4549 set_gdbarch_program_breakpoint_here_p (gdbarch, 4550 aarch64_program_breakpoint_here_p); 4551 4552 /* Add some default predicates. */ 4553 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind); 4554 dwarf2_append_unwinders (gdbarch); 4555 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind); 4556 4557 frame_base_set_default (gdbarch, &aarch64_normal_base); 4558 4559 /* Now we have tuned the configuration, set a few final things, 4560 based on what the OS ABI has told us. */ 4561 4562 if (tdep->jb_pc >= 0) 4563 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target); 4564 4565 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address); 4566 4567 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags); 4568 4569 tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data)); 4570 4571 /* Fetch the updated number of registers after we're done adding all 4572 entries from features we don't explicitly care about. This is the case 4573 for bare metal debugging stubs that include a lot of system registers. */ 4574 num_regs = gdbarch_num_regs (gdbarch); 4575 4576 /* With the number of real registers updated, setup the pseudo-registers and 4577 record their numbers. */ 4578 4579 /* Setup W pseudo-register numbers. */ 4580 tdep->w_pseudo_base = first_w_regnum + num_regs; 4581 tdep->w_pseudo_count = 31; 4582 4583 /* Pointer authentication pseudo-registers. */ 4584 if (tdep->has_pauth ()) 4585 tdep->ra_sign_state_regnum = ra_sign_state_offset + num_regs; 4586 4587 /* Architecture hook to remove bits of a pointer that are not part of the 4588 address, like memory tags (MTE) and pointer authentication signatures. */ 4589 set_gdbarch_remove_non_address_bits (gdbarch, 4590 aarch64_remove_non_address_bits); 4591 4592 /* SME pseudo-registers. */ 4593 if (tdep->has_sme ()) 4594 { 4595 tdep->sme_pseudo_base = num_regs + first_sme_pseudo_regnum; 4596 tdep->sme_tile_slice_pseudo_base = tdep->sme_pseudo_base; 4597 tdep->sme_tile_slice_pseudo_count = (svq * 32) * 5; 4598 tdep->sme_tile_pseudo_base 4599 = tdep->sme_pseudo_base + tdep->sme_tile_slice_pseudo_count; 4600 tdep->sme_pseudo_count 4601 = tdep->sme_tile_slice_pseudo_count + AARCH64_ZA_TILES_NUM; 4602 4603 /* The SME ZA pseudo-registers are a set of 160 to 2560 pseudo-registers 4604 depending on the value of svl. 4605 4606 The tile pseudo-registers are organized around their qualifiers 4607 (b, h, s, d and q). Their numbers are distributed as follows: 4608 4609 b 0 4610 h 1~2 4611 s 3~6 4612 d 7~14 4613 q 15~30 4614 4615 The naming of the tile pseudo-registers follows the pattern za<t><q>, 4616 where: 4617 4618 <t> is the tile number, with the following possible values based on 4619 the qualifiers: 4620 4621 Qualifier - Allocated indexes 4622 4623 b - 0 4624 h - 0~1 4625 s - 0~3 4626 d - 0~7 4627 q - 0~15 4628 4629 <q> is the qualifier: b, h, s, d and q. 4630 4631 The tile slice pseudo-registers are organized around their 4632 qualifiers as well (b, h, s, d and q), but also around their 4633 direction (h - horizontal and v - vertical). 4634 4635 Even-numbered tile slice pseudo-registers are horizontally-oriented 4636 and odd-numbered tile slice pseudo-registers are vertically-oriented. 4637 4638 Their numbers are distributed as follows: 4639 4640 Qualifier - Allocated indexes 4641 4642 b tile slices - 0~511 4643 h tile slices - 512~1023 4644 s tile slices - 1024~1535 4645 d tile slices - 1536~2047 4646 q tile slices - 2048~2559 4647 4648 The naming of the tile slice pseudo-registers follows the pattern 4649 za<t><d><q><s>, where: 4650 4651 <t> is the tile number as described for the tile pseudo-registers. 4652 <d> is the direction of the tile slice (h or v) 4653 <q> is the qualifier of the tile slice (b, h, s, d or q) 4654 <s> is the slice number, defined as follows: 4655 4656 Qualifier - Allocated indexes 4657 4658 b - 0~15 4659 h - 0~7 4660 s - 0~3 4661 d - 0~1 4662 q - 0 4663 4664 We have helper functions to translate to/from register index from/to 4665 the set of fields that make the pseudo-register names. */ 4666 4667 /* Build the array of pseudo-register names available for this 4668 particular gdbarch configuration. */ 4669 aarch64_initialize_sme_pseudo_names (gdbarch, tdep->sme_pseudo_names); 4670 } 4671 4672 /* Add standard register aliases. */ 4673 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++) 4674 user_reg_add (gdbarch, aarch64_register_aliases[i].name, 4675 value_of_aarch64_user_reg, 4676 &aarch64_register_aliases[i].regnum); 4677 4678 register_aarch64_ravenscar_ops (gdbarch); 4679 4680 return gdbarch; 4681 } 4682 4683 static void 4684 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file) 4685 { 4686 aarch64_gdbarch_tdep *tdep = gdbarch_tdep<aarch64_gdbarch_tdep> (gdbarch); 4687 4688 if (tdep == NULL) 4689 return; 4690 4691 gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s\n"), 4692 paddress (gdbarch, tdep->lowest_pc)); 4693 4694 /* SME fields. */ 4695 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_q = %s\n"), 4696 host_address_to_string (tdep->sme_tile_type_q)); 4697 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_d = %s\n"), 4698 host_address_to_string (tdep->sme_tile_type_d)); 4699 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_s = %s\n"), 4700 host_address_to_string (tdep->sme_tile_type_s)); 4701 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_h = %s\n"), 4702 host_address_to_string (tdep->sme_tile_type_h)); 4703 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_type_n = %s\n"), 4704 host_address_to_string (tdep->sme_tile_type_b)); 4705 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_q = %s\n"), 4706 host_address_to_string (tdep->sme_tile_slice_type_q)); 4707 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_d = %s\n"), 4708 host_address_to_string (tdep->sme_tile_slice_type_d)); 4709 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_s = %s\n"), 4710 host_address_to_string (tdep->sme_tile_slice_type_s)); 4711 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_h = %s\n"), 4712 host_address_to_string (tdep->sme_tile_slice_type_h)); 4713 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_type_b = %s\n"), 4714 host_address_to_string (tdep->sme_tile_slice_type_b)); 4715 gdb_printf (file, _("aarch64_dump_tdep: sme_reg_base = %s\n"), 4716 pulongest (tdep->sme_reg_base)); 4717 gdb_printf (file, _("aarch64_dump_tdep: sme_svg_regnum = %s\n"), 4718 pulongest (tdep->sme_svg_regnum)); 4719 gdb_printf (file, _("aarch64_dump_tdep: sme_svcr_regnum = %s\n"), 4720 pulongest (tdep->sme_svcr_regnum)); 4721 gdb_printf (file, _("aarch64_dump_tdep: sme_za_regnum = %s\n"), 4722 pulongest (tdep->sme_za_regnum)); 4723 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_base = %s\n"), 4724 pulongest (tdep->sme_pseudo_base)); 4725 gdb_printf (file, _("aarch64_dump_tdep: sme_pseudo_count = %s\n"), 4726 pulongest (tdep->sme_pseudo_count)); 4727 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_base = %s\n"), 4728 pulongest (tdep->sme_tile_slice_pseudo_base)); 4729 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_slice_pseudo_count = %s\n"), 4730 pulongest (tdep->sme_tile_slice_pseudo_count)); 4731 gdb_printf (file, _("aarch64_dump_tdep: sme_tile_pseudo_base = %s\n"), 4732 pulongest (tdep->sme_tile_pseudo_base)); 4733 gdb_printf (file, _("aarch64_dump_tdep: sme_svq = %s\n"), 4734 pulongest (tdep->sme_svq)); 4735 } 4736 4737 #if GDB_SELF_TEST 4738 namespace selftests 4739 { 4740 static void aarch64_process_record_test (void); 4741 } 4742 #endif 4743 4744 void _initialize_aarch64_tdep (); 4745 void 4746 _initialize_aarch64_tdep () 4747 { 4748 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init, 4749 aarch64_dump_tdep); 4750 4751 /* Debug this file's internals. */ 4752 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\ 4753 Set AArch64 debugging."), _("\ 4754 Show AArch64 debugging."), _("\ 4755 When on, AArch64 specific debugging is enabled."), 4756 NULL, 4757 show_aarch64_debug, 4758 &setdebuglist, &showdebuglist); 4759 4760 #if GDB_SELF_TEST 4761 selftests::register_test ("aarch64-analyze-prologue", 4762 selftests::aarch64_analyze_prologue_test); 4763 selftests::register_test ("aarch64-process-record", 4764 selftests::aarch64_process_record_test); 4765 #endif 4766 } 4767 4768 /* AArch64 process record-replay related structures, defines etc. */ 4769 4770 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \ 4771 do \ 4772 { \ 4773 unsigned int reg_len = LENGTH; \ 4774 if (reg_len) \ 4775 { \ 4776 REGS = XNEWVEC (uint32_t, reg_len); \ 4777 memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \ 4778 } \ 4779 } \ 4780 while (0) 4781 4782 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \ 4783 do \ 4784 { \ 4785 unsigned int mem_len = LENGTH; \ 4786 if (mem_len) \ 4787 { \ 4788 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \ 4789 memcpy(MEMS, &RECORD_BUF[0], \ 4790 sizeof(struct aarch64_mem_r) * LENGTH); \ 4791 } \ 4792 } \ 4793 while (0) 4794 4795 /* AArch64 record/replay structures and enumerations. */ 4796 4797 struct aarch64_mem_r 4798 { 4799 uint64_t len; /* Record length. */ 4800 uint64_t addr; /* Memory address. */ 4801 }; 4802 4803 enum aarch64_record_result 4804 { 4805 AARCH64_RECORD_SUCCESS, 4806 AARCH64_RECORD_UNSUPPORTED, 4807 AARCH64_RECORD_UNKNOWN 4808 }; 4809 4810 struct aarch64_insn_decode_record 4811 { 4812 struct gdbarch *gdbarch; 4813 struct regcache *regcache; 4814 CORE_ADDR this_addr; /* Address of insn to be recorded. */ 4815 uint32_t aarch64_insn; /* Insn to be recorded. */ 4816 uint32_t mem_rec_count; /* Count of memory records. */ 4817 uint32_t reg_rec_count; /* Count of register records. */ 4818 uint32_t *aarch64_regs; /* Registers to be recorded. */ 4819 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */ 4820 }; 4821 4822 /* Record handler for data processing - register instructions. */ 4823 4824 static unsigned int 4825 aarch64_record_data_proc_reg (aarch64_insn_decode_record *aarch64_insn_r) 4826 { 4827 uint8_t reg_rd, insn_bits24_27, insn_bits21_23; 4828 uint32_t record_buf[4]; 4829 4830 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4); 4831 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); 4832 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23); 4833 4834 if (!bit (aarch64_insn_r->aarch64_insn, 28)) 4835 { 4836 uint8_t setflags; 4837 4838 /* Logical (shifted register). */ 4839 if (insn_bits24_27 == 0x0a) 4840 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03); 4841 /* Add/subtract. */ 4842 else if (insn_bits24_27 == 0x0b) 4843 setflags = bit (aarch64_insn_r->aarch64_insn, 29); 4844 else 4845 return AARCH64_RECORD_UNKNOWN; 4846 4847 record_buf[0] = reg_rd; 4848 aarch64_insn_r->reg_rec_count = 1; 4849 if (setflags) 4850 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM; 4851 } 4852 else 4853 { 4854 if (insn_bits24_27 == 0x0b) 4855 { 4856 /* Data-processing (3 source). */ 4857 record_buf[0] = reg_rd; 4858 aarch64_insn_r->reg_rec_count = 1; 4859 } 4860 else if (insn_bits24_27 == 0x0a) 4861 { 4862 if (insn_bits21_23 == 0x00) 4863 { 4864 /* Add/subtract (with carry). */ 4865 record_buf[0] = reg_rd; 4866 aarch64_insn_r->reg_rec_count = 1; 4867 if (bit (aarch64_insn_r->aarch64_insn, 29)) 4868 { 4869 record_buf[1] = AARCH64_CPSR_REGNUM; 4870 aarch64_insn_r->reg_rec_count = 2; 4871 } 4872 } 4873 else if (insn_bits21_23 == 0x02) 4874 { 4875 /* Conditional compare (register) and conditional compare 4876 (immediate) instructions. */ 4877 record_buf[0] = AARCH64_CPSR_REGNUM; 4878 aarch64_insn_r->reg_rec_count = 1; 4879 } 4880 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06) 4881 { 4882 /* Conditional select. */ 4883 /* Data-processing (2 source). */ 4884 /* Data-processing (1 source). */ 4885 record_buf[0] = reg_rd; 4886 aarch64_insn_r->reg_rec_count = 1; 4887 } 4888 else 4889 return AARCH64_RECORD_UNKNOWN; 4890 } 4891 } 4892 4893 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, 4894 record_buf); 4895 return AARCH64_RECORD_SUCCESS; 4896 } 4897 4898 /* Record handler for data processing - immediate instructions. */ 4899 4900 static unsigned int 4901 aarch64_record_data_proc_imm (aarch64_insn_decode_record *aarch64_insn_r) 4902 { 4903 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags; 4904 uint32_t record_buf[4]; 4905 4906 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4); 4907 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23); 4908 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); 4909 4910 if (insn_bits24_27 == 0x00 /* PC rel addressing. */ 4911 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */ 4912 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */ 4913 { 4914 record_buf[0] = reg_rd; 4915 aarch64_insn_r->reg_rec_count = 1; 4916 } 4917 else if (insn_bits24_27 == 0x01) 4918 { 4919 /* Add/Subtract (immediate). */ 4920 setflags = bit (aarch64_insn_r->aarch64_insn, 29); 4921 record_buf[0] = reg_rd; 4922 aarch64_insn_r->reg_rec_count = 1; 4923 if (setflags) 4924 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM; 4925 } 4926 else if (insn_bits24_27 == 0x02 && !insn_bit23) 4927 { 4928 /* Logical (immediate). */ 4929 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03; 4930 record_buf[0] = reg_rd; 4931 aarch64_insn_r->reg_rec_count = 1; 4932 if (setflags) 4933 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM; 4934 } 4935 else 4936 return AARCH64_RECORD_UNKNOWN; 4937 4938 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, 4939 record_buf); 4940 return AARCH64_RECORD_SUCCESS; 4941 } 4942 4943 /* Record handler for branch, exception generation and system instructions. */ 4944 4945 static unsigned int 4946 aarch64_record_branch_except_sys (aarch64_insn_decode_record *aarch64_insn_r) 4947 { 4948 4949 aarch64_gdbarch_tdep *tdep 4950 = gdbarch_tdep<aarch64_gdbarch_tdep> (aarch64_insn_r->gdbarch); 4951 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23; 4952 uint32_t record_buf[4]; 4953 4954 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); 4955 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31); 4956 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23); 4957 4958 if (insn_bits28_31 == 0x0d) 4959 { 4960 /* Exception generation instructions. */ 4961 if (insn_bits24_27 == 0x04) 4962 { 4963 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4) 4964 && !bits (aarch64_insn_r->aarch64_insn, 21, 23) 4965 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01) 4966 { 4967 ULONGEST svc_number; 4968 4969 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8, 4970 &svc_number); 4971 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache, 4972 svc_number); 4973 } 4974 else 4975 return AARCH64_RECORD_UNSUPPORTED; 4976 } 4977 /* System instructions. */ 4978 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00) 4979 { 4980 uint32_t reg_rt, reg_crn; 4981 4982 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4); 4983 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15); 4984 4985 /* Record rt in case of sysl and mrs instructions. */ 4986 if (bit (aarch64_insn_r->aarch64_insn, 21)) 4987 { 4988 record_buf[0] = reg_rt; 4989 aarch64_insn_r->reg_rec_count = 1; 4990 } 4991 /* Record cpsr for hint and msr(immediate) instructions. */ 4992 else if (reg_crn == 0x02 || reg_crn == 0x04) 4993 { 4994 record_buf[0] = AARCH64_CPSR_REGNUM; 4995 aarch64_insn_r->reg_rec_count = 1; 4996 } 4997 } 4998 /* Unconditional branch (register). */ 4999 else if((insn_bits24_27 & 0x0e) == 0x06) 5000 { 5001 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM; 5002 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01) 5003 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM; 5004 } 5005 else 5006 return AARCH64_RECORD_UNKNOWN; 5007 } 5008 /* Unconditional branch (immediate). */ 5009 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04) 5010 { 5011 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM; 5012 if (bit (aarch64_insn_r->aarch64_insn, 31)) 5013 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM; 5014 } 5015 else 5016 /* Compare & branch (immediate), Test & branch (immediate) and 5017 Conditional branch (immediate). */ 5018 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM; 5019 5020 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, 5021 record_buf); 5022 return AARCH64_RECORD_SUCCESS; 5023 } 5024 5025 /* Record handler for advanced SIMD load and store instructions. */ 5026 5027 static unsigned int 5028 aarch64_record_asimd_load_store (aarch64_insn_decode_record *aarch64_insn_r) 5029 { 5030 CORE_ADDR address; 5031 uint64_t addr_offset = 0; 5032 uint32_t record_buf[24]; 5033 uint64_t record_buf_mem[24]; 5034 uint32_t reg_rn, reg_rt; 5035 uint32_t reg_index = 0, mem_index = 0; 5036 uint8_t opcode_bits, size_bits; 5037 5038 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4); 5039 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9); 5040 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11); 5041 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15); 5042 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address); 5043 5044 if (record_debug) 5045 debug_printf ("Process record: Advanced SIMD load/store\n"); 5046 5047 /* Load/store single structure. */ 5048 if (bit (aarch64_insn_r->aarch64_insn, 24)) 5049 { 5050 uint8_t sindex, scale, selem, esize, replicate = 0; 5051 scale = opcode_bits >> 2; 5052 selem = ((opcode_bits & 0x02) | 5053 bit (aarch64_insn_r->aarch64_insn, 21)) + 1; 5054 switch (scale) 5055 { 5056 case 1: 5057 if (size_bits & 0x01) 5058 return AARCH64_RECORD_UNKNOWN; 5059 break; 5060 case 2: 5061 if ((size_bits >> 1) & 0x01) 5062 return AARCH64_RECORD_UNKNOWN; 5063 if (size_bits & 0x01) 5064 { 5065 if (!((opcode_bits >> 1) & 0x01)) 5066 scale = 3; 5067 else 5068 return AARCH64_RECORD_UNKNOWN; 5069 } 5070 break; 5071 case 3: 5072 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01)) 5073 { 5074 scale = size_bits; 5075 replicate = 1; 5076 break; 5077 } 5078 else 5079 return AARCH64_RECORD_UNKNOWN; 5080 default: 5081 break; 5082 } 5083 esize = 8 << scale; 5084 if (replicate) 5085 for (sindex = 0; sindex < selem; sindex++) 5086 { 5087 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM; 5088 reg_rt = (reg_rt + 1) % 32; 5089 } 5090 else 5091 { 5092 for (sindex = 0; sindex < selem; sindex++) 5093 { 5094 if (bit (aarch64_insn_r->aarch64_insn, 22)) 5095 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM; 5096 else 5097 { 5098 record_buf_mem[mem_index++] = esize / 8; 5099 record_buf_mem[mem_index++] = address + addr_offset; 5100 } 5101 addr_offset = addr_offset + (esize / 8); 5102 reg_rt = (reg_rt + 1) % 32; 5103 } 5104 } 5105 } 5106 /* Load/store multiple structure. */ 5107 else 5108 { 5109 uint8_t selem, esize, rpt, elements; 5110 uint8_t eindex, rindex; 5111 5112 esize = 8 << size_bits; 5113 if (bit (aarch64_insn_r->aarch64_insn, 30)) 5114 elements = 128 / esize; 5115 else 5116 elements = 64 / esize; 5117 5118 switch (opcode_bits) 5119 { 5120 /*LD/ST4 (4 Registers). */ 5121 case 0: 5122 rpt = 1; 5123 selem = 4; 5124 break; 5125 /*LD/ST1 (4 Registers). */ 5126 case 2: 5127 rpt = 4; 5128 selem = 1; 5129 break; 5130 /*LD/ST3 (3 Registers). */ 5131 case 4: 5132 rpt = 1; 5133 selem = 3; 5134 break; 5135 /*LD/ST1 (3 Registers). */ 5136 case 6: 5137 rpt = 3; 5138 selem = 1; 5139 break; 5140 /*LD/ST1 (1 Register). */ 5141 case 7: 5142 rpt = 1; 5143 selem = 1; 5144 break; 5145 /*LD/ST2 (2 Registers). */ 5146 case 8: 5147 rpt = 1; 5148 selem = 2; 5149 break; 5150 /*LD/ST1 (2 Registers). */ 5151 case 10: 5152 rpt = 2; 5153 selem = 1; 5154 break; 5155 default: 5156 return AARCH64_RECORD_UNSUPPORTED; 5157 break; 5158 } 5159 for (rindex = 0; rindex < rpt; rindex++) 5160 for (eindex = 0; eindex < elements; eindex++) 5161 { 5162 uint8_t reg_tt, sindex; 5163 reg_tt = (reg_rt + rindex) % 32; 5164 for (sindex = 0; sindex < selem; sindex++) 5165 { 5166 if (bit (aarch64_insn_r->aarch64_insn, 22)) 5167 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM; 5168 else 5169 { 5170 record_buf_mem[mem_index++] = esize / 8; 5171 record_buf_mem[mem_index++] = address + addr_offset; 5172 } 5173 addr_offset = addr_offset + (esize / 8); 5174 reg_tt = (reg_tt + 1) % 32; 5175 } 5176 } 5177 } 5178 5179 if (bit (aarch64_insn_r->aarch64_insn, 23)) 5180 record_buf[reg_index++] = reg_rn; 5181 5182 aarch64_insn_r->reg_rec_count = reg_index; 5183 aarch64_insn_r->mem_rec_count = mem_index / 2; 5184 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count, 5185 record_buf_mem); 5186 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, 5187 record_buf); 5188 return AARCH64_RECORD_SUCCESS; 5189 } 5190 5191 /* Record handler for Memory Copy and Memory Set instructions. */ 5192 5193 static unsigned int 5194 aarch64_record_memcopy_memset (aarch64_insn_decode_record *aarch64_insn_r) 5195 { 5196 if (record_debug) 5197 debug_printf ("Process record: memory copy and memory set\n"); 5198 5199 uint8_t op1 = bits (aarch64_insn_r->aarch64_insn, 22, 23); 5200 uint8_t op2 = bits (aarch64_insn_r->aarch64_insn, 12, 15); 5201 uint32_t reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4); 5202 uint32_t reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9); 5203 uint32_t record_buf[3]; 5204 uint64_t record_buf_mem[4]; 5205 5206 if (op1 == 3 && op2 > 11) 5207 /* Unallocated instructions. */ 5208 return AARCH64_RECORD_UNKNOWN; 5209 5210 /* Set instructions have two registers and one memory region to be 5211 recorded. */ 5212 record_buf[0] = reg_rd; 5213 record_buf[1] = reg_rn; 5214 aarch64_insn_r->reg_rec_count = 2; 5215 5216 ULONGEST dest_addr; 5217 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rd, &dest_addr); 5218 5219 LONGEST length; 5220 regcache_raw_read_signed (aarch64_insn_r->regcache, reg_rn, &length); 5221 5222 /* In one of the algorithm options a processor can implement, the length 5223 in Rn has an inverted sign. */ 5224 if (length < 0) 5225 length *= -1; 5226 5227 record_buf_mem[0] = length; 5228 record_buf_mem[1] = dest_addr; 5229 aarch64_insn_r->mem_rec_count = 1; 5230 5231 if (op1 != 3) 5232 { 5233 /* Copy instructions have an additional register and an additional 5234 memory region to be recorded. */ 5235 uint32_t reg_rs = bits (aarch64_insn_r->aarch64_insn, 16, 20); 5236 5237 record_buf[2] = reg_rs; 5238 aarch64_insn_r->reg_rec_count++; 5239 5240 ULONGEST source_addr; 5241 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rs, 5242 &source_addr); 5243 5244 record_buf_mem[2] = length; 5245 record_buf_mem[3] = source_addr; 5246 aarch64_insn_r->mem_rec_count++; 5247 } 5248 5249 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count, 5250 record_buf_mem); 5251 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, 5252 record_buf); 5253 return AARCH64_RECORD_SUCCESS; 5254 } 5255 5256 /* Record handler for load and store instructions. */ 5257 5258 static unsigned int 5259 aarch64_record_load_store (aarch64_insn_decode_record *aarch64_insn_r) 5260 { 5261 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11; 5262 uint8_t insn_bit23, insn_bit21; 5263 uint8_t opc, size_bits, ld_flag, vector_flag; 5264 uint32_t reg_rn, reg_rt, reg_rt2; 5265 uint64_t datasize, offset; 5266 uint32_t record_buf[8]; 5267 uint64_t record_buf_mem[8]; 5268 CORE_ADDR address; 5269 5270 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11); 5271 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); 5272 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29); 5273 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21); 5274 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23); 5275 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22); 5276 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26); 5277 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4); 5278 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9); 5279 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14); 5280 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31); 5281 5282 /* Load/store exclusive. */ 5283 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00) 5284 { 5285 if (record_debug) 5286 debug_printf ("Process record: load/store exclusive\n"); 5287 5288 if (ld_flag) 5289 { 5290 record_buf[0] = reg_rt; 5291 aarch64_insn_r->reg_rec_count = 1; 5292 if (insn_bit21) 5293 { 5294 record_buf[1] = reg_rt2; 5295 aarch64_insn_r->reg_rec_count = 2; 5296 } 5297 } 5298 else 5299 { 5300 if (insn_bit21) 5301 datasize = (8 << size_bits) * 2; 5302 else 5303 datasize = (8 << size_bits); 5304 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, 5305 &address); 5306 record_buf_mem[0] = datasize / 8; 5307 record_buf_mem[1] = address; 5308 aarch64_insn_r->mem_rec_count = 1; 5309 if (!insn_bit23) 5310 { 5311 /* Save register rs. */ 5312 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20); 5313 aarch64_insn_r->reg_rec_count = 1; 5314 } 5315 } 5316 } 5317 /* Load register (literal) instructions decoding. */ 5318 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01) 5319 { 5320 if (record_debug) 5321 debug_printf ("Process record: load register (literal)\n"); 5322 if (vector_flag) 5323 record_buf[0] = reg_rt + AARCH64_V0_REGNUM; 5324 else 5325 record_buf[0] = reg_rt; 5326 aarch64_insn_r->reg_rec_count = 1; 5327 } 5328 /* All types of load/store pair instructions decoding. */ 5329 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02) 5330 { 5331 if (record_debug) 5332 debug_printf ("Process record: load/store pair\n"); 5333 5334 if (ld_flag) 5335 { 5336 if (vector_flag) 5337 { 5338 record_buf[0] = reg_rt + AARCH64_V0_REGNUM; 5339 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM; 5340 } 5341 else 5342 { 5343 record_buf[0] = reg_rt; 5344 record_buf[1] = reg_rt2; 5345 } 5346 aarch64_insn_r->reg_rec_count = 2; 5347 } 5348 else 5349 { 5350 uint16_t imm7_off; 5351 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21); 5352 if (!vector_flag) 5353 size_bits = size_bits >> 1; 5354 datasize = 8 << (2 + size_bits); 5355 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off; 5356 offset = offset << (2 + size_bits); 5357 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, 5358 &address); 5359 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23)) 5360 { 5361 if (imm7_off & 0x40) 5362 address = address - offset; 5363 else 5364 address = address + offset; 5365 } 5366 5367 record_buf_mem[0] = datasize / 8; 5368 record_buf_mem[1] = address; 5369 record_buf_mem[2] = datasize / 8; 5370 record_buf_mem[3] = address + (datasize / 8); 5371 aarch64_insn_r->mem_rec_count = 2; 5372 } 5373 if (bit (aarch64_insn_r->aarch64_insn, 23)) 5374 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn; 5375 } 5376 /* Load/store register (unsigned immediate) instructions. */ 5377 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03) 5378 { 5379 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23); 5380 if (!(opc >> 1)) 5381 { 5382 if (opc & 0x01) 5383 ld_flag = 0x01; 5384 else 5385 ld_flag = 0x0; 5386 } 5387 else 5388 { 5389 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2) 5390 { 5391 /* PRFM (immediate) */ 5392 return AARCH64_RECORD_SUCCESS; 5393 } 5394 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2) 5395 { 5396 /* LDRSW (immediate) */ 5397 ld_flag = 0x1; 5398 } 5399 else 5400 { 5401 if (opc & 0x01) 5402 ld_flag = 0x01; 5403 else 5404 ld_flag = 0x0; 5405 } 5406 } 5407 5408 if (record_debug) 5409 { 5410 debug_printf ("Process record: load/store (unsigned immediate):" 5411 " size %x V %d opc %x\n", size_bits, vector_flag, 5412 opc); 5413 } 5414 5415 if (!ld_flag) 5416 { 5417 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21); 5418 datasize = 8 << size_bits; 5419 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, 5420 &address); 5421 offset = offset << size_bits; 5422 address = address + offset; 5423 5424 record_buf_mem[0] = datasize >> 3; 5425 record_buf_mem[1] = address; 5426 aarch64_insn_r->mem_rec_count = 1; 5427 } 5428 else 5429 { 5430 if (vector_flag) 5431 record_buf[0] = reg_rt + AARCH64_V0_REGNUM; 5432 else 5433 record_buf[0] = reg_rt; 5434 aarch64_insn_r->reg_rec_count = 1; 5435 } 5436 } 5437 /* Load/store register (register offset) instructions. */ 5438 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03 5439 && insn_bits10_11 == 0x02 && insn_bit21) 5440 { 5441 if (record_debug) 5442 debug_printf ("Process record: load/store (register offset)\n"); 5443 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23); 5444 if (!(opc >> 1)) 5445 if (opc & 0x01) 5446 ld_flag = 0x01; 5447 else 5448 ld_flag = 0x0; 5449 else 5450 if (size_bits != 0x03) 5451 ld_flag = 0x01; 5452 else 5453 return AARCH64_RECORD_UNKNOWN; 5454 5455 if (!ld_flag) 5456 { 5457 ULONGEST reg_rm_val; 5458 5459 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 5460 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val); 5461 if (bit (aarch64_insn_r->aarch64_insn, 12)) 5462 offset = reg_rm_val << size_bits; 5463 else 5464 offset = reg_rm_val; 5465 datasize = 8 << size_bits; 5466 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, 5467 &address); 5468 address = address + offset; 5469 record_buf_mem[0] = datasize >> 3; 5470 record_buf_mem[1] = address; 5471 aarch64_insn_r->mem_rec_count = 1; 5472 } 5473 else 5474 { 5475 if (vector_flag) 5476 record_buf[0] = reg_rt + AARCH64_V0_REGNUM; 5477 else 5478 record_buf[0] = reg_rt; 5479 aarch64_insn_r->reg_rec_count = 1; 5480 } 5481 } 5482 /* Load/store register (immediate and unprivileged) instructions. */ 5483 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03 5484 && !insn_bit21) 5485 { 5486 if (record_debug) 5487 { 5488 debug_printf ("Process record: load/store " 5489 "(immediate and unprivileged)\n"); 5490 } 5491 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23); 5492 if (!(opc >> 1)) 5493 if (opc & 0x01) 5494 ld_flag = 0x01; 5495 else 5496 ld_flag = 0x0; 5497 else 5498 if (size_bits != 0x03) 5499 ld_flag = 0x01; 5500 else 5501 return AARCH64_RECORD_UNKNOWN; 5502 5503 if (!ld_flag) 5504 { 5505 uint16_t imm9_off; 5506 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20); 5507 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off; 5508 datasize = 8 << size_bits; 5509 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, 5510 &address); 5511 if (insn_bits10_11 != 0x01) 5512 { 5513 if (imm9_off & 0x0100) 5514 address = address - offset; 5515 else 5516 address = address + offset; 5517 } 5518 record_buf_mem[0] = datasize >> 3; 5519 record_buf_mem[1] = address; 5520 aarch64_insn_r->mem_rec_count = 1; 5521 } 5522 else 5523 { 5524 if (vector_flag) 5525 record_buf[0] = reg_rt + AARCH64_V0_REGNUM; 5526 else 5527 record_buf[0] = reg_rt; 5528 aarch64_insn_r->reg_rec_count = 1; 5529 } 5530 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03) 5531 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn; 5532 } 5533 /* Memory Copy and Memory Set instructions. */ 5534 else if ((insn_bits24_27 & 1) == 1 && insn_bits28_29 == 1 5535 && insn_bits10_11 == 1 && !insn_bit21) 5536 return aarch64_record_memcopy_memset (aarch64_insn_r); 5537 /* Advanced SIMD load/store instructions. */ 5538 else 5539 return aarch64_record_asimd_load_store (aarch64_insn_r); 5540 5541 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count, 5542 record_buf_mem); 5543 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, 5544 record_buf); 5545 return AARCH64_RECORD_SUCCESS; 5546 } 5547 5548 /* Record handler for data processing SIMD and floating point instructions. */ 5549 5550 static unsigned int 5551 aarch64_record_data_proc_simd_fp (aarch64_insn_decode_record *aarch64_insn_r) 5552 { 5553 uint8_t insn_bit21, opcode, rmode, reg_rd; 5554 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15; 5555 uint8_t insn_bits11_14; 5556 uint32_t record_buf[2]; 5557 5558 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); 5559 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31); 5560 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11); 5561 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15); 5562 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14); 5563 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18); 5564 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20); 5565 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4); 5566 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21); 5567 5568 if (record_debug) 5569 debug_printf ("Process record: data processing SIMD/FP: "); 5570 5571 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e) 5572 { 5573 /* Floating point - fixed point conversion instructions. */ 5574 if (!insn_bit21) 5575 { 5576 if (record_debug) 5577 debug_printf ("FP - fixed point conversion"); 5578 5579 if ((opcode >> 1) == 0x0 && rmode == 0x03) 5580 record_buf[0] = reg_rd; 5581 else 5582 record_buf[0] = reg_rd + AARCH64_V0_REGNUM; 5583 } 5584 /* Floating point - conditional compare instructions. */ 5585 else if (insn_bits10_11 == 0x01) 5586 { 5587 if (record_debug) 5588 debug_printf ("FP - conditional compare"); 5589 5590 record_buf[0] = AARCH64_CPSR_REGNUM; 5591 } 5592 /* Floating point - data processing (2-source) and 5593 conditional select instructions. */ 5594 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03) 5595 { 5596 if (record_debug) 5597 debug_printf ("FP - DP (2-source)"); 5598 5599 record_buf[0] = reg_rd + AARCH64_V0_REGNUM; 5600 } 5601 else if (insn_bits10_11 == 0x00) 5602 { 5603 /* Floating point - immediate instructions. */ 5604 if ((insn_bits12_15 & 0x01) == 0x01 5605 || (insn_bits12_15 & 0x07) == 0x04) 5606 { 5607 if (record_debug) 5608 debug_printf ("FP - immediate"); 5609 record_buf[0] = reg_rd + AARCH64_V0_REGNUM; 5610 } 5611 /* Floating point - compare instructions. */ 5612 else if ((insn_bits12_15 & 0x03) == 0x02) 5613 { 5614 if (record_debug) 5615 debug_printf ("FP - immediate"); 5616 record_buf[0] = AARCH64_CPSR_REGNUM; 5617 } 5618 /* Floating point - integer conversions instructions. */ 5619 else if (insn_bits12_15 == 0x00) 5620 { 5621 /* Convert float to integer instruction. */ 5622 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode)) 5623 { 5624 if (record_debug) 5625 debug_printf ("float to int conversion"); 5626 5627 record_buf[0] = reg_rd + AARCH64_X0_REGNUM; 5628 } 5629 /* Convert integer to float instruction. */ 5630 else if ((opcode >> 1) == 0x01 && !rmode) 5631 { 5632 if (record_debug) 5633 debug_printf ("int to float conversion"); 5634 5635 record_buf[0] = reg_rd + AARCH64_V0_REGNUM; 5636 } 5637 /* Move float to integer instruction. */ 5638 else if ((opcode >> 1) == 0x03) 5639 { 5640 if (record_debug) 5641 debug_printf ("move float to int"); 5642 5643 if (!(opcode & 0x01)) 5644 record_buf[0] = reg_rd + AARCH64_X0_REGNUM; 5645 else 5646 record_buf[0] = reg_rd + AARCH64_V0_REGNUM; 5647 } 5648 else 5649 return AARCH64_RECORD_UNKNOWN; 5650 } 5651 else 5652 return AARCH64_RECORD_UNKNOWN; 5653 } 5654 else 5655 return AARCH64_RECORD_UNKNOWN; 5656 } 5657 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e) 5658 { 5659 if (record_debug) 5660 debug_printf ("SIMD copy"); 5661 5662 /* Advanced SIMD copy instructions. */ 5663 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23) 5664 && !bit (aarch64_insn_r->aarch64_insn, 15) 5665 && bit (aarch64_insn_r->aarch64_insn, 10)) 5666 { 5667 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07) 5668 record_buf[0] = reg_rd + AARCH64_X0_REGNUM; 5669 else 5670 record_buf[0] = reg_rd + AARCH64_V0_REGNUM; 5671 } 5672 else 5673 record_buf[0] = reg_rd + AARCH64_V0_REGNUM; 5674 } 5675 /* All remaining floating point or advanced SIMD instructions. */ 5676 else 5677 { 5678 if (record_debug) 5679 debug_printf ("all remain"); 5680 5681 record_buf[0] = reg_rd + AARCH64_V0_REGNUM; 5682 } 5683 5684 if (record_debug) 5685 debug_printf ("\n"); 5686 5687 /* Record the V/X register. */ 5688 aarch64_insn_r->reg_rec_count++; 5689 5690 /* Some of these instructions may set bits in the FPSR, so record it 5691 too. */ 5692 record_buf[1] = AARCH64_FPSR_REGNUM; 5693 aarch64_insn_r->reg_rec_count++; 5694 5695 gdb_assert (aarch64_insn_r->reg_rec_count == 2); 5696 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, 5697 record_buf); 5698 return AARCH64_RECORD_SUCCESS; 5699 } 5700 5701 /* Decodes insns type and invokes its record handler. */ 5702 5703 static unsigned int 5704 aarch64_record_decode_insn_handler (aarch64_insn_decode_record *aarch64_insn_r) 5705 { 5706 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28; 5707 5708 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25); 5709 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26); 5710 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27); 5711 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28); 5712 5713 /* Data processing - immediate instructions. */ 5714 if (!ins_bit26 && !ins_bit27 && ins_bit28) 5715 return aarch64_record_data_proc_imm (aarch64_insn_r); 5716 5717 /* Branch, exception generation and system instructions. */ 5718 if (ins_bit26 && !ins_bit27 && ins_bit28) 5719 return aarch64_record_branch_except_sys (aarch64_insn_r); 5720 5721 /* Load and store instructions. */ 5722 if (!ins_bit25 && ins_bit27) 5723 return aarch64_record_load_store (aarch64_insn_r); 5724 5725 /* Data processing - register instructions. */ 5726 if (ins_bit25 && !ins_bit26 && ins_bit27) 5727 return aarch64_record_data_proc_reg (aarch64_insn_r); 5728 5729 /* Data processing - SIMD and floating point instructions. */ 5730 if (ins_bit25 && ins_bit26 && ins_bit27) 5731 return aarch64_record_data_proc_simd_fp (aarch64_insn_r); 5732 5733 return AARCH64_RECORD_UNSUPPORTED; 5734 } 5735 5736 /* Cleans up local record registers and memory allocations. */ 5737 5738 static void 5739 deallocate_reg_mem (aarch64_insn_decode_record *record) 5740 { 5741 xfree (record->aarch64_regs); 5742 xfree (record->aarch64_mems); 5743 } 5744 5745 #if GDB_SELF_TEST 5746 namespace selftests { 5747 5748 static void 5749 aarch64_process_record_test (void) 5750 { 5751 struct gdbarch_info info; 5752 uint32_t ret; 5753 5754 info.bfd_arch_info = bfd_scan_arch ("aarch64"); 5755 5756 struct gdbarch *gdbarch = gdbarch_find_by_info (info); 5757 SELF_CHECK (gdbarch != NULL); 5758 5759 aarch64_insn_decode_record aarch64_record; 5760 5761 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record)); 5762 aarch64_record.regcache = NULL; 5763 aarch64_record.this_addr = 0; 5764 aarch64_record.gdbarch = gdbarch; 5765 5766 /* 20 00 80 f9 prfm pldl1keep, [x1] */ 5767 aarch64_record.aarch64_insn = 0xf9800020; 5768 ret = aarch64_record_decode_insn_handler (&aarch64_record); 5769 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS); 5770 SELF_CHECK (aarch64_record.reg_rec_count == 0); 5771 SELF_CHECK (aarch64_record.mem_rec_count == 0); 5772 5773 deallocate_reg_mem (&aarch64_record); 5774 } 5775 5776 } // namespace selftests 5777 #endif /* GDB_SELF_TEST */ 5778 5779 /* Parse the current instruction and record the values of the registers and 5780 memory that will be changed in current instruction to record_arch_list 5781 return -1 if something is wrong. */ 5782 5783 int 5784 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache, 5785 CORE_ADDR insn_addr) 5786 { 5787 uint32_t rec_no = 0; 5788 const uint8_t insn_size = 4; 5789 uint32_t ret = 0; 5790 gdb_byte buf[insn_size]; 5791 aarch64_insn_decode_record aarch64_record; 5792 5793 memset (&buf[0], 0, insn_size); 5794 memset (&aarch64_record, 0, sizeof (aarch64_insn_decode_record)); 5795 target_read_memory (insn_addr, &buf[0], insn_size); 5796 aarch64_record.aarch64_insn 5797 = (uint32_t) extract_unsigned_integer (&buf[0], 5798 insn_size, 5799 gdbarch_byte_order (gdbarch)); 5800 aarch64_record.regcache = regcache; 5801 aarch64_record.this_addr = insn_addr; 5802 aarch64_record.gdbarch = gdbarch; 5803 5804 ret = aarch64_record_decode_insn_handler (&aarch64_record); 5805 if (ret == AARCH64_RECORD_UNSUPPORTED) 5806 { 5807 gdb_printf (gdb_stderr, 5808 _("Process record does not support instruction " 5809 "0x%0x at address %s.\n"), 5810 aarch64_record.aarch64_insn, 5811 paddress (gdbarch, insn_addr)); 5812 ret = -1; 5813 } 5814 5815 if (0 == ret) 5816 { 5817 /* Record registers. */ 5818 record_full_arch_list_add_reg (aarch64_record.regcache, 5819 AARCH64_PC_REGNUM); 5820 /* Always record register CPSR. */ 5821 record_full_arch_list_add_reg (aarch64_record.regcache, 5822 AARCH64_CPSR_REGNUM); 5823 if (aarch64_record.aarch64_regs) 5824 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++) 5825 if (record_full_arch_list_add_reg (aarch64_record.regcache, 5826 aarch64_record.aarch64_regs[rec_no])) 5827 ret = -1; 5828 5829 /* Record memories. */ 5830 if (aarch64_record.aarch64_mems) 5831 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++) 5832 if (record_full_arch_list_add_mem 5833 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr, 5834 aarch64_record.aarch64_mems[rec_no].len)) 5835 ret = -1; 5836 5837 if (record_full_arch_list_add_end ()) 5838 ret = -1; 5839 } 5840 5841 deallocate_reg_mem (&aarch64_record); 5842 return ret; 5843 } 5844