/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2013 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "gdb_string.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "gdb_assert.h"
#include "vec.h"

#include "features/aarch64.c"
#include "features/aarch64-without-fpu.c"

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
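/* As the table above shows, the "w0".."w30" names are simply aliases
   for the corresponding 64-bit X registers rather than separate
   32-bit registers, and "ip0"/"ip1" are the procedure-call-standard
   names for the intra-procedure-call scratch registers x16 and x17.  */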
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

/* Toggle this file's internal debugging dump.  */
static int aarch64_debug;

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  return ((int32_t) insn << shift_l) >> shift_r;
}

/* Determine if the specified bits within an instruction opcode match
   a specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against PATTERN for a match.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}
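/* As an illustration of the helpers above: decode_b below calls
   extract_signed_bitfield (insn, 26, 0), which shifts the opcode left
   by 6 bits and arithmetically right by 6 bits, sign-extending the
   26-bit immediate; the result is then scaled by the 4-byte
   instruction size to form a byte offset.  */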
/* Decode an opcode if it represents an immediate ADD or SUB instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the immediate from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
                    int32_t *imm)
{
  if ((insn & 0x9f000000) == 0x91000000)
    {
      unsigned shift;
      unsigned op_is_sub;

      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = (insn >> 10) & 0xfff;
      shift = (insn >> 22) & 0x3;
      op_is_sub = (insn >> 30) & 0x1;

      switch (shift)
        {
        case 0:
          break;
        case 1:
          *imm <<= 12;
          break;
        default:
          /* UNDEFINED */
          return 0;
        }

      if (op_is_sub)
        *imm = -*imm;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
                            core_addr_to_string_nz (addr), insn, *rd, *rn,
                            *imm);
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents an ADRP instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
{
  if (decode_masked_match (insn, 0x9f000000, 0x90000000))
    {
      *rd = (insn >> 0) & 0x1f;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x adrp x%u, #?\n",
                            core_addr_to_string_nz (addr), insn, *rd);
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a branch immediate or branch
   and link immediate instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   LINK receives the 'link' bit from the decoded instruction.
   OFFSET receives the immediate offset from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
{
  /* b  0001 01ii iiii iiii iiii iiii iiii iiii */
  /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
  if (decode_masked_match (insn, 0x7c000000, 0x14000000))
    {
      *link = insn >> 31;
      *offset = extract_signed_bitfield (insn, 26, 0) << 2;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x %s 0x%s\n",
                            core_addr_to_string_nz (addr), insn,
                            *link ? "bl" : "b",
                            core_addr_to_string_nz (addr + *offset));

      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a conditional branch instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   COND receives the branch condition field from the decoded
   instruction.
   OFFSET receives the immediate offset from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
{
  if (decode_masked_match (insn, 0xfe000000, 0x54000000))
    {
      *cond = (insn >> 0) & 0xf;
      *offset = extract_signed_bitfield (insn, 19, 5) << 2;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x b<%u> 0x%s\n",
                            core_addr_to_string_nz (addr), insn, *cond,
                            core_addr_to_string_nz (addr + *offset));
      return 1;
    }
  return 0;
}
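/* For example, the opcode 0x54000040 matches the pattern above and
   decodes as "b.eq" (COND == 0) with an imm19 field of 2, giving a
   byte offset of 8, i.e. a branch to ADDR + 8.  */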
/* Decode an opcode if it represents a branch via register instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   LINK receives the 'link' bit from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
{
  /*         8   4   0   6   2   8   4   0 */
  /* blr  110101100011111100000000000rrrrr */
  /* br   110101100001111100000000000rrrrr */
  if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
    {
      *link = (insn >> 21) & 1;
      *rn = (insn >> 5) & 0x1f;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x %s 0x%x\n",
                            core_addr_to_string_nz (addr), insn,
                            *link ? "blr" : "br", *rn);

      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a CBZ or CBNZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives the 'sf' field from the decoded instruction.
   OP receives the 'op' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   OFFSET receives the 'imm19' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_cb (CORE_ADDR addr,
           uint32_t insn, int *is64, unsigned *op, unsigned *rn,
           int32_t *offset)
{
  if (decode_masked_match (insn, 0x7e000000, 0x34000000))
    {
      /* cbz  T011 010o iiii iiii iiii iiii iiir rrrr */
      /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */

      *rn = (insn >> 0) & 0x1f;
      *is64 = (insn >> 31) & 0x1;
      *op = (insn >> 24) & 0x1;
      *offset = extract_signed_bitfield (insn, 19, 5) << 2;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x %s 0x%s\n",
                            core_addr_to_string_nz (addr), insn,
                            *op ? "cbnz" : "cbz",
                            core_addr_to_string_nz (addr + *offset));
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents an ERET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_eret (CORE_ADDR addr, uint32_t insn)
{
  /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
  if (insn == 0xd69f03e0)
    {
      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
                            core_addr_to_string_nz (addr), insn);
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a MOVZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
{
  if (decode_masked_match (insn, 0xff800000, 0x52800000))
    {
      *rd = (insn >> 0) & 0x1f;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x movz x%u, #?\n",
                            core_addr_to_string_nz (addr), insn, *rd);
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents an ORR (shifted register)
   instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   RM receives the 'rm' field from the decoded instruction.
   IMM receives the 'imm6' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_orr_shifted_register_x (CORE_ADDR addr,
                               uint32_t insn, unsigned *rd, unsigned *rn,
                               unsigned *rm, int32_t *imm)
{
  if (decode_masked_match (insn, 0xff200000, 0xaa000000))
    {
      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rm = (insn >> 16) & 0x1f;
      *imm = (insn >> 10) & 0x3f;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
                            core_addr_to_string_nz (addr), insn, *rd,
                            *rn, *rm, *imm);
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a RET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
{
  if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
    {
      *rn = (insn >> 5) & 0x1f;
      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x ret x%u\n",
                            core_addr_to_string_nz (addr), insn, *rn);
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_stp_offset (CORE_ADDR addr,
                   uint32_t insn,
                   unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
      *imm <<= 3;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
                            core_addr_to_string_nz (addr), insn,
                            *rt1, *rt2, *rn, *imm);
      return 1;
    }
  return 0;
}
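/* For example, in "stp x29, x30, [sp, #48]" the imm7 field holds 6;
   the decoder above sign-extends it and scales it by 8 (the X
   register size), so IMM receives 48.  */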
/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]!

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_stp_offset_wb (CORE_ADDR addr,
                      uint32_t insn,
                      unsigned *rt1, unsigned *rt2, unsigned *rn,
                      int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
      *imm <<= 3;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
                            core_addr_to_string_nz (addr), insn,
                            *rt1, *rt2, *rn, *imm);
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents the following instruction:
   STUR rt, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives the size field from the decoded instruction.
   RT receives the 'rt' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
             unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
    {
      *is64 = (insn >> 30) & 1;
      *rt = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = extract_signed_bitfield (insn, 9, 12);

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
                            core_addr_to_string_nz (addr), insn,
                            *is64 ? 'x' : 'w', *rt, *rn, *imm);
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a TBZ or TBNZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   OP receives the 'op' field from the decoded instruction.
   BIT receives the bit position field from the decoded instruction.
   RT receives the 'rt' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
decode_tb (CORE_ADDR addr,
           uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
           int32_t *imm)
{
  if (decode_masked_match (insn, 0x7e000000, 0x36000000))
    {
      /* tbz  b011 0110 bbbb biii iiii iiii iiir rrrr */
      /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */

      *rt = (insn >> 0) & 0x1f;
      *op = insn & (1 << 24);
      *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
      *imm = extract_signed_bitfield (insn, 14, 5) << 2;

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog,
                            "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
                            core_addr_to_string_nz (addr), insn,
                            *op ? "tbnz" : "tbz", *rt, *bit,
                            core_addr_to_string_nz (addr + *imm));
      return 1;
    }
  return 0;
}
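/* A typical GCC prologue recognized by the analyzer below looks
   something like this (illustrative only):

     stp  x29, x30, [sp, #-32]!    decoded by decode_stp_offset_wb
     mov  x29, sp                  an alias of "add x29, sp, #0",
                                   decoded by decode_add_sub_imm

   from which the analyzer can deduce the frame register, the frame
   size and the stack offsets of the saved FP and LR.  */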
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      unsigned rd;
      unsigned rn;
      unsigned rm;
      unsigned rt;
      unsigned rt1;
      unsigned rt2;
      int op_is_sub;
      int32_t imm;
      unsigned cond;
      unsigned is64;
      unsigned is_link;
      unsigned op;
      unsigned bit;
      int32_t offset;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
        regs[rd] = pv_add_constant (regs[rn], imm);
      else if (decode_adrp (start, insn, &rd))
        regs[rd] = pv_unknown ();
      else if (decode_b (start, insn, &is_link, &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (decode_bcond (start, insn, &cond, &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (decode_br (start, insn, &is_link, &rn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (decode_eret (start, insn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (decode_movz (start, insn, &rd))
        regs[rd] = pv_unknown ();
      else if (decode_orr_shifted_register_x (start, insn, &rd, &rn,
                                              &rm, &imm))
        {
          if (imm == 0 && rn == 31)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                fprintf_unfiltered
                  (gdb_stdlog,
                   "aarch64: prologue analysis gave up addr=0x%s "
                   "opcode=0x%x (orr x register)\n",
                   core_addr_to_string_nz (start),
                   insn);
              break;
            }
        }
      else if (decode_ret (start, insn, &rn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
        {
          pv_area_store (stack, pv_add_constant (regs[rn], offset),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
        {
          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);
        }
      else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
        {
          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);
          regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            fprintf_unfiltered (gdb_stdlog,
                                "aarch64: prologue analysis gave up addr=0x%s"
                                " opcode=0x%x\n",
                                core_addr_to_string_nz (start), insn);
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;
  struct symtab_and_line sal;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;
      LONGEST saved_fp;
      LONGEST saved_lr;
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Allocate an aarch64_prologue_cache and fill it with information
   about the prologue of *THIS_FRAME.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame)
{
  struct aarch64_prologue_cache *cache;
  CORE_ADDR unwound_fp;
  int reg;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return cache;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return cache;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  return cache;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache;
  struct frame_id id;
  CORE_ADDR pc, func;

  if (*this_cache == NULL)
    *this_cache = aarch64_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* This is meant to halt the backtrace at "_start".  */
  pc = get_frame_pc (this_frame);
  if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return;

  /* If we've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return;

  func = get_frame_func (this_frame);
  id = frame_id_build (cache->prev_sp, func);
  *this_id = id;
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache;

  if (*this_cache == NULL)
    *this_cache = aarch64_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.
     The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate an aarch64_prologue_cache and fill it with information
   about the prologue of *THIS_FRAME.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame)
{
  int reg;
  struct aarch64_prologue_cache *cache;
  CORE_ADDR unwound_fp;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  cache->prev_sp
    = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);

  return cache;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache == NULL)
    *this_cache = aarch64_make_stub_cache (this_frame);
  cache = *this_cache;

  *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block, NULL)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
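/* The unwinders above are expected to be registered with the frame
   machinery in this file's gdbarch initialization code (not part of
   this excerpt); when DWARF call frame information is available the
   DWARF unwinder is preferred, with the prologue analyzer acting as a
   fallback.  */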
/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache == NULL)
    *this_cache = aarch64_make_prologue_cache (this_frame);
  cache = *this_cache;

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);
        if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
                            info->argnum,
                            gdbarch_register_name (gdbarch, regnum),
                            phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x () function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int regnum = AARCH64_V0_REGNUM + info->nsrn;

      info->argnum++;
      info->nsrn++;

      regcache_cooked_write (regcache, regnum, buf);
      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
                            info->argnum,
                            gdbarch_register_name (gdbarch, regnum));
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
                        info->argnum, len, info->nsaa);

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto the
   stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, buf);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, buf);
    }
}
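/* For example, with NGRN == 7 a 12-byte structure needs two X
   registers, so it no longer fits (7 + 2 > 8); in that case NGRN is
   set to 8 and the value goes onto the stack, ensuring that no later
   argument is passed in an X register once one has overflowed.  */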
/* Pass a value in a V register, or on the stack if insufficient
   registers are available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    const bfd_byte *buf)
{
  if (!pass_in_v (gdbarch, regcache, info, buf))
    pass_on_stack (info, type, buf);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int nstack = 0;
  int argnum;
  int x_argreg;
  int v_argreg;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     the return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward; ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
                            gdbarch_register_name
                              (gdbarch,
                               AARCH64_STRUCT_RETURN_REGNUM),
                            paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info, buf);
              pass_in_v (gdbarch, regcache, &info,
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, value_contents (arg));
            }
          break;

        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates.  */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fall back
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type,
                                          value_contents_writeable (field));
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, value_contents (arg));
                }
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                                  value_contents (arg));
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                                value_contents (arg));
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}
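/* These composite union types (and the H and B variants below) are
   presumably attached to the Q/D/S/H/B pseudo registers later in this
   file (outside this excerpt), so that, for example, "print $d0"
   shows the register both as a double and as 64-bit integers.  */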
/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const char aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implement the "breakpoint_from_pc" gdbarch method.  */

static const unsigned char *
aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
                            int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  *lenptr = sizeof (aarch64_default_breakpoint);
  return aarch64_default_breakpoint;
}
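/* As a concrete example of the extraction logic below: a function
   returning "struct { float x, y; }" is a homogeneous floating-point
   aggregate with two members, so its value comes back in V0 and V1
   and four bytes are copied from each of those registers into
   VALBUF.  */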
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[X_REGISTER_SIZE];

          if (aarch64_debug)
            fprintf_unfiltered (gdb_stdlog,
                                "read HFA return value element %d from %s\n",
                                i + 1,
                                gdbarch_register_name (gdbarch, regno));
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  CHECK_TYPEDEF (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  CHECK_TYPEDEF (type);

  /* In the AArch64 ABI, "integer like" aggregate types are returned
     in registers.  For an aggregate type to be integer like, its
     size must be less than or equal to 16 bytes.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}

/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in x0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with x0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"write HFA return value element %d to %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
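
/* Worked example of the classification used above (hypothetical
   types):

       struct hfa { double a, b, c; };   // 24 bytes, but an HFA
       struct big { long v[3]; };        // 24 bytes, not an HFA

   'struct hfa' is a homogeneous floating-point aggregate, so
   aarch64_return_in_memory returns 0 and its elements travel in
   V0-V2.  'struct big' is larger than 16 bytes and is not an HFA,
   so the function returns 1 and the value is returned through
   memory (PCS B.6, passed by invisible reference).  */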
/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
	  return RETURN_VALUE_STRUCT_CONVENTION;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    fprintf_unfiltered (gdb_stdlog, "return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
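
/* aarch64_get_longjmp_target is only wired up once an OS ABI handler
   has filled in tdep->jb_pc; it is initialised to -1 in
   aarch64_gdbarch_init below, which leaves longjmp support disabled.
   A minimal sketch of what such a handler might do, assuming a
   jmp_buf laid out as 8-byte slots with the saved PC at a
   hypothetical slot index JB_PC_SLOT:

       static void
       example_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
       {
         struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

         tdep->jb_pc = JB_PC_SLOT;   // hypothetical slot index
         tdep->jb_elt_size = 8;
       }

   The real slot index depends on the C library's jmp_buf layout.  */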

/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[regnum - AARCH64_Q0_REGNUM];

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return d_name[regnum - AARCH64_D0_REGNUM];

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return s_name[regnum - AARCH64_S0_REGNUM];

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return h_name[regnum - AARCH64_H0_REGNUM];

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return b_name[regnum - AARCH64_B0_REGNUM];

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  regnum);
}

/* Implement the "pseudo_register_type" tdesc_arch_data method.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_type: bad register number %d"),
		  regnum);
}

/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */

static int
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
				    struct reggroup *group)
{
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;

  return group == all_reggroup;
}
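
/* Worked example of the pseudo register numbering used by the
   functions above and below.  Pseudo register numbers start at
   gdbarch_num_regs (gdbarch); after subtracting that base the ranges
   are:

       [AARCH64_Q0_REGNUM, +32)  ->  q0..q31  (full 16-byte view)
       [AARCH64_D0_REGNUM, +32)  ->  d0..d31  (low 8 bytes)
       [AARCH64_S0_REGNUM, +32)  ->  s0..s31  (low 4 bytes)
       [AARCH64_H0_REGNUM, +32)  ->  h0..h31  (low 2 bytes)
       [AARCH64_B0_REGNUM, +32)  ->  b0..b31  (low byte)

   So, for instance, the pseudo register named "d3" resolves to
   AARCH64_D0_REGNUM + 3 and is backed by the raw register v3, of
   which it exposes the least significant 8 bytes.  */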
/* Implement the "pseudo_register_read_value" gdbarch method.  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch,
			   struct regcache *regcache,
			   int regnum)
{
  gdb_byte reg_buf[MAX_REGISTER_SIZE];
  struct value *result_value;
  gdb_byte *buf;

  result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  buf = value_contents_raw (result_value);

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    {
      enum register_status status;
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
      status = regcache_raw_read (regcache, v_regnum, reg_buf);
      if (status != REG_VALID)
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
      else
	memcpy (buf, reg_buf, Q_REGISTER_SIZE);
      return result_value;
    }

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    {
      enum register_status status;
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
      status = regcache_raw_read (regcache, v_regnum, reg_buf);
      if (status != REG_VALID)
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
      else
	memcpy (buf, reg_buf, D_REGISTER_SIZE);
      return result_value;
    }

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    {
      enum register_status status;
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
      status = regcache_raw_read (regcache, v_regnum, reg_buf);
      if (status != REG_VALID)
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
      else
	memcpy (buf, reg_buf, S_REGISTER_SIZE);
      return result_value;
    }

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    {
      enum register_status status;
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
      status = regcache_raw_read (regcache, v_regnum, reg_buf);
      if (status != REG_VALID)
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
      else
	memcpy (buf, reg_buf, H_REGISTER_SIZE);
      return result_value;
    }

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    {
      enum register_status status;
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
      status = regcache_raw_read (regcache, v_regnum, reg_buf);
      if (status != REG_VALID)
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
      else
	memcpy (buf, reg_buf, B_REGISTER_SIZE);
      return result_value;
    }

  gdb_assert_not_reached ("regnum out of bounds");
}
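
/* Note on the REG_VALID handling above: if the backing raw V register
   cannot be supplied (for example when examining a core file that
   carries no FP/SIMD register state), the pseudo register's value is
   still returned, but with all of its bytes marked unavailable, so a
   command such as "info registers q0" reports the register as
   unavailable instead of raising an error.  */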

/* Implement the "pseudo_register_write" gdbarch method.  */

static void
aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		      int regnum, const gdb_byte *buf)
{
  gdb_byte reg_buf[MAX_REGISTER_SIZE];

  /* Ensure the register buffer is zero.  We want GDB writes of the
     various 'scalar' pseudo registers to behave like architectural
     writes: register-width bytes are written and the remainder is
     set to zero.  */
  memset (reg_buf, 0, sizeof (reg_buf));

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    {
      /* pseudo Q registers */
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
      memcpy (reg_buf, buf, Q_REGISTER_SIZE);
      regcache_raw_write (regcache, v_regnum, reg_buf);
      return;
    }

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    {
      /* pseudo D registers */
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
      memcpy (reg_buf, buf, D_REGISTER_SIZE);
      regcache_raw_write (regcache, v_regnum, reg_buf);
      return;
    }

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    {
      /* pseudo S registers */
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
      memcpy (reg_buf, buf, S_REGISTER_SIZE);
      regcache_raw_write (regcache, v_regnum, reg_buf);
      return;
    }

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    {
      /* pseudo H registers */
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
      memcpy (reg_buf, buf, H_REGISTER_SIZE);
      regcache_raw_write (regcache, v_regnum, reg_buf);
      return;
    }

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    {
      /* pseudo B registers */
      unsigned v_regnum;

      v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
      memcpy (reg_buf, buf, B_REGISTER_SIZE);
      regcache_raw_write (regcache, v_regnum, reg_buf);
      return;
    }

  gdb_assert_not_reached ("regnum out of bounds");
}
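
/* Worked example of the zero-extension semantics above: a write of
   the 4-byte pseudo register s0 copies those 4 bytes into the zeroed
   16-byte buffer and then writes the whole buffer to the raw v0
   register.  The upper 12 bytes of v0 (and therefore the overlapping
   d0/q0 views) read back as zero afterwards, which matches the
   architectural effect of a scalar write to s0.  */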

/* Implement the "write_pc" gdbarch method.  */

static void
aarch64_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  regcache_cooked_write_unsigned (regcache, AARCH64_PC_REGNUM, pc);
}

/* Callback function for user_reg_add.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}


/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  struct tdesc_arch_data *tdesc_data = NULL;
  const struct target_desc *tdesc = info.target_desc;
  int i;
  int have_fpa_registers = 1;
  int valid_p = 1;
  const struct tdesc_feature *feature;
  int num_regs = 0;
  int num_pseudo_regs = 0;

  /* Ensure we always have a target descriptor.  */
  if (!tdesc_has_registers (tdesc))
    tdesc = tdesc_aarch64;

  gdb_assert (tdesc);

  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");

  if (feature == NULL)
    return NULL;

  tdesc_data = tdesc_data_alloc ();

  /* Validate the descriptor provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &=
      tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
			       aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Look for the V registers.  */
  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  if (feature)
    {
      /* Validate the descriptor provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &=
	  tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
				   aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;

      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return NULL;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
  gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  set_gdbarch_write_pc (gdbarch, aarch64_write_pc);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdep_info = (void *) tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  return gdbarch;
}
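
/* The gdbarch set up above is deliberately OS-agnostic; OS-specific
   behaviour is layered on through the gdbarch_init_osabi call.  A
   minimal sketch of how an OS ABI file might hook itself in
   (hypothetical handler name; a real handler does more work):

       static void
       example_aarch64_init_abi (struct gdbarch_info info,
                                 struct gdbarch *gdbarch)
       {
         // OS-specific overrides go here, e.g. signal frame
         // unwinders or a jmp_buf layout for longjmp support.
       }

       void
       _initialize_example_aarch64_abi (void)
       {
         gdbarch_register_osabi (bfd_arch_aarch64, 0, GDB_OSABI_LINUX,
                                 example_aarch64_init_abi);
       }
  */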

static void
aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep == NULL)
    return;

  fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
		      paddress (gdbarch, tdep->lowest_pc));
}

/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();
  initialize_tdesc_aarch64_without_fpu ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);
}
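
/* Usage notes (illustrative GDB session):

       (gdb) set debug aarch64 on
       (gdb) info registers d0 q0
       (gdb) print $w0
       (gdb) print $fp

   "set debug aarch64 on" enables the aarch64_debug traces used
   throughout this file; the register aliases and the Q/D/S/H/B
   pseudo registers exercised by the other commands are provided by
   the code above.  */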